1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
51 // Static variable definitions.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
// Windows builds: map the generic MUTEX_* macros onto Win32 critical sections.
#define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
#define MUTEX_DESTROY(A) DeleteCriticalSection(A)
#define MUTEX_LOCK(A) EnterCriticalSection(A)
#define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Narrow-character overload: simply wrap the NUL-terminated C string in a
// std::string (no encoding conversion is required).
static std::string convertCharPointerToStdString(const char *text)
{
  std::string converted( text );
  return converted;
}
71 static std::string convertCharPointerToStdString(const wchar_t *text)
73 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
74 std::string s( length-1, '\0' );
75 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
// POSIX builds: map the generic MUTEX_* macros onto pthread mutexes.
#define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
#define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
#define MUTEX_LOCK(A) pthread_mutex_lock(A)
#define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
// Fallback branch (no audio API compiled in): no-op definitions that still
// evaluate their argument so the call sites stay syntactically valid.
#define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
#define MUTEX_DESTROY(A) abs(*A) // dummy definitions
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
// Return the RtAudio library version string (the RTAUDIO_VERSION macro
// defined in RtAudio.h).
std::string RtAudio :: getVersion( void )
  return RTAUDIO_VERSION;
// Define API names and display names.
// Must be in same order as API enum.
// Each row is { machine-readable identifier, human-readable display name }.
// NOTE(review): the table must contain exactly one row per RtAudio::Api
// enum value (the StaticAssert below checks the count).
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "pulse" , "Pulse" },
  { "oss" , "OpenSoundSystem" },
  { "core" , "CoreAudio" },
  { "wasapi" , "WASAPI" },
  { "ds" , "DirectSound" },
  { "dummy" , "Dummy" },
// Number of rows in the table above.
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
// The order here will control the order of RtAudio's API search in
// the constructor: each compiled-in API is listed, terminated by the
// UNSPECIFIED sentinel.
extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
#if defined(__UNIX_JACK__)
#if defined(__LINUX_PULSE__)
  RtAudio::LINUX_PULSE,
#if defined(__LINUX_ALSA__)
#if defined(__LINUX_OSS__)
#if defined(__WINDOWS_ASIO__)
  RtAudio::WINDOWS_ASIO,
#if defined(__WINDOWS_WASAPI__)
  RtAudio::WINDOWS_WASAPI,
#if defined(__WINDOWS_DS__)
#if defined(__MACOSX_CORE__)
  RtAudio::MACOSX_CORE,
#if defined(__RTAUDIO_DUMMY__)
  RtAudio::RTAUDIO_DUMMY,
  RtAudio::UNSPECIFIED,
// Count of compiled APIs; the -1 excludes the UNSPECIFIED sentinel.
extern "C" const unsigned int rtaudio_num_compiled_apis =
  sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
155 static const std::vector<RtAudio::Api> init_compiledApis() {
156 return std::vector<RtAudio::Api>(
157 rtaudio_compiled_apis, rtaudio_compiled_apis + rtaudio_num_compiled_apis);
159 const std::vector<RtAudio::Api> RtAudio::compiledApis(init_compiledApis());
// This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
// If the build breaks here, check that they match.
// Pre-C++11 static_assert emulation: StaticAssert<false> has a private
// constructor, so instantiating it is a compile error.
template<bool b> class StaticAssert { private: StaticAssert() {} };
template<> class StaticAssert<true>{ public: StaticAssert() {} };
class StaticAssertions { StaticAssertions() {
  StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
// Fill `apis` with the list of APIs compiled into this build
// (copies RtAudio::compiledApis; body elided in this view).
void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
// Return a reference to the static compiled-API list
// (returns RtAudio::compiledApis; body elided in this view).
const std::vector<RtAudio::Api>& RtAudio :: getCompiledApis()
// Return the short machine-readable identifier (e.g. "core", "wasapi")
// for a compiled-in API; the guard branch's return (elided in this view)
// handles unknown/uncompiled APIs.
const std::string RtAudio :: getCompiledApiName( RtAudio::Api api )
  // NOTE(review): `api > RtAudio::NUM_APIS` lets api == NUM_APIS through
  // this bounds test; the std::find() on compiledApis is what actually
  // rejects it before the table lookup below. `>=` would be tighter —
  // confirm against upstream.
  if (api < 0 || api > RtAudio::NUM_APIS
      || (std::find(RtAudio::compiledApis.begin(),
                    RtAudio::compiledApis.end(), api) == RtAudio::compiledApis.end()))
  return rtaudio_api_names[api][0];
// Return the human-readable display name (e.g. "CoreAudio") for a
// compiled-in API; the guard branch's return (elided in this view)
// handles unknown/uncompiled APIs.
const std::string RtAudio :: getCompiledApiDisplayName( RtAudio::Api api )
  // NOTE(review): same `> NUM_APIS` bounds question as getCompiledApiName;
  // the std::find() guard is what rejects api == NUM_APIS.
  if (api < 0 || api > RtAudio::NUM_APIS
      || (std::find(RtAudio::compiledApis.begin(),
                    RtAudio::compiledApis.end(), api) == RtAudio::compiledApis.end()))
  return rtaudio_api_names[api][1];
// Reverse lookup: map a machine-readable identifier back to its Api enum
// value, returning UNSPECIFIED if no compiled API matches.
RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
  std::vector<RtAudio::Api>::const_iterator it;
  // NOTE(review): the declaration of the `i` counter (and the matching
  // `return` on a name hit) are elided in this view.
  for (it = compiledApis.begin(); it != compiledApis.end(); ++it, ++i)
    if (name == rtaudio_api_names[*it][0])
  return RtAudio::UNSPECIFIED;
// Instantiate the RtApi subclass implementing the requested API and store
// it in rtapi_.  Each candidate is guarded by its compile-time symbol, so
// if the requested API was not compiled in, rtapi_ is left unset and the
// caller (the constructor) falls back to searching all compiled APIs.
void RtAudio :: openRtApi( RtAudio::Api api )
#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#if defined(__LINUX_PULSE__)
  if ( api == LINUX_PULSE )
    rtapi_ = new RtApiPulse();
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
// Construct an RtAudio instance.  If a specific API is requested, try it
// first; otherwise (or on failure) probe each compiled API in search order
// and keep the first one that reports at least one device.  Throws only in
// the should-be-impossible case that no API support was compiled in.
RtAudio :: RtAudio( RtAudio::Api api )
  if ( api != UNSPECIFIED ) {
    // Attempt to open the specified API.
    if ( rtapi_ ) return;
    // No compiled support for specified API value. Issue a debug
    // warning and continue as if no API was specified.
    std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
  // Iterate through the compiled APIs and return as soon as we find
  // one with at least one device or we reach the end of the list.
  std::vector< RtAudio::Api > apis;
  getCompiledApi( apis );
  for ( unsigned int i=0; i<apis.size(); i++ ) {
    openRtApi( apis[i] );
    if ( rtapi_ && rtapi_->getDeviceCount() ) break;
  if ( rtapi_ ) return;
  // It should not be possible to get here because the preprocessor
  // definition __RTAUDIO_DUMMY__ is automatically defined if no
  // API-specific definitions are passed to the compiler. But just in
  // case something weird happens, we'll thow an error.
  std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
  throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
// Destructor: releases the owned RtApi instance (body elided in this view).
RtAudio :: ~RtAudio()
// Thin forwarding wrapper: delegate stream opening to the selected API
// back end (rtapi_) with all arguments unchanged.
void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
                            RtAudio::StreamParameters *inputParameters,
                            RtAudioFormat format, unsigned int sampleRate,
                            unsigned int *bufferFrames,
                            RtAudioCallback callback, void *userData,
                            RtAudio::StreamOptions *options,
                            RtAudioErrorCallback errorCallback )
  return rtapi_->openStream( outputParameters, inputParameters, format,
                             sampleRate, bufferFrames, callback,
                             userData, options, errorCallback );
303 // *************************************************** //
305 // Public RtApi definitions (see end of file for
306 // private or protected utility functions).
308 // *************************************************** //
// NOTE(review): the `RtApi :: RtApi()` and `RtApi :: ~RtApi()` signature
// lines are elided in this view; the statements below are their bodies.
// Constructor body: put the stream record into a known-closed state and
// create the mutex protecting it.
  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
  stream_.apiHandle = 0;
  stream_.userBuffer[0] = 0;
  stream_.userBuffer[1] = 0;
  MUTEX_INITIALIZE( &stream_.mutex );
  showWarnings_ = true;
  firstErrorOccurred_ = false;
// Destructor body: release the stream mutex.
  MUTEX_DESTROY( &stream_.mutex );
// Validate the user's stream request, probe-open the output and/or input
// halves via the subclass's probeDeviceOpen(), and record the callback
// info.  Each `if (...) {` guard below raises INVALID_USE and returns
// (the early-return lines are elided in this view).
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
  // Only one stream may be open per RtApi instance.
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );
  // Clear stream information potentially left from a previously open stream.
  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
  if ( oParams == NULL && iParams == NULL ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );
  // formatBytes() returns 0 for an unknown RtAudioFormat value.
  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );
  // Range-check the requested device IDs against the current device count.
  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
  oChannels = oParams->nChannels;
  if ( oParams->deviceId >= nDevices ) {
    errorText_ = "RtApi::openStream: output device parameter value is invalid.";
    error( RtAudioError::INVALID_USE );
  unsigned int iChannels = 0;
  iChannels = iParams->nChannels;
  if ( iParams->deviceId >= nDevices ) {
    errorText_ = "RtApi::openStream: input device parameter value is invalid.";
    error( RtAudioError::INVALID_USE );
  // Open the output half first, then the input half.
  if ( oChannels > 0 ) {
    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );
  if ( iChannels > 0 ) {
    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      // Undo the already-opened output half before reporting failure.
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );
  // Record the user callback/context; stored as void* in CallbackInfo.
  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;
  // Report back the buffer count the back end actually chose.
  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
// Base-class default: device 0 (body elided in this view).
unsigned int RtApi :: getDefaultInputDevice( void )
  // Should be implemented in subclasses if possible.
// Base-class default: device 0 (body elided in this view).
unsigned int RtApi :: getDefaultOutputDevice( void )
  // Should be implemented in subclasses if possible.
// Base-class stub for closing the open stream.
void RtApi :: closeStream( void )
  // MUST be implemented in subclasses!
// Base-class stub: subclasses implement the real device-open logic and
// return true on success (this stub's failure return is elided in this view).
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
  // MUST be implemented in subclasses!
// Advance the running stream-time estimate by one buffer's duration and,
// when gettimeofday() is available, record a wall-clock timestamp so
// getStreamTime() can interpolate between ticks.
void RtApi :: tickStreamTime( void )
  // Subclasses that do not provide their own implementation of
  // getStreamTime should call this function once per buffer I/O to
  // provide basic stream time support.
  stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
#if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );
// Total stream latency in frames: output latency plus input latency,
// depending on the stream mode (return statement elided in this view).
long RtApi :: getStreamLatency( void )
  long totalLatency = 0;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
    totalLatency = stream_.latency[0];
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
    totalLatency += stream_.latency[1];
// Return the current stream time in seconds.  When gettimeofday() is
// available, refine the per-buffer estimate by adding the wall-clock time
// elapsed since the last tickStreamTime() call.
double RtApi :: getStreamTime( void )
#if defined( HAVE_GETTIMEOFDAY )
  // Return a very accurate estimate of the stream time by
  // adding in the elapsed time since the last tick.
  // Not running (or never ticked): interpolation would be meaningless.
  if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
    return stream_.streamTime;
  gettimeofday( &now, NULL );
  then = stream_.lastTickTimestamp;
  return stream_.streamTime +
    ((now.tv_sec + 0.000001 * now.tv_usec) -
     (then.tv_sec + 0.000001 * then.tv_usec));
  // Fallback without gettimeofday(): buffer-granularity time only.
  return stream_.streamTime;
// Reset the stream-time base to `time` (seconds) and restart the
// wall-clock interpolation from now.
void RtApi :: setStreamTime( double time )
  stream_.streamTime = time;
#if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );
// Return the sample rate of the currently open stream.
unsigned int RtApi :: getStreamSampleRate( void )
  return stream_.sampleRate;
515 // *************************************************** //
517 // OS/API-specific methods.
519 // *************************************************** //
521 #if defined(__MACOSX_CORE__)
523 // The OS X CoreAudio API is designed to use a separate callback
524 // procedure for each of its audio devices. A single RtAudio duplex
525 // stream using two different devices is supported here, though it
526 // cannot be guaranteed to always behave correctly because we cannot
527 // synchronize these two callbacks.
529 // A property listener is installed for over/underrun information.
530 // However, no functionality is currently provided to allow property
531 // listeners to trigger user handlers because it is unclear what could
532 // be done if a critical stream parameter (buffer size, sample rate,
533 // device disconnect) notification arrived. The listeners entail
534 // quite a bit of extra code and most likely, a user program wouldn't
535 // be prepared for the result anyway. However, we do provide a flag
536 // to the client callback function to inform of an over/underrun.
// A structure to hold various information related to the CoreAudio API
// implementation.  Index [0] refers to the output device, [1] to the input
// device throughout.  NOTE(review): the struct header line and some members
// referenced by the initializer (xrun, deviceBuffer) are elided in this view.
  AudioDeviceID id[2]; // device ids
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  // IOProc identifiers returned by AudioDeviceCreateIOProcID (10.5+ API).
  AudioDeviceIOProcID procId[2];
  UInt32 iStream[2]; // device stream index (or first if using multiple)
  UInt32 nStreams[2]; // number of streams to use
  pthread_cond_t condition;
  int drainCounter; // Tracks callback counts when draining
  bool internalDrain; // Indicates if stop is initiated from callback or not.
  // Constructor initializer list/body: zero everything, one stream per direction.
  :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// RtApiCore constructor: on 10.6+, explicitly attach CoreAudio's hardware
// notifications to a run loop so device property queries/updates work.
RtApiCore:: RtApiCore()
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6. If not called, queries and
  // updates to various audio device properties are not handled
  // correctly.
  CFRunLoopRef theRunLoop = NULL;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  if ( result != noErr ) {
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING );
// Close any open stream before the base-class destructor runs.
RtApiCore :: ~RtApiCore()
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
// Query CoreAudio for the size of the device-ID list and derive the device
// count from it (the error branch's return is elided in this view).
unsigned int RtApiCore :: getDeviceCount( void )
  // Find out how many audio devices there are, if any.
  AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
    error( RtAudioError::WARNING );
  return dataSize / sizeof( AudioDeviceID );
// Map CoreAudio's default input AudioDeviceID back to RtAudio's device
// index by searching the full device list (warning-branch returns elided
// in this view).
unsigned int RtApiCore :: getDefaultInputDevice( void )
  unsigned int nDevices = getDeviceCount();
  // Zero or one device: index 0 is trivially the default.
  if ( nDevices <= 1 ) return 0;
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );
  // Fetch the complete device list and locate the default device's index.
  dataSize *= nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;
  errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
  error( RtAudioError::WARNING );
// Map CoreAudio's default output AudioDeviceID back to RtAudio's device
// index by searching the full device list (warning-branch returns elided
// in this view).
unsigned int RtApiCore :: getDefaultOutputDevice( void )
  unsigned int nDevices = getDeviceCount();
  // Zero or one device: index 0 is trivially the default.
  if ( nDevices <= 1 ) return 0;
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );
  // Fetch the complete device list and locate the default device's index.
  dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;
  errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
  error( RtAudioError::WARNING );
// Build a DeviceInfo record (name, channel counts, duplex channels,
// supported sample rates, native format, default-device flags) for the
// CoreAudio device at RtAudio index `device`.  Failures are reported as
// warnings via error(); early-return lines are elided in this view.
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
  if ( device >= nDevices ) {
    errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
  // Translate the RtAudio device index into a CoreAudio AudioDeviceID.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
  AudioDeviceID id = deviceList[ device ];
  // Get the device name.
  dataSize = sizeof( CFStringRef );
  property.mSelector = kAudioObjectPropertyManufacturer;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  // Worst case a CFString character encodes to 3 bytes, plus a NUL.
  int length = CFStringGetLength(cfname);
  char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
  CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
  // Device name is "<manufacturer>: <name>".
  info.name.append( (const char *)mname, strlen(mname) );
  info.name.append( ": " );
  property.mSelector = kAudioObjectPropertyName;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  length = CFStringGetLength(cfname);
  char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
  CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
  info.name.append( (const char *)name, strlen(name) );
  // Get the output stream "configuration".
  AudioBufferList *bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  property.mScope = kAudioDevicePropertyScopeOutput;
  //  property.mElement = kAudioObjectPropertyElementWildcard;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
    error( RtAudioError::WARNING );
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // Get output channel information.
  // Output channels = sum of channels across all output streams.
  unsigned int i, nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
  // Get the input stream "configuration".
  property.mScope = kAudioDevicePropertyScopeInput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
    error( RtAudioError::WARNING );
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // Get input channel information.
  nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
  // Probe the device sample rates.
  bool isInput = false;
  if ( info.outputChannels == 0 ) isInput = true;
  // Determine the supported sample rates.
  property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
  if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != kAudioHardwareNoError || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  UInt32 nRanges = dataSize / sizeof( AudioValueRange );
  AudioValueRange rangeList[ nRanges ];
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
  if ( result != kAudioHardwareNoError ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // The sample rate reporting mechanism is a bit of a mystery. It
  // seems that it can either return individual rates or a range of
  // rates. I assume that if the min / max range values are the same,
  // then that represents a single supported rate and if the min / max
  // range values are different, the device supports an arbitrary
  // range of values (though there might be multiple ranges, so we'll
  // use the most conservative range).
  Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
  bool haveValueRange = false;
  info.sampleRates.clear();
  for ( UInt32 i=0; i<nRanges; i++ ) {
    if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
      // Discrete rate: record it directly.
      unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
      info.sampleRates.push_back( tmpSr );
      // Preferred rate: the highest supported rate not exceeding 48 kHz.
      if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
        info.preferredSampleRate = tmpSr;
      // True range: intersect it with the ranges seen so far.
      haveValueRange = true;
      if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
      if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
  // For a range result, enumerate the standard candidate rates inside it.
  if ( haveValueRange ) {
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );
        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];
  // Sort and remove any redundant values
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );
  info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // CoreAudio always uses 32-bit floating point data for PCM streams.
  // Thus, any other "physical" formats supported by the device are of
  // no interest to the client.
  info.nativeFormats = RTAUDIO_FLOAT32;
  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// CoreAudio IOProc trampoline: recover the RtApiCore instance from the
// user-data pointer and forward the buffers to callbackEvent().
static OSStatus callbackHandler( AudioDeviceID inDevice,
                                 const AudioTimeStamp* /*inNow*/,
                                 const AudioBufferList* inInputData,
                                 const AudioTimeStamp* /*inInputTime*/,
                                 AudioBufferList* outOutputData,
                                 const AudioTimeStamp* /*inOutputTime*/,
  CallbackInfo *info = (CallbackInfo *) infoPointer;
  RtApiCore *object = (RtApiCore *) info->object;
  // A false return from callbackEvent signals failure to CoreAudio.
  if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
    return kAudioHardwareUnspecifiedError;
  return kAudioHardwareNoError;
// Property listener: record processor-overload (xrun) notifications in the
// CoreHandle flags ([1] = input scope, [0] = output/other scopes).
static OSStatus xrunListener( AudioObjectID /*inDevice*/,
                              const AudioObjectPropertyAddress properties[],
                              void* handlePointer )
  CoreHandle *handle = (CoreHandle *) handlePointer;
  for ( UInt32 i=0; i<nAddresses; i++ ) {
    if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
      if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
        handle->xrun[1] = true;
      handle->xrun[0] = true;
  return kAudioHardwareNoError;
// Property listener: read the device's current nominal sample rate into
// the Float64 pointed to by ratePointer (used to wait for rate changes).
static OSStatus rateListener( AudioObjectID inDevice,
                              UInt32 /*nAddresses*/,
                              const AudioObjectPropertyAddress /*properties*/[],
  Float64 *rate = (Float64 *) ratePointer;
  UInt32 dataSize = sizeof( Float64 );
  AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
  return kAudioHardwareNoError;
// Open and configure one direction (OUTPUT or INPUT) of a CoreAudio stream
// on the given device index: locates the CoreAudio stream(s) that cover the
// requested channels, negotiates buffer size, sample rate, virtual and
// physical formats, allocates the CoreHandle and conversion buffers, and
// registers the I/O proc and xrun listener.  Returns SUCCESS/FAILURE.
// NOTE(review): this listing is elided — many structural lines (closing
// braces, 'return FAILURE;' statements, else-branches and the 'error:'
// cleanup label) are missing between the numbered lines below.
955 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
956 unsigned int firstChannel, unsigned int sampleRate,
957 RtAudioFormat format, unsigned int *bufferSize,
958 RtAudio::StreamOptions *options )
961 unsigned int nDevices = getDeviceCount();
962 if ( nDevices == 0 ) {
963 // This should not happen because a check is made before this function is called.
964 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
968 if ( device >= nDevices ) {
969 // This should not happen because a check is made before this function is called.
970 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// Map our zero-based device index onto the system's AudioDeviceID list.
974 AudioDeviceID deviceList[ nDevices ];
975 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
976 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
977 kAudioObjectPropertyScopeGlobal,
978 kAudioObjectPropertyElementMaster };
979 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
980 0, NULL, &dataSize, (void *) &deviceList );
981 if ( result != noErr ) {
982 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
986 AudioDeviceID id = deviceList[ device ];
988 // Setup for stream mode.
989 bool isInput = false;
990 if ( mode == INPUT ) {
992 property.mScope = kAudioDevicePropertyScopeInput;
995 property.mScope = kAudioDevicePropertyScopeOutput;
997 // Get the stream "configuration".
998 AudioBufferList *bufferList = nil;
1000 property.mSelector = kAudioDevicePropertyStreamConfiguration;
1001 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
1002 if ( result != noErr || dataSize == 0 ) {
1003 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
1004 errorText_ = errorStream_.str();
1008 // Allocate the AudioBufferList.
1009 bufferList = (AudioBufferList *) malloc( dataSize );
1010 if ( bufferList == NULL ) {
1011 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1015 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1016 if (result != noErr || dataSize == 0) {
1018 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1019 errorText_ = errorStream_.str();
1023 // Search for one or more streams that contain the desired number of
1024 // channels. CoreAudio devices can have an arbitrary number of
1025 // streams and each stream can have an arbitrary number of channels.
1026 // For each stream, a single buffer of interleaved samples is
1027 // provided. RtAudio prefers the use of one stream of interleaved
1028 // data or multiple consecutive single-channel streams. However, we
1029 // now support multiple consecutive multi-channel streams of
1030 // interleaved data as well.
1031 UInt32 iStream, offsetCounter = firstChannel;
1032 UInt32 nStreams = bufferList->mNumberBuffers;
1033 bool monoMode = false;
1034 bool foundStream = false;
1036 // First check that the device supports the requested number of
1038 UInt32 deviceChannels = 0;
1039 for ( iStream=0; iStream<nStreams; iStream++ )
1040 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1042 if ( deviceChannels < ( channels + firstChannel ) ) {
1044 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1045 errorText_ = errorStream_.str();
1049 // Look for a single stream meeting our needs.
1050 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1051 for ( iStream=0; iStream<nStreams; iStream++ ) {
1052 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
// This stream alone can carry channels + remaining offset: use it.
1053 if ( streamChannels >= channels + offsetCounter ) {
1054 firstStream = iStream;
1055 channelOffset = offsetCounter;
// Requested channels would straddle this stream's boundary — stop the
// single-stream search; otherwise consume this stream's channels from
// the remaining channel offset and keep scanning.
1059 if ( streamChannels > offsetCounter ) break;
1060 offsetCounter -= streamChannels;
1063 // If we didn't find a single stream above, then we should be able
1064 // to meet the channel specification with multiple streams.
1065 if ( foundStream == false ) {
1067 offsetCounter = firstChannel;
// Skip whole streams until the one containing the first requested channel.
1068 for ( iStream=0; iStream<nStreams; iStream++ ) {
1069 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1070 if ( streamChannels > offsetCounter ) break;
1071 offsetCounter -= streamChannels;
1074 firstStream = iStream;
1075 channelOffset = offsetCounter;
1076 Int32 channelCounter = channels + offsetCounter - streamChannels;
1078 if ( streamChannels > 1 ) monoMode = false;
// Count how many consecutive streams are needed to cover the request.
// NOTE(review): elided lines presumably increment streamCount here;
// ++iStream walks forward through the buffer list.
1079 while ( channelCounter > 0 ) {
1080 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1081 if ( streamChannels > 1 ) monoMode = false;
1082 channelCounter -= streamChannels;
1089 // Determine the buffer size.
1090 AudioValueRange bufferRange;
1091 dataSize = sizeof( AudioValueRange );
1092 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1093 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1095 if ( result != noErr ) {
1096 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1097 errorText_ = errorStream_.str();
// Clamp the caller's requested frame count into the device's legal range.
1101 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1102 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1103 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1105 // Set the buffer size. For multiple streams, I'm assuming we only
1106 // need to make this setting for the master channel.
1107 UInt32 theSize = (UInt32) *bufferSize;
1108 dataSize = sizeof( UInt32 );
1109 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1110 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1112 if ( result != noErr ) {
1113 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1114 errorText_ = errorStream_.str();
1118 // If attempting to setup a duplex stream, the bufferSize parameter
1119 // MUST be the same in both directions!
1120 *bufferSize = theSize;
1121 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1122 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1123 errorText_ = errorStream_.str();
1127 stream_.bufferSize = *bufferSize;
1128 stream_.nBuffers = 1;
1130 // Try to set "hog" mode ... it's not clear to me this is working.
1131 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1133 dataSize = sizeof( hog_pid );
1134 property.mSelector = kAudioDevicePropertyHogMode;
1135 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1136 if ( result != noErr ) {
1137 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1138 errorText_ = errorStream_.str();
// Only claim exclusive access if another process (or none) currently hogs it.
1142 if ( hog_pid != getpid() ) {
1144 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1145 if ( result != noErr ) {
1146 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1147 errorText_ = errorStream_.str();
1153 // Check and if necessary, change the sample rate for the device.
1154 Float64 nominalRate;
1155 dataSize = sizeof( Float64 );
1156 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1157 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1158 if ( result != noErr ) {
1159 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1160 errorText_ = errorStream_.str();
1164 // Only change the sample rate if off by more than 1 Hz.
1165 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1167 // Set a property listener for the sample rate change
1168 Float64 reportedRate = 0.0;
1169 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
// rateListener writes the device's reported rate into reportedRate so we
// can poll below for the asynchronous change to complete.
1170 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1171 if ( result != noErr ) {
1172 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1173 errorText_ = errorStream_.str();
1177 nominalRate = (Float64) sampleRate;
1178 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1179 if ( result != noErr ) {
1180 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1181 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1182 errorText_ = errorStream_.str();
1186 // Now wait until the reported nominal rate is what we just set.
// Poll with a 5-second overall timeout.  NOTE(review): the listing elides
// the sleep inside this loop (usleep(5000) in the canonical source);
// as shown it would busy-spin — confirm against the full file.
1187 UInt32 microCounter = 0;
1188 while ( reportedRate != nominalRate ) {
1189 microCounter += 5000;
1190 if ( microCounter > 5000000 ) break;
1194 // Remove the property listener.
1195 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1197 if ( microCounter > 5000000 ) {
1198 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1199 errorText_ = errorStream_.str();
1204 // Now set the stream format for all streams. Also, check the
1205 // physical format of the device and change that if necessary.
1206 AudioStreamBasicDescription description;
1207 dataSize = sizeof( AudioStreamBasicDescription );
1208 property.mSelector = kAudioStreamPropertyVirtualFormat;
1209 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1210 if ( result != noErr ) {
1211 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1212 errorText_ = errorStream_.str();
1216 // Set the sample rate and data format id. However, only make the
1217 // change if the sample rate is not within 1.0 of the desired
1218 // rate and the format is not linear pcm.
1219 bool updateFormat = false;
1220 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1221 description.mSampleRate = (Float64) sampleRate;
1222 updateFormat = true;
1225 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1226 description.mFormatID = kAudioFormatLinearPCM;
1227 updateFormat = true;
1230 if ( updateFormat ) {
1231 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1232 if ( result != noErr ) {
1233 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1234 errorText_ = errorStream_.str();
1239 // Now check the physical format.
1240 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1241 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1242 if ( result != noErr ) {
1243 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1244 errorText_ = errorStream_.str();
1248 //std::cout << "Current physical stream format:" << std::endl;
1249 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1250 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1251 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1252 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
// If the physical format isn't at least 16-bit linear PCM, negotiate one
// from a candidate list (pair.first = bit depth marker, pair.second =
// format flags).  The fractional markers 24.2 / 24.4 distinguish 24-bit
// in 4 bytes, aligned low / high.
1254 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1255 description.mFormatID = kAudioFormatLinearPCM;
1256 //description.mSampleRate = (Float64) sampleRate;
1257 AudioStreamBasicDescription testDescription = description;
1260 // We'll try higher bit rates first and then work our way down.
// BUG(review): element type should be std::pair<Float32, UInt32> (as in the
// canonical RtAudio source).  With pair<UInt32, UInt32>, the Float32 pairs
// pushed below are converted and 24.2 / 24.4 truncate to 24, making the
// three 24-bit variants indistinguishable in the loop that follows.
1261 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1262 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1263 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1264 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1265 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1266 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1267 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1268 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1269 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1270 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1271 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1272 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1273 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1275 bool setPhysicalFormat = false;
1276 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1277 testDescription = description;
1278 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1279 testDescription.mFormatFlags = physicalFormats[i].second;
// BUG(review): bitwise '~' on a flag test — '~(x & flag)' is nonzero (true)
// for every flag value that isn't all-ones, so this condition reduces to
// '24 == bits'.  Logical negation '!(x & kAudioFormatFlagIsPacked)' is
// what is meant (24-bit carried unpacked in a 4-byte container).
1280 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1281 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1283 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1284 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1285 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1286 if ( result == noErr ) {
1287 setPhysicalFormat = true;
1288 //std::cout << "Updated physical stream format:" << std::endl;
1289 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1290 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1291 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1292 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1297 if ( !setPhysicalFormat ) {
1298 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1299 errorText_ = errorStream_.str();
1302 } // done setting virtual/physical formats.
1304 // Get the stream / device latency.
1306 dataSize = sizeof( UInt32 );
1307 property.mSelector = kAudioDevicePropertyLatency;
1308 if ( AudioObjectHasProperty( id, &property ) == true ) {
1309 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1310 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1312 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1313 errorText_ = errorStream_.str();
// Latency lookup failure is non-fatal: warn and continue.
1314 error( RtAudioError::WARNING );
1318 // Byte-swapping: According to AudioHardware.h, the stream data will
1319 // always be presented in native-endian format, so we should never
1320 // need to byte swap.
1321 stream_.doByteSwap[mode] = false;
1323 // From the CoreAudio documentation, PCM data must be supplied as
1325 stream_.userFormat = format;
1326 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1328 if ( streamCount == 1 )
1329 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1330 else // multiple streams
1331 stream_.nDeviceChannels[mode] = channels;
1332 stream_.nUserChannels[mode] = channels;
1333 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1334 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1335 else stream_.userInterleaved = true;
1336 stream_.deviceInterleaved[mode] = true;
1337 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1339 // Set flags for buffer conversion.
1340 stream_.doConvertBuffer[mode] = false;
1341 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1342 stream_.doConvertBuffer[mode] = true;
1343 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1344 stream_.doConvertBuffer[mode] = true;
1345 if ( streamCount == 1 ) {
1346 if ( stream_.nUserChannels[mode] > 1 &&
1347 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1348 stream_.doConvertBuffer[mode] = true;
1350 else if ( monoMode && stream_.userInterleaved )
1351 stream_.doConvertBuffer[mode] = true;
1353 // Allocate our CoreHandle structure for the stream.
1354 CoreHandle *handle = 0;
// First direction opened allocates the handle; a second (duplex) open
// reuses the existing one.
1355 if ( stream_.apiHandle == 0 ) {
1357 handle = new CoreHandle;
1359 catch ( std::bad_alloc& ) {
1360 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1364 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1365 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1368 stream_.apiHandle = (void *) handle;
1371 handle = (CoreHandle *) stream_.apiHandle;
1372 handle->iStream[mode] = firstStream;
1373 handle->nStreams[mode] = streamCount;
1374 handle->id[mode] = id;
1376 // Allocate necessary internal buffers.
1377 unsigned long bufferBytes;
1378 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1379 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1380 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
// BUG(review): memset runs BEFORE the NULL check below — on allocation
// failure this dereferences NULL.  The commented-out calloc above (zeroing
// and then checking) is the safe ordering; fix when editing the full file.
1381 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1382 if ( stream_.userBuffer[mode] == NULL ) {
1383 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1387 // If possible, we will make use of the CoreAudio stream buffers as
1388 // "device buffers". However, we can't do this if using multiple
1390 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1392 bool makeBuffer = true;
1393 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
// For duplex, reuse the existing output-side device buffer if it is
// already large enough for the input side.
1394 if ( mode == INPUT ) {
1395 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1396 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1397 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1402 bufferBytes *= *bufferSize;
1403 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1404 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1405 if ( stream_.deviceBuffer == NULL ) {
1406 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1412 stream_.sampleRate = sampleRate;
1413 stream_.device[mode] = device;
1414 stream_.state = STREAM_STOPPED;
1415 stream_.callbackInfo.object = (void *) this;
1417 // Setup the buffer conversion information structure.
1418 if ( stream_.doConvertBuffer[mode] ) {
1419 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1420 else setConvertInfo( mode, channelOffset );
// Opening the input side on the same device as an open output side:
// promote to DUPLEX so a single I/O proc serves both directions.
1423 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1424 // Only one callback procedure per device.
1425 stream_.mode = DUPLEX;
1427 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1428 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1430 // deprecated in favor of AudioDeviceCreateIOProcID()
1431 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1433 if ( result != noErr ) {
1434 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1435 errorText_ = errorStream_.str();
1438 if ( stream_.mode == OUTPUT && mode == INPUT )
1439 stream_.mode = DUPLEX;
1441 stream_.mode = mode;
1444 // Setup the device property listener for over/underload.
1445 property.mSelector = kAudioDeviceProcessorOverload;
1446 property.mScope = kAudioObjectPropertyScopeGlobal;
1447 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// Error cleanup path (label elided in this listing): tear down the
// condition variable, handle, and all allocated buffers, then mark closed.
1453 pthread_cond_destroy( &handle->condition );
1455 stream_.apiHandle = 0;
1458 for ( int i=0; i<2; i++ ) {
1459 if ( stream_.userBuffer[i] ) {
1460 free( stream_.userBuffer[i] );
1461 stream_.userBuffer[i] = 0;
1465 if ( stream_.deviceBuffer ) {
1466 free( stream_.deviceBuffer );
1467 stream_.deviceBuffer = 0;
1470 stream_.state = STREAM_CLOSED;
// Close the open stream: remove the xrun property listeners, stop any
// running I/O, destroy the IOProc(s), free user/device buffers and the
// pthread condition variable, and reset the stream bookkeeping.
// NOTE(review): this listing is elided — closing braces and (per the
// canonical source) the 'delete handle;' / null-guard around the handle
// teardown are missing between the numbered lines.
1474 void RtApiCore :: closeStream( void )
1476 if ( stream_.state == STREAM_CLOSED ) {
1477 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1478 error( RtAudioError::WARNING );
1482 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output-side teardown (handle->id[0] / procId[0]).
1483 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1485 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1486 kAudioObjectPropertyScopeGlobal,
1487 kAudioObjectPropertyElementMaster };
1489 property.mSelector = kAudioDeviceProcessorOverload;
1490 property.mScope = kAudioObjectPropertyScopeGlobal;
1491 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1492 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1493 error( RtAudioError::WARNING );
1496 if ( stream_.state == STREAM_RUNNING )
1497 AudioDeviceStop( handle->id[0], callbackHandler );
1498 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1499 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1501 // deprecated in favor of AudioDeviceDestroyIOProcID()
1502 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input-side teardown — only when input runs on a different device than
// output (same-device duplex shares one IOProc, removed above).
1506 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1508 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1509 kAudioObjectPropertyScopeGlobal,
1510 kAudioObjectPropertyElementMaster };
1512 property.mSelector = kAudioDeviceProcessorOverload;
1513 property.mScope = kAudioObjectPropertyScopeGlobal;
1514 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1515 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1516 error( RtAudioError::WARNING );
1519 if ( stream_.state == STREAM_RUNNING )
1520 AudioDeviceStop( handle->id[1], callbackHandler );
1521 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1522 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1524 // deprecated in favor of AudioDeviceDestroyIOProcID()
1525 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Free per-direction user buffers and the shared device buffer.
1529 for ( int i=0; i<2; i++ ) {
1530 if ( stream_.userBuffer[i] ) {
1531 free( stream_.userBuffer[i] );
1532 stream_.userBuffer[i] = 0;
1536 if ( stream_.deviceBuffer ) {
1537 free( stream_.deviceBuffer );
1538 stream_.deviceBuffer = 0;
1541 // Destroy pthread condition variable.
1542 pthread_cond_destroy( &handle->condition );
1544 stream_.apiHandle = 0;
1546 stream_.mode = UNINITIALIZED;
1547 stream_.state = STREAM_CLOSED;
// Start callback processing: kick off the CoreAudio I/O proc on the output
// device, then (for input-only or split-device duplex) on the input device,
// reset the drain state, and mark the stream running.
// NOTE(review): elided in this listing — the early 'return;' after the
// warning, the 'goto unlock' on errors, and the 'unlock:' label before the
// final status check (per the canonical source).
1550 void RtApiCore :: startStream( void )
1553 if ( stream_.state == STREAM_RUNNING ) {
1554 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1555 error( RtAudioError::WARNING );
1559 OSStatus result = noErr;
1560 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1561 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1563 result = AudioDeviceStart( handle->id[0], callbackHandler );
1564 if ( result != noErr ) {
1565 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1566 errorText_ = errorStream_.str();
// Start the input device separately only when it differs from the output
// device (same-device duplex is driven by the single IOProc started above).
1571 if ( stream_.mode == INPUT ||
1572 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1574 result = AudioDeviceStart( handle->id[1], callbackHandler );
1575 if ( result != noErr ) {
1576 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1577 errorText_ = errorStream_.str();
1582 handle->drainCounter = 0;
1583 handle->internalDrain = false;
1584 stream_.state = STREAM_RUNNING;
1587 if ( result == noErr ) return;
1588 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: if output is active and not already draining,
// request a drain (drainCounter = 2) and wait for the callback to signal
// that output has been zero-filled, then stop the CoreAudio device(s).
// NOTE(review): elided in this listing — the early 'return;' after the
// warning and intermediate closing braces.
1591 void RtApiCore :: stopStream( void )
1594 if ( stream_.state == STREAM_STOPPED ) {
1595 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1596 error( RtAudioError::WARNING );
1600 OSStatus result = noErr;
1601 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1602 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1604 if ( handle->drainCounter == 0 ) {
1605 handle->drainCounter = 2;
// NOTE(review): POSIX requires stream_.mutex to be held by the caller
// before pthread_cond_wait; no lock is visible here — verify against the
// full file.
1606 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1609 result = AudioDeviceStop( handle->id[0], callbackHandler );
1610 if ( result != noErr ) {
1611 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1612 errorText_ = errorStream_.str();
// Stop the input device separately only when it differs from the output.
1617 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1619 result = AudioDeviceStop( handle->id[1], callbackHandler );
1620 if ( result != noErr ) {
1621 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1622 errorText_ = errorStream_.str();
1627 stream_.state = STREAM_STOPPED;
1630 if ( result == noErr ) return;
1631 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream immediately: setting drainCounter = 2 (without waiting
// for a drain cycle) tells the callback machinery to shut down right away.
// NOTE(review): the tail of this function (per the canonical source, the
// call into stopStream() and the closing brace) is elided in this listing.
1634 void RtApiCore :: abortStream( void )
1637 if ( stream_.state == STREAM_STOPPED ) {
1638 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1639 error( RtAudioError::WARNING );
1643 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1644 handle->drainCounter = 2;
1649 // This function will be called by a spawned thread when the user
1650 // callback function signals that the stream should be stopped or
1651 // aborted. It is better to handle it this way because the
1652 // callbackEvent() function probably should return before the AudioDeviceStop()
1653 // function is called.
// Thread entry point (pthread signature): unpack the CallbackInfo passed as
// the thread argument and stop the owning RtApiCore stream from outside the
// CoreAudio callback context.
1654 static void *coreStopStream( void *ptr )
1656 CallbackInfo *info = (CallbackInfo *) ptr;
1657 RtApiCore *object = (RtApiCore *) info->object;
1659 object->stopStream();
// Terminate this helper thread; never returns.
1660 pthread_exit( NULL );
1663 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1664 const AudioBufferList *inBufferList,
1665 const AudioBufferList *outBufferList )
1667 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1668 if ( stream_.state == STREAM_CLOSED ) {
1669 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1670 error( RtAudioError::WARNING );
1674 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1675 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1677 // Check if we were draining the stream and signal is finished.
1678 if ( handle->drainCounter > 3 ) {
1679 ThreadHandle threadId;
1681 stream_.state = STREAM_STOPPING;
1682 if ( handle->internalDrain == true )
1683 pthread_create( &threadId, NULL, coreStopStream, info );
1684 else // external call to stopStream()
1685 pthread_cond_signal( &handle->condition );
1689 AudioDeviceID outputDevice = handle->id[0];
1691 // Invoke user callback to get fresh output data UNLESS we are
1692 // draining stream or duplex mode AND the input/output devices are
1693 // different AND this function is called for the input device.
1694 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1695 RtAudioCallback callback = (RtAudioCallback) info->callback;
1696 double streamTime = getStreamTime();
1697 RtAudioStreamStatus status = 0;
1698 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1699 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1700 handle->xrun[0] = false;
1702 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1703 status |= RTAUDIO_INPUT_OVERFLOW;
1704 handle->xrun[1] = false;
1707 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1708 stream_.bufferSize, streamTime, status, info->userData );
1709 if ( cbReturnValue == 2 ) {
1710 stream_.state = STREAM_STOPPING;
1711 handle->drainCounter = 2;
1715 else if ( cbReturnValue == 1 ) {
1716 handle->drainCounter = 1;
1717 handle->internalDrain = true;
1721 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1723 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1725 if ( handle->nStreams[0] == 1 ) {
1726 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1728 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1730 else { // fill multiple streams with zeros
1731 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1732 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1734 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1738 else if ( handle->nStreams[0] == 1 ) {
1739 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1740 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1741 stream_.userBuffer[0], stream_.convertInfo[0] );
1743 else { // copy from user buffer
1744 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1745 stream_.userBuffer[0],
1746 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1749 else { // fill multiple streams
1750 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1751 if ( stream_.doConvertBuffer[0] ) {
1752 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1753 inBuffer = (Float32 *) stream_.deviceBuffer;
1756 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1757 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1758 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1759 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1760 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1763 else { // fill multiple multi-channel streams with interleaved data
1764 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1767 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1768 UInt32 inChannels = stream_.nUserChannels[0];
1769 if ( stream_.doConvertBuffer[0] ) {
1770 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1771 inChannels = stream_.nDeviceChannels[0];
1774 if ( inInterleaved ) inOffset = 1;
1775 else inOffset = stream_.bufferSize;
1777 channelsLeft = inChannels;
1778 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1780 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1781 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1784 // Account for possible channel offset in first stream
1785 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1786 streamChannels -= stream_.channelOffset[0];
1787 outJump = stream_.channelOffset[0];
1791 // Account for possible unfilled channels at end of the last stream
1792 if ( streamChannels > channelsLeft ) {
1793 outJump = streamChannels - channelsLeft;
1794 streamChannels = channelsLeft;
1797 // Determine input buffer offsets and skips
1798 if ( inInterleaved ) {
1799 inJump = inChannels;
1800 in += inChannels - channelsLeft;
1804 in += (inChannels - channelsLeft) * inOffset;
1807 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1808 for ( unsigned int j=0; j<streamChannels; j++ ) {
1809 *out++ = in[j*inOffset];
1814 channelsLeft -= streamChannels;
1820 // Don't bother draining input
1821 if ( handle->drainCounter ) {
1822 handle->drainCounter++;
1826 AudioDeviceID inputDevice;
1827 inputDevice = handle->id[1];
1828 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1830 if ( handle->nStreams[1] == 1 ) {
1831 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1832 convertBuffer( stream_.userBuffer[1],
1833 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1834 stream_.convertInfo[1] );
1836 else { // copy to user buffer
1837 memcpy( stream_.userBuffer[1],
1838 inBufferList->mBuffers[handle->iStream[1]].mData,
1839 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1842 else { // read from multiple streams
1843 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1844 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1846 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1847 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1848 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1849 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1850 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1853 else { // read from multiple multi-channel streams
1854 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1857 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1858 UInt32 outChannels = stream_.nUserChannels[1];
1859 if ( stream_.doConvertBuffer[1] ) {
1860 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1861 outChannels = stream_.nDeviceChannels[1];
1864 if ( outInterleaved ) outOffset = 1;
1865 else outOffset = stream_.bufferSize;
1867 channelsLeft = outChannels;
1868 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1870 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1871 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1874 // Account for possible channel offset in first stream
1875 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1876 streamChannels -= stream_.channelOffset[1];
1877 inJump = stream_.channelOffset[1];
1881 // Account for possible unread channels at end of the last stream
1882 if ( streamChannels > channelsLeft ) {
1883 inJump = streamChannels - channelsLeft;
1884 streamChannels = channelsLeft;
1887 // Determine output buffer offsets and skips
1888 if ( outInterleaved ) {
1889 outJump = outChannels;
1890 out += outChannels - channelsLeft;
1894 out += (outChannels - channelsLeft) * outOffset;
1897 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1898 for ( unsigned int j=0; j<streamChannels; j++ ) {
1899 out[j*outOffset] = *in++;
1904 channelsLeft -= streamChannels;
1908 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1909 convertBuffer( stream_.userBuffer[1],
1910 stream_.deviceBuffer,
1911 stream_.convertInfo[1] );
1917 //MUTEX_UNLOCK( &stream_.mutex );
1919 RtApi::tickStreamTime();
1923 const char* RtApiCore :: getErrorCode( OSStatus code )
1927 case kAudioHardwareNotRunningError:
1928 return "kAudioHardwareNotRunningError";
1930 case kAudioHardwareUnspecifiedError:
1931 return "kAudioHardwareUnspecifiedError";
1933 case kAudioHardwareUnknownPropertyError:
1934 return "kAudioHardwareUnknownPropertyError";
1936 case kAudioHardwareBadPropertySizeError:
1937 return "kAudioHardwareBadPropertySizeError";
1939 case kAudioHardwareIllegalOperationError:
1940 return "kAudioHardwareIllegalOperationError";
1942 case kAudioHardwareBadObjectError:
1943 return "kAudioHardwareBadObjectError";
1945 case kAudioHardwareBadDeviceError:
1946 return "kAudioHardwareBadDeviceError";
1948 case kAudioHardwareBadStreamError:
1949 return "kAudioHardwareBadStreamError";
1951 case kAudioHardwareUnsupportedOperationError:
1952 return "kAudioHardwareUnsupportedOperationError";
1954 case kAudioDeviceUnsupportedFormatError:
1955 return "kAudioDeviceUnsupportedFormatError";
1957 case kAudioDevicePermissionsError:
1958 return "kAudioDevicePermissionsError";
1961 return "CoreAudio unknown error";
1965 //******************** End of __MACOSX_CORE__ *********************//
1968 #if defined(__UNIX_JACK__)
1970 // JACK is a low-latency audio server, originally written for the
1971 // GNU/Linux operating system and now also ported to OS-X. It can
1972 // connect a number of different applications to an audio device, as
1973 // well as allowing them to share audio between themselves.
1975 // When using JACK with RtAudio, "devices" refer to JACK clients that
1976 // have ports connected to the server. The JACK server is typically
1977 // started in a terminal as follows:
1979 // .jackd -d alsa -d hw:0
1981 // or through an interface program such as qjackctl. Many of the
1982 // parameters normally set for a stream are fixed by the JACK server
1983 // and can be specified when the JACK server is started. In
1986 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1988 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1989 // frames, and number of buffers = 4. Once the server is running, it
1990 // is not possible to override these values. If the values are not
1991 // specified in the command-line, the JACK server uses default values.
1993 // The JACK server does not have to be running when an instance of
1994 // RtApiJack is created, though the function getDeviceCount() will
1995 // report 0 devices found until JACK has been started. When no
1996 // devices are available (i.e., the JACK server is not running), a
1997 // stream cannot be opened.
1999 #include <jack/jack.h>
2003 // A structure to hold various information related to the Jack API
2006 jack_client_t *client;
2007 jack_port_t **ports[2];
2008 std::string deviceName[2];
2010 pthread_cond_t condition;
2011 int drainCounter; // Tracks callback counts when draining
2012 bool internalDrain; // Indicates if stop is initiated from callback or not.
2015 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
#if !defined(__RTAUDIO_DEBUG__)
// No-op error callback: installed in non-debug builds to suppress
// Jack's internal error printing (see the RtApiJack constructor).
static void jackSilentError( const char * ) {}
#endif
2022 RtApiJack :: RtApiJack()
2023 :shouldAutoconnect_(true) {
2024 // Nothing to do here.
2025 #if !defined(__RTAUDIO_DEBUG__)
2026 // Turn off Jack's internal error reporting.
2027 jack_set_error_function( &jackSilentError );
2031 RtApiJack :: ~RtApiJack()
2033 if ( stream_.state != STREAM_CLOSED ) closeStream();
2036 unsigned int RtApiJack :: getDeviceCount( void )
2038 // See if we can become a jack client.
2039 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2040 jack_status_t *status = NULL;
2041 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2042 if ( client == 0 ) return 0;
2045 std::string port, previousPort;
2046 unsigned int nChannels = 0, nDevices = 0;
2047 ports = jack_get_ports( client, NULL, NULL, 0 );
2049 // Parse the port names up to the first colon (:).
2052 port = (char *) ports[ nChannels ];
2053 iColon = port.find(":");
2054 if ( iColon != std::string::npos ) {
2055 port = port.substr( 0, iColon + 1 );
2056 if ( port != previousPort ) {
2058 previousPort = port;
2061 } while ( ports[++nChannels] );
2065 jack_client_close( client );
// Probe one Jack "device" (a distinct client-name prefix among all
// registered Jack ports; see getDeviceCount()) and fill in an
// RtAudio::DeviceInfo record.  Warning errors are raised on failure
// paths, in which case the returned info may be incomplete.
2069 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2071 RtAudio::DeviceInfo info;
2072 info.probed = false;
// Become a throw-away Jack client; JackNoStartServer means we never
// spawn a server that is not already running.
2074 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2075 jack_status_t *status = NULL;
2076 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2077 if ( client == 0 ) {
2078 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2079 error( RtAudioError::WARNING );
// Walk the NULL-terminated port list counting unique client-name
// prefixes; the prefix whose ordinal matches 'device' becomes info.name.
2084 std::string port, previousPort;
2085 unsigned int nPorts = 0, nDevices = 0;
2086 ports = jack_get_ports( client, NULL, NULL, 0 );
2088 // Parse the port names up to the first colon (:).
2091 port = (char *) ports[ nPorts ];
2092 iColon = port.find(":");
2093 if ( iColon != std::string::npos ) {
2094 port = port.substr( 0, iColon );
2095 if ( port != previousPort ) {
2096 if ( nDevices == device ) info.name = port;
2098 previousPort = port;
2101 } while ( ports[++nPorts] );
2105 if ( device >= nDevices ) {
2106 jack_client_close( client );
2107 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2108 error( RtAudioError::INVALID_USE );
2112 // Get the current jack server sample rate.
// Jack runs at a single server-wide rate, so that is the only (and
// preferred) rate reported.
2113 info.sampleRates.clear();
2115 info.preferredSampleRate = jack_get_sample_rate( client );
2116 info.sampleRates.push_back( info.preferredSampleRate );
2118 // Count the available ports containing the client name as device
2119 // channels. Jack "input ports" equal RtAudio output channels.
2120 unsigned int nChannels = 0;
2121 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
2123 while ( ports[ nChannels ] ) nChannels++;
2125 info.outputChannels = nChannels;
2128 // Jack "output ports" equal RtAudio input channels.
2130 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
2132 while ( ports[ nChannels ] ) nChannels++;
2134 info.inputChannels = nChannels;
2137 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2138 jack_client_close(client);
2139 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2140 error( RtAudioError::WARNING );
2144 // If device opens for both playback and capture, we determine the channels.
2145 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2146 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2148 // Jack always uses 32-bit floats.
2149 info.nativeFormats = RTAUDIO_FLOAT32;
2151 // Jack doesn't provide default devices so we'll use the first available one.
2152 if ( device == 0 && info.outputChannels > 0 )
2153 info.isDefaultOutput = true;
2154 if ( device == 0 && info.inputChannels > 0 )
2155 info.isDefaultInput = true;
2157 jack_client_close(client);
2162 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2164 CallbackInfo *info = (CallbackInfo *) infoPointer;
2166 RtApiJack *object = (RtApiJack *) info->object;
2167 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2172 // This function will be called by a spawned thread when the Jack
2173 // server signals that it is shutting down. It is necessary to handle
2174 // it this way because the jackShutdown() function must return before
2175 // the jack_deactivate() function (in closeStream()) will return.
2176 static void *jackCloseStream( void *ptr )
2178 CallbackInfo *info = (CallbackInfo *) ptr;
2179 RtApiJack *object = (RtApiJack *) info->object;
2181 object->closeStream();
2183 pthread_exit( NULL );
2185 static void jackShutdown( void *infoPointer )
2187 CallbackInfo *info = (CallbackInfo *) infoPointer;
2188 RtApiJack *object = (RtApiJack *) info->object;
2190 // Check current stream state. If stopped, then we'll assume this
2191 // was called as a result of a call to RtApiJack::stopStream (the
2192 // deactivation of a client handle causes this function to be called).
2193 // If not, we'll assume the Jack server is shutting down or some
2194 // other problem occurred and we should close the stream.
2195 if ( object->isStreamRunning() == false ) return;
2197 ThreadHandle threadId;
2198 pthread_create( &threadId, NULL, jackCloseStream, info );
2199 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2202 static int jackXrun( void *infoPointer )
2204 JackHandle *handle = *((JackHandle **) infoPointer);
2206 if ( handle->ports[0] ) handle->xrun[0] = true;
2207 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open one direction (OUTPUT or INPUT) of a stream on the given Jack
// device.  Connects to the Jack server (once per stream), validates the
// requested channel count / sample rate against the server, allocates
// the JackHandle and internal buffers, registers Jack ports, and sets
// the stream bookkeeping.  Returns true on success; on failure an error
// message is set and the partially constructed state is torn down.
// NOTE(review): several error-path lines (returns, goto targets, braces)
// fall on lines omitted from this excerpt.
2212 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2213 unsigned int firstChannel, unsigned int sampleRate,
2214 RtAudioFormat format, unsigned int *bufferSize,
2215 RtAudio::StreamOptions *options )
2217 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2219 // Look for jack server and try to become a client (only do once per stream).
2220 jack_client_t *client = 0;
2221 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2222 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2223 jack_status_t *status = NULL;
// Honor a user-supplied client name if one was given in the options.
2224 if ( options && !options->streamName.empty() )
2225 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2227 client = jack_client_open( "RtApiJack", jackoptions, status );
2228 if ( client == 0 ) {
2229 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2230 error( RtAudioError::WARNING );
2235 // The handle must have been created on an earlier pass.
2236 client = handle->client;
// Resolve the device index to a Jack client-name prefix (see
// getDeviceCount() for the device enumeration scheme).
2240 std::string port, previousPort, deviceName;
2241 unsigned int nPorts = 0, nDevices = 0;
2242 ports = jack_get_ports( client, NULL, NULL, 0 );
2244 // Parse the port names up to the first colon (:).
2247 port = (char *) ports[ nPorts ];
2248 iColon = port.find(":");
2249 if ( iColon != std::string::npos ) {
2250 port = port.substr( 0, iColon );
2251 if ( port != previousPort ) {
2252 if ( nDevices == device ) deviceName = port;
2254 previousPort = port;
2257 } while ( ports[++nPorts] );
2261 if ( device >= nDevices ) {
2262 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2266 // Count the available ports containing the client name as device
2267 // channels. Jack "input ports" equal RtAudio output channels.
2268 unsigned int nChannels = 0;
2269 unsigned long flag = JackPortIsInput;
2270 if ( mode == INPUT ) flag = JackPortIsOutput;
2271 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2273 while ( ports[ nChannels ] ) nChannels++;
2277 // Compare the jack ports for specified client to the requested number of channels.
2278 if ( nChannels < (channels + firstChannel) ) {
2279 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2280 errorText_ = errorStream_.str();
2284 // Check the jack server sample rate.
// The Jack server dictates the rate; the requested rate must match it.
2285 unsigned int jackRate = jack_get_sample_rate( client );
2286 if ( sampleRate != jackRate ) {
2287 jack_client_close( client );
2288 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2289 errorText_ = errorStream_.str();
2292 stream_.sampleRate = jackRate;
2294 // Get the latency of the JACK port.
2295 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2296 if ( ports[ firstChannel ] ) {
2298 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2299 // the range (usually the min and max are equal)
2300 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2301 // get the latency range
2302 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2303 // be optimistic, use the min!
2304 stream_.latency[mode] = latrange.min;
2305 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2309 // The jack server always uses 32-bit floating-point data.
2310 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2311 stream_.userFormat = format;
2313 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2314 else stream_.userInterleaved = true;
2316 // Jack always uses non-interleaved buffers.
2317 stream_.deviceInterleaved[mode] = false;
2319 // Jack always provides host byte-ordered data.
2320 stream_.doByteSwap[mode] = false;
2322 // Get the buffer size. The buffer size and number of buffers
2323 // (periods) is set when the jack server is started.
2324 stream_.bufferSize = (int) jack_get_buffer_size( client );
2325 *bufferSize = stream_.bufferSize;
2327 stream_.nDeviceChannels[mode] = channels;
2328 stream_.nUserChannels[mode] = channels;
2330 // Set flags for buffer conversion.
// Conversion is needed if the user format differs from FLOAT32 or if a
// multi-channel user buffer is interleaved (Jack is non-interleaved).
2331 stream_.doConvertBuffer[mode] = false;
2332 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2333 stream_.doConvertBuffer[mode] = true;
2334 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2335 stream_.nUserChannels[mode] > 1 )
2336 stream_.doConvertBuffer[mode] = true;
2338 // Allocate our JackHandle structure for the stream.
2339 if ( handle == 0 ) {
2341 handle = new JackHandle;
2343 catch ( std::bad_alloc& ) {
2344 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2348 if ( pthread_cond_init(&handle->condition, NULL) ) {
2349 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2352 stream_.apiHandle = (void *) handle;
2353 handle->client = client;
2355 handle->deviceName[mode] = deviceName;
2357 // Allocate necessary internal buffers.
2358 unsigned long bufferBytes;
2359 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2360 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2361 if ( stream_.userBuffer[mode] == NULL ) {
2362 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2366 if ( stream_.doConvertBuffer[mode] ) {
// A device buffer is only (re)allocated when no sufficiently large one
// exists from the opposite direction of a duplex stream.
2368 bool makeBuffer = true;
2369 if ( mode == OUTPUT )
2370 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2371 else { // mode == INPUT
2372 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2373 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2374 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2375 if ( bufferBytes < bytesOut ) makeBuffer = false;
2380 bufferBytes *= *bufferSize;
2381 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2382 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2383 if ( stream_.deviceBuffer == NULL ) {
2384 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2390 // Allocate memory for the Jack ports (channels) identifiers.
2391 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2392 if ( handle->ports[mode] == NULL ) {
2393 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2397 stream_.device[mode] = device;
2398 stream_.channelOffset[mode] = firstChannel;
2399 stream_.state = STREAM_STOPPED;
2400 stream_.callbackInfo.object = (void *) this;
2402 if ( stream_.mode == OUTPUT && mode == INPUT )
2403 // We had already set up the stream for output.
2404 stream_.mode = DUPLEX;
2406 stream_.mode = mode;
// Install the Jack process/xrun/shutdown callbacks for this client.
2407 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2408 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2409 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2412 // Register our ports.
2414 if ( mode == OUTPUT ) {
2415 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2416 snprintf( label, 64, "outport %d", i );
2417 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2418 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2422 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2423 snprintf( label, 64, "inport %d", i );
2424 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2425 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2429 // Setup the buffer conversion information structure. We don't use
2430 // buffers to do channel offsets, so we override that parameter
2432 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2434 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// Error cleanup path: undo the condition variable, client, port arrays,
// handle, and any allocated buffers before returning failure.
2440 pthread_cond_destroy( &handle->condition );
2441 jack_client_close( handle->client );
2443 if ( handle->ports[0] ) free( handle->ports[0] );
2444 if ( handle->ports[1] ) free( handle->ports[1] );
2447 stream_.apiHandle = 0;
2450 for ( int i=0; i<2; i++ ) {
2451 if ( stream_.userBuffer[i] ) {
2452 free( stream_.userBuffer[i] );
2453 stream_.userBuffer[i] = 0;
2457 if ( stream_.deviceBuffer ) {
2458 free( stream_.deviceBuffer );
2459 stream_.deviceBuffer = 0;
// Close the stream: deactivate and close the Jack client, then release
// the per-API handle and all internal buffers, resetting the stream
// bookkeeping so a new stream can be opened on this object.
2465 void RtApiJack :: closeStream( void )
2467 if ( stream_.state == STREAM_CLOSED ) {
2468 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2469 error( RtAudioError::WARNING );
2473 JackHandle *handle = (JackHandle *) stream_.apiHandle;
// Deactivating stops the process callback; closing releases the client.
2476 if ( stream_.state == STREAM_RUNNING )
2477 jack_deactivate( handle->client );
2479 jack_client_close( handle->client );
// Free the port-id arrays and the drain condition variable.
2483 if ( handle->ports[0] ) free( handle->ports[0] );
2484 if ( handle->ports[1] ) free( handle->ports[1] );
2485 pthread_cond_destroy( &handle->condition );
2487 stream_.apiHandle = 0;
// Release user buffers for both directions and the shared device buffer.
2490 for ( int i=0; i<2; i++ ) {
2491 if ( stream_.userBuffer[i] ) {
2492 free( stream_.userBuffer[i] );
2493 stream_.userBuffer[i] = 0;
2497 if ( stream_.deviceBuffer ) {
2498 free( stream_.deviceBuffer );
2499 stream_.deviceBuffer = 0;
2502 stream_.mode = UNINITIALIZED;
2503 stream_.state = STREAM_CLOSED;
// Start the stream: activate the Jack client and, unless autoconnect
// was disabled via RTAUDIO_JACK_DONT_CONNECT, wire our registered ports
// to the selected device's ports (honoring the channel offset).
// Raises SYSTEM_ERROR if activation or any connection fails.
2506 void RtApiJack :: startStream( void )
2509 if ( stream_.state == STREAM_RUNNING ) {
2510 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2511 error( RtAudioError::WARNING );
2515 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2516 int result = jack_activate( handle->client );
2518 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2524 // Get the list of available ports.
// Output side: our playback ports connect to the device's input ports.
2525 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2527 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
2528 if ( ports == NULL) {
2529 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2533 // Now make the port connections. Since RtAudio wasn't designed to
2534 // allow the user to select particular channels of a device, we'll
2535 // just open the first "nChannels" ports with offset.
2536 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2538 if ( ports[ stream_.channelOffset[0] + i ] )
2539 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2542 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
// Input side: the device's output ports connect to our capture ports.
2549 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2551 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
2552 if ( ports == NULL) {
2553 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2557 // Now make the port connections. See note above.
2558 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2560 if ( ports[ stream_.channelOffset[1] + i ] )
2561 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2564 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Reset drain state and mark the stream running.
2571 handle->drainCounter = 0;
2572 handle->internalDrain = false;
2573 stream_.state = STREAM_RUNNING;
2576 if ( result == 0 ) return;
2577 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: if output is active, let the process
// callback drain pending output before the Jack client is deactivated.
2580 void RtApiJack :: stopStream( void )
2583 if ( stream_.state == STREAM_STOPPED ) {
2584 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2585 error( RtAudioError::WARNING );
2589 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2590 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter = 2 asks the callback to write silence; we then wait
// for the callback thread to signal that draining has finished.
2592 if ( handle->drainCounter == 0 ) {
2593 handle->drainCounter = 2;
2594 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2598 jack_deactivate( handle->client );
2599 stream_.state = STREAM_STOPPED;
2602 void RtApiJack :: abortStream( void )
2605 if ( stream_.state == STREAM_STOPPED ) {
2606 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2607 error( RtAudioError::WARNING );
2611 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2612 handle->drainCounter = 2;
2617 // This function will be called by a spawned thread when the user
2618 // callback function signals that the stream should be stopped or
2619 // aborted. It is necessary to handle it this way because the
2620 // callbackEvent() function must return before the jack_deactivate()
2621 // function will return.
2622 static void *jackStopStream( void *ptr )
2624 CallbackInfo *info = (CallbackInfo *) ptr;
2625 RtApiJack *object = (RtApiJack *) info->object;
2627 object->stopStream();
2628 pthread_exit( NULL );
2631 bool RtApiJack :: callbackEvent( unsigned long nframes )
2633 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2634 if ( stream_.state == STREAM_CLOSED ) {
2635 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2636 error( RtAudioError::WARNING );
2639 if ( stream_.bufferSize != nframes ) {
2640 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2641 error( RtAudioError::WARNING );
2645 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2646 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2648 // Check if we were draining the stream and signal is finished.
2649 if ( handle->drainCounter > 3 ) {
2650 ThreadHandle threadId;
2652 stream_.state = STREAM_STOPPING;
2653 if ( handle->internalDrain == true )
2654 pthread_create( &threadId, NULL, jackStopStream, info );
2656 pthread_cond_signal( &handle->condition );
2660 // Invoke user callback first, to get fresh output data.
2661 if ( handle->drainCounter == 0 ) {
2662 RtAudioCallback callback = (RtAudioCallback) info->callback;
2663 double streamTime = getStreamTime();
2664 RtAudioStreamStatus status = 0;
2665 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2666 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2667 handle->xrun[0] = false;
2669 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2670 status |= RTAUDIO_INPUT_OVERFLOW;
2671 handle->xrun[1] = false;
2673 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2674 stream_.bufferSize, streamTime, status, info->userData );
2675 if ( cbReturnValue == 2 ) {
2676 stream_.state = STREAM_STOPPING;
2677 handle->drainCounter = 2;
2679 pthread_create( &id, NULL, jackStopStream, info );
2682 else if ( cbReturnValue == 1 ) {
2683 handle->drainCounter = 1;
2684 handle->internalDrain = true;
2688 jack_default_audio_sample_t *jackbuffer;
2689 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2690 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2692 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2694 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2695 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2696 memset( jackbuffer, 0, bufferBytes );
2700 else if ( stream_.doConvertBuffer[0] ) {
2702 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2704 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2705 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2706 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2709 else { // no buffer conversion
2710 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2711 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2712 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2717 // Don't bother draining input
2718 if ( handle->drainCounter ) {
2719 handle->drainCounter++;
2723 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2725 if ( stream_.doConvertBuffer[1] ) {
2726 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2727 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2728 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2730 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2732 else { // no buffer conversion
2733 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2734 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2735 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2741 RtApi::tickStreamTime();
2744 //******************** End of __UNIX_JACK__ *********************//
2747 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2749 // The ASIO API is designed around a callback scheme, so this
2750 // implementation is similar to that used for OS-X CoreAudio and Linux
2751 // Jack. The primary constraint with ASIO is that it only allows
2752 // access to a single driver at a time. Thus, it is not possible to
2753 // have more than one simultaneous RtAudio stream.
2755 // This implementation also requires a number of external ASIO files
2756 // and a few global variables. The ASIO callback scheme does not
2757 // allow for the passing of user data, so we must create a global
2758 // pointer to our callbackInfo structure.
2760 // On unix systems, we make use of a pthread condition variable.
2761 // Since there is no equivalent in Windows, I hacked something based
2762 // on information found in
2763 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2765 #include "asiosys.h"
2767 #include "iasiothiscallresolver.h"
2768 #include "asiodrivers.h"
// File-scope state for the ASIO implementation.  The ASIO callback
// scheme cannot carry user data, so these globals are required; only
// one ASIO stream can exist at a time.
2771 static AsioDrivers drivers;
2772 static ASIOCallbacks asioCallbacks;
2773 static ASIODriverInfo driverInfo;
2774 static CallbackInfo *asioCallbackInfo;
// Set by the driver's xrun notification; reported to the user callback.
2775 static bool asioXRun;
2778 int drainCounter; // Tracks callback counts when draining
2779 bool internalDrain; // Indicates if stop is initiated from callback or not.
2780 ASIOBufferInfo *bufferInfos;
2784 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2787 // Function declarations (definitions at end of section)
// Translate an ASIOError into a readable string for error messages.
2788 static const char* getAsioErrorString( ASIOError result );
// Driver notification callbacks registered with the ASIO host.
2789 static void sampleRateChanged( ASIOSampleRate sRate );
2790 static long asioMessages( long selector, long value, void* message, double* opt );
2792 RtApiAsio :: RtApiAsio()
2794 // ASIO cannot run on a multi-threaded appartment. You can call
2795 // CoInitialize beforehand, but it must be for appartment threading
2796 // (in which case, CoInitilialize will return S_FALSE here).
2797 coInitialized_ = false;
2798 HRESULT hr = CoInitialize( NULL );
2800 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2801 error( RtAudioError::WARNING );
2803 coInitialized_ = true;
2805 drivers.removeCurrentDriver();
2806 driverInfo.asioVersion = 2;
2808 // See note in DirectSound implementation about GetDesktopWindow().
2809 driverInfo.sysRef = GetForegroundWindow();
2812 RtApiAsio :: ~RtApiAsio()
2814 if ( stream_.state != STREAM_CLOSED ) closeStream();
2815 if ( coInitialized_ ) CoUninitialize();
2818 unsigned int RtApiAsio :: getDeviceCount( void )
2820 return (unsigned int) drivers.asioGetNumDev();
2823 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2825 RtAudio::DeviceInfo info;
2826 info.probed = false;
2829 unsigned int nDevices = getDeviceCount();
2830 if ( nDevices == 0 ) {
2831 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2832 error( RtAudioError::INVALID_USE );
2836 if ( device >= nDevices ) {
2837 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2838 error( RtAudioError::INVALID_USE );
2842 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2843 if ( stream_.state != STREAM_CLOSED ) {
2844 if ( device >= devices_.size() ) {
2845 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2846 error( RtAudioError::WARNING );
2849 return devices_[ device ];
2852 char driverName[32];
2853 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2854 if ( result != ASE_OK ) {
2855 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2856 errorText_ = errorStream_.str();
2857 error( RtAudioError::WARNING );
2861 info.name = driverName;
2863 if ( !drivers.loadDriver( driverName ) ) {
2864 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2865 errorText_ = errorStream_.str();
2866 error( RtAudioError::WARNING );
2870 result = ASIOInit( &driverInfo );
2871 if ( result != ASE_OK ) {
2872 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2873 errorText_ = errorStream_.str();
2874 error( RtAudioError::WARNING );
2878 // Determine the device channel information.
2879 long inputChannels, outputChannels;
2880 result = ASIOGetChannels( &inputChannels, &outputChannels );
2881 if ( result != ASE_OK ) {
2882 drivers.removeCurrentDriver();
2883 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2884 errorText_ = errorStream_.str();
2885 error( RtAudioError::WARNING );
2889 info.outputChannels = outputChannels;
2890 info.inputChannels = inputChannels;
2891 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2892 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2894 // Determine the supported sample rates.
2895 info.sampleRates.clear();
2896 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2897 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2898 if ( result == ASE_OK ) {
2899 info.sampleRates.push_back( SAMPLE_RATES[i] );
2901 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2902 info.preferredSampleRate = SAMPLE_RATES[i];
2906 // Determine supported data types ... just check first channel and assume rest are the same.
2907 ASIOChannelInfo channelInfo;
2908 channelInfo.channel = 0;
2909 channelInfo.isInput = true;
2910 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2911 result = ASIOGetChannelInfo( &channelInfo );
2912 if ( result != ASE_OK ) {
2913 drivers.removeCurrentDriver();
2914 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2915 errorText_ = errorStream_.str();
2916 error( RtAudioError::WARNING );
2920 info.nativeFormats = 0;
2921 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2922 info.nativeFormats |= RTAUDIO_SINT16;
2923 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2924 info.nativeFormats |= RTAUDIO_SINT32;
2925 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2926 info.nativeFormats |= RTAUDIO_FLOAT32;
2927 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2928 info.nativeFormats |= RTAUDIO_FLOAT64;
2929 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2930 info.nativeFormats |= RTAUDIO_SINT24;
2932 if ( info.outputChannels > 0 )
2933 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2934 if ( info.inputChannels > 0 )
2935 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2938 drivers.removeCurrentDriver();
2942 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2944 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2945 object->callbackEvent( index );
2948 void RtApiAsio :: saveDeviceInfo( void )
2952 unsigned int nDevices = getDeviceCount();
2953 devices_.resize( nDevices );
2954 for ( unsigned int i=0; i<nDevices; i++ )
2955 devices_[i] = getDeviceInfo( i );
2958 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2959 unsigned int firstChannel, unsigned int sampleRate,
2960 RtAudioFormat format, unsigned int *bufferSize,
2961 RtAudio::StreamOptions *options )
2962 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2964 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2966 // For ASIO, a duplex stream MUST use the same driver.
2967 if ( isDuplexInput && stream_.device[0] != device ) {
2968 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2972 char driverName[32];
2973 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2974 if ( result != ASE_OK ) {
2975 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2976 errorText_ = errorStream_.str();
2980 // Only load the driver once for duplex stream.
2981 if ( !isDuplexInput ) {
2982 // The getDeviceInfo() function will not work when a stream is open
2983 // because ASIO does not allow multiple devices to run at the same
2984 // time. Thus, we'll probe the system before opening a stream and
2985 // save the results for use by getDeviceInfo().
2986 this->saveDeviceInfo();
2988 if ( !drivers.loadDriver( driverName ) ) {
2989 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2990 errorText_ = errorStream_.str();
2994 result = ASIOInit( &driverInfo );
2995 if ( result != ASE_OK ) {
2996 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2997 errorText_ = errorStream_.str();
3002 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3003 bool buffersAllocated = false;
3004 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3005 unsigned int nChannels;
3008 // Check the device channel count.
3009 long inputChannels, outputChannels;
3010 result = ASIOGetChannels( &inputChannels, &outputChannels );
3011 if ( result != ASE_OK ) {
3012 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3013 errorText_ = errorStream_.str();
3017 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3018 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3019 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3020 errorText_ = errorStream_.str();
3023 stream_.nDeviceChannels[mode] = channels;
3024 stream_.nUserChannels[mode] = channels;
3025 stream_.channelOffset[mode] = firstChannel;
3027 // Verify the sample rate is supported.
3028 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3029 if ( result != ASE_OK ) {
3030 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3031 errorText_ = errorStream_.str();
3035 // Get the current sample rate
3036 ASIOSampleRate currentRate;
3037 result = ASIOGetSampleRate( ¤tRate );
3038 if ( result != ASE_OK ) {
3039 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3040 errorText_ = errorStream_.str();
3044 // Set the sample rate only if necessary
3045 if ( currentRate != sampleRate ) {
3046 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3047 if ( result != ASE_OK ) {
3048 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3049 errorText_ = errorStream_.str();
3054 // Determine the driver data type.
3055 ASIOChannelInfo channelInfo;
3056 channelInfo.channel = 0;
3057 if ( mode == OUTPUT ) channelInfo.isInput = false;
3058 else channelInfo.isInput = true;
3059 result = ASIOGetChannelInfo( &channelInfo );
3060 if ( result != ASE_OK ) {
3061 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3062 errorText_ = errorStream_.str();
3066 // Assuming WINDOWS host is always little-endian.
3067 stream_.doByteSwap[mode] = false;
3068 stream_.userFormat = format;
3069 stream_.deviceFormat[mode] = 0;
3070 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3071 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3072 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3074 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3075 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3076 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3078 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3079 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3080 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3082 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3083 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3084 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3086 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3087 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3088 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3091 if ( stream_.deviceFormat[mode] == 0 ) {
3092 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3093 errorText_ = errorStream_.str();
3097 // Set the buffer size. For a duplex stream, this will end up
3098 // setting the buffer size based on the input constraints, which
3100 long minSize, maxSize, preferSize, granularity;
3101 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3102 if ( result != ASE_OK ) {
3103 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3104 errorText_ = errorStream_.str();
3108 if ( isDuplexInput ) {
3109 // When this is the duplex input (output was opened before), then we have to use the same
3110 // buffersize as the output, because it might use the preferred buffer size, which most
3111 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3112 // So instead of throwing an error, make them equal. The caller uses the reference
3113 // to the "bufferSize" param as usual to set up processing buffers.
3115 *bufferSize = stream_.bufferSize;
3118 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3119 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3120 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3121 else if ( granularity == -1 ) {
3122 // Make sure bufferSize is a power of two.
3123 int log2_of_min_size = 0;
3124 int log2_of_max_size = 0;
3126 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3127 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3128 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3131 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3132 int min_delta_num = log2_of_min_size;
3134 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3135 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3136 if (current_delta < min_delta) {
3137 min_delta = current_delta;
3142 *bufferSize = ( (unsigned int)1 << min_delta_num );
3143 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3144 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3146 else if ( granularity != 0 ) {
3147 // Set to an even multiple of granularity, rounding up.
3148 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3153 // we don't use it anymore, see above!
3154 // Just left it here for the case...
3155 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3156 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3161 stream_.bufferSize = *bufferSize;
3162 stream_.nBuffers = 2;
3164 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3165 else stream_.userInterleaved = true;
3167 // ASIO always uses non-interleaved buffers.
3168 stream_.deviceInterleaved[mode] = false;
3170 // Allocate, if necessary, our AsioHandle structure for the stream.
3171 if ( handle == 0 ) {
3173 handle = new AsioHandle;
3175 catch ( std::bad_alloc& ) {
3176 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3179 handle->bufferInfos = 0;
3181 // Create a manual-reset event.
3182 handle->condition = CreateEvent( NULL, // no security
3183 TRUE, // manual-reset
3184 FALSE, // non-signaled initially
3186 stream_.apiHandle = (void *) handle;
3189 // Create the ASIO internal buffers. Since RtAudio sets up input
3190 // and output separately, we'll have to dispose of previously
3191 // created output buffers for a duplex stream.
3192 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3193 ASIODisposeBuffers();
3194 if ( handle->bufferInfos ) free( handle->bufferInfos );
3197 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3199 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3200 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3201 if ( handle->bufferInfos == NULL ) {
3202 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3203 errorText_ = errorStream_.str();
3207 ASIOBufferInfo *infos;
3208 infos = handle->bufferInfos;
3209 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3210 infos->isInput = ASIOFalse;
3211 infos->channelNum = i + stream_.channelOffset[0];
3212 infos->buffers[0] = infos->buffers[1] = 0;
3214 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3215 infos->isInput = ASIOTrue;
3216 infos->channelNum = i + stream_.channelOffset[1];
3217 infos->buffers[0] = infos->buffers[1] = 0;
3220 // prepare for callbacks
3221 stream_.sampleRate = sampleRate;
3222 stream_.device[mode] = device;
3223 stream_.mode = isDuplexInput ? DUPLEX : mode;
3225 // store this class instance before registering callbacks, that are going to use it
3226 asioCallbackInfo = &stream_.callbackInfo;
3227 stream_.callbackInfo.object = (void *) this;
3229 // Set up the ASIO callback structure and create the ASIO data buffers.
3230 asioCallbacks.bufferSwitch = &bufferSwitch;
3231 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3232 asioCallbacks.asioMessage = &asioMessages;
3233 asioCallbacks.bufferSwitchTimeInfo = NULL;
3234 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3235 if ( result != ASE_OK ) {
3236 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3237 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3238 // in that case, let's be naïve and try that instead
3239 *bufferSize = preferSize;
3240 stream_.bufferSize = *bufferSize;
3241 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3244 if ( result != ASE_OK ) {
3245 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3246 errorText_ = errorStream_.str();
3249 buffersAllocated = true;
3250 stream_.state = STREAM_STOPPED;
3252 // Set flags for buffer conversion.
3253 stream_.doConvertBuffer[mode] = false;
3254 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3255 stream_.doConvertBuffer[mode] = true;
3256 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3257 stream_.nUserChannels[mode] > 1 )
3258 stream_.doConvertBuffer[mode] = true;
3260 // Allocate necessary internal buffers
3261 unsigned long bufferBytes;
3262 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3263 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3264 if ( stream_.userBuffer[mode] == NULL ) {
3265 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3269 if ( stream_.doConvertBuffer[mode] ) {
3271 bool makeBuffer = true;
3272 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3273 if ( isDuplexInput && stream_.deviceBuffer ) {
3274 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3275 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3279 bufferBytes *= *bufferSize;
3280 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3281 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3282 if ( stream_.deviceBuffer == NULL ) {
3283 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3289 // Determine device latencies
3290 long inputLatency, outputLatency;
3291 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3292 if ( result != ASE_OK ) {
3293 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3294 errorText_ = errorStream_.str();
3295 error( RtAudioError::WARNING); // warn but don't fail
3298 stream_.latency[0] = outputLatency;
3299 stream_.latency[1] = inputLatency;
3302 // Setup the buffer conversion information structure. We don't use
3303 // buffers to do channel offsets, so we override that parameter
3305 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3310 if ( !isDuplexInput ) {
3311 // the cleanup for error in the duplex input, is done by RtApi::openStream
3312 // So we clean up for single channel only
3314 if ( buffersAllocated )
3315 ASIODisposeBuffers();
3317 drivers.removeCurrentDriver();
3320 CloseHandle( handle->condition );
3321 if ( handle->bufferInfos )
3322 free( handle->bufferInfos );
3325 stream_.apiHandle = 0;
3329 if ( stream_.userBuffer[mode] ) {
3330 free( stream_.userBuffer[mode] );
3331 stream_.userBuffer[mode] = 0;
3334 if ( stream_.deviceBuffer ) {
3335 free( stream_.deviceBuffer );
3336 stream_.deviceBuffer = 0;
3341 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3343 void RtApiAsio :: closeStream()
3345 if ( stream_.state == STREAM_CLOSED ) {
3346 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3347 error( RtAudioError::WARNING );
3351 if ( stream_.state == STREAM_RUNNING ) {
3352 stream_.state = STREAM_STOPPED;
3355 ASIODisposeBuffers();
3356 drivers.removeCurrentDriver();
3358 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3360 CloseHandle( handle->condition );
3361 if ( handle->bufferInfos )
3362 free( handle->bufferInfos );
3364 stream_.apiHandle = 0;
3367 for ( int i=0; i<2; i++ ) {
3368 if ( stream_.userBuffer[i] ) {
3369 free( stream_.userBuffer[i] );
3370 stream_.userBuffer[i] = 0;
3374 if ( stream_.deviceBuffer ) {
3375 free( stream_.deviceBuffer );
3376 stream_.deviceBuffer = 0;
3379 stream_.mode = UNINITIALIZED;
3380 stream_.state = STREAM_CLOSED;
// NOTE(review): file-scope flag, reset by startStream(); presumably records that
// a stop request was already issued so the stop path is not re-entered from the
// callback thread -- shared without synchronization, as in upstream RtAudio.
3383 bool stopThreadCalled = false;
3385 void RtApiAsio :: startStream()
3388 if ( stream_.state == STREAM_RUNNING ) {
3389 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3390 error( RtAudioError::WARNING );
3394 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3395 ASIOError result = ASIOStart();
3396 if ( result != ASE_OK ) {
3397 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3398 errorText_ = errorStream_.str();
3402 handle->drainCounter = 0;
3403 handle->internalDrain = false;
3404 ResetEvent( handle->condition );
3405 stream_.state = STREAM_RUNNING;
3409 stopThreadCalled = false;
3411 if ( result == ASE_OK ) return;
3412 error( RtAudioError::SYSTEM_ERROR );
3415 void RtApiAsio :: stopStream()
3418 if ( stream_.state == STREAM_STOPPED ) {
3419 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3420 error( RtAudioError::WARNING );
3424 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3425 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3426 if ( handle->drainCounter == 0 ) {
3427 handle->drainCounter = 2;
3428 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3432 stream_.state = STREAM_STOPPED;
3434 ASIOError result = ASIOStop();
3435 if ( result != ASE_OK ) {
3436 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3437 errorText_ = errorStream_.str();
3440 if ( result == ASE_OK ) return;
3441 error( RtAudioError::SYSTEM_ERROR );
3444 void RtApiAsio :: abortStream()
3447 if ( stream_.state == STREAM_STOPPED ) {
3448 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3449 error( RtAudioError::WARNING );
3453 // The following lines were commented-out because some behavior was
3454 // noted where the device buffers need to be zeroed to avoid
3455 // continuing sound, even when the device buffers are completely
3456 // disposed. So now, calling abort is the same as calling stop.
3457 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3458 // handle->drainCounter = 2;
3462 // This function will be called by a spawned thread when the user
3463 // callback function signals that the stream should be stopped or
3464 // aborted. It is necessary to handle it this way because the
3465 // callbackEvent() function must return before the ASIOStop()
3466 // function will return.
3467 static unsigned __stdcall asioStopStream( void *ptr )
3469 CallbackInfo *info = (CallbackInfo *) ptr;
3470 RtApiAsio *object = (RtApiAsio *) info->object;
3472 object->stopStream();
3477 bool RtApiAsio :: callbackEvent( long bufferIndex )
3479 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3480 if ( stream_.state == STREAM_CLOSED ) {
3481 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3482 error( RtAudioError::WARNING );
3486 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3487 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3489 // Check if we were draining the stream and signal if finished.
3490 if ( handle->drainCounter > 3 ) {
3492 stream_.state = STREAM_STOPPING;
3493 if ( handle->internalDrain == false )
3494 SetEvent( handle->condition );
3495 else { // spawn a thread to stop the stream
3497 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3498 &stream_.callbackInfo, 0, &threadId );
3503 // Invoke user callback to get fresh output data UNLESS we are
3505 if ( handle->drainCounter == 0 ) {
3506 RtAudioCallback callback = (RtAudioCallback) info->callback;
3507 double streamTime = getStreamTime();
3508 RtAudioStreamStatus status = 0;
3509 if ( stream_.mode != INPUT && asioXRun == true ) {
3510 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3513 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3514 status |= RTAUDIO_INPUT_OVERFLOW;
3517 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3518 stream_.bufferSize, streamTime, status, info->userData );
3519 if ( cbReturnValue == 2 ) {
3520 stream_.state = STREAM_STOPPING;
3521 handle->drainCounter = 2;
3523 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3524 &stream_.callbackInfo, 0, &threadId );
3527 else if ( cbReturnValue == 1 ) {
3528 handle->drainCounter = 1;
3529 handle->internalDrain = true;
3533 unsigned int nChannels, bufferBytes, i, j;
3534 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3535 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3537 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3539 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3541 for ( i=0, j=0; i<nChannels; i++ ) {
3542 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3543 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3547 else if ( stream_.doConvertBuffer[0] ) {
3549 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3550 if ( stream_.doByteSwap[0] )
3551 byteSwapBuffer( stream_.deviceBuffer,
3552 stream_.bufferSize * stream_.nDeviceChannels[0],
3553 stream_.deviceFormat[0] );
3555 for ( i=0, j=0; i<nChannels; i++ ) {
3556 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3557 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3558 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
3564 if ( stream_.doByteSwap[0] )
3565 byteSwapBuffer( stream_.userBuffer[0],
3566 stream_.bufferSize * stream_.nUserChannels[0],
3567 stream_.userFormat );
3569 for ( i=0, j=0; i<nChannels; i++ ) {
3570 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3571 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3572 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3578 // Don't bother draining input
3579 if ( handle->drainCounter ) {
3580 handle->drainCounter++;
3584 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3586 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3588 if (stream_.doConvertBuffer[1]) {
3590 // Always interleave ASIO input data.
3591 for ( i=0, j=0; i<nChannels; i++ ) {
3592 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3593 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3594 handle->bufferInfos[i].buffers[bufferIndex],
3598 if ( stream_.doByteSwap[1] )
3599 byteSwapBuffer( stream_.deviceBuffer,
3600 stream_.bufferSize * stream_.nDeviceChannels[1],
3601 stream_.deviceFormat[1] );
3602 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3606 for ( i=0, j=0; i<nChannels; i++ ) {
3607 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3608 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3609 handle->bufferInfos[i].buffers[bufferIndex],
3614 if ( stream_.doByteSwap[1] )
3615 byteSwapBuffer( stream_.userBuffer[1],
3616 stream_.bufferSize * stream_.nUserChannels[1],
3617 stream_.userFormat );
3622 // The following call was suggested by Malte Clasen. While the API
3623 // documentation indicates it should not be required, some device
3624 // drivers apparently do not function correctly without it.
3627 RtApi::tickStreamTime();
3631 static void sampleRateChanged( ASIOSampleRate sRate )
3633 // The ASIO documentation says that this usually only happens during
3634 // external sync. Audio processing is not stopped by the driver,
3635 // actual sample rate might not have even changed, maybe only the
3636 // sample rate status of an AES/EBU or S/PDIF digital input at the
3639 RtApi *object = (RtApi *) asioCallbackInfo->object;
3641 object->stopStream();
3643 catch ( RtAudioError &exception ) {
3644 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3648 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3651 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3655 switch( selector ) {
3656 case kAsioSelectorSupported:
3657 if ( value == kAsioResetRequest
3658 || value == kAsioEngineVersion
3659 || value == kAsioResyncRequest
3660 || value == kAsioLatenciesChanged
3661 // The following three were added for ASIO 2.0, you don't
3662 // necessarily have to support them.
3663 || value == kAsioSupportsTimeInfo
3664 || value == kAsioSupportsTimeCode
3665 || value == kAsioSupportsInputMonitor)
3668 case kAsioResetRequest:
3669 // Defer the task and perform the reset of the driver during the
3670 // next "safe" situation. You cannot reset the driver right now,
3671 // as this code is called from the driver. Reset the driver is
3672 // done by completely destruct is. I.e. ASIOStop(),
3673 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3675 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3678 case kAsioResyncRequest:
3679 // This informs the application that the driver encountered some
3680 // non-fatal data loss. It is used for synchronization purposes
3681 // of different media. Added mainly to work around the Win16Mutex
3682 // problems in Windows 95/98 with the Windows Multimedia system,
3683 // which could lose data because the Mutex was held too long by
3684 // another thread. However a driver can issue it in other
3686 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3690 case kAsioLatenciesChanged:
3691 // This will inform the host application that the drivers were
3692 // latencies changed. Beware, it this does not mean that the
3693 // buffer sizes have changed! You might need to update internal
3695 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3698 case kAsioEngineVersion:
3699 // Return the supported ASIO version of the host application. If
3700 // a host application does not implement this selector, ASIO 1.0
3701 // is assumed by the driver.
3704 case kAsioSupportsTimeInfo:
3705 // Informs the driver whether the
3706 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3707 // For compatibility with ASIO 1.0 drivers the host application
3708 // should always support the "old" bufferSwitch method, too.
3711 case kAsioSupportsTimeCode:
3712 // Informs the driver whether application is interested in time
3713 // code info. If an application does not need to know about time
3714 // code, the driver has less work to do.
3721 static const char* getAsioErrorString( ASIOError result )
3729 static const Messages m[] =
3731 { ASE_NotPresent, "Hardware input or output is not present or available." },
3732 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3733 { ASE_InvalidParameter, "Invalid input parameter." },
3734 { ASE_InvalidMode, "Invalid mode." },
3735 { ASE_SPNotAdvancing, "Sample position not advancing." },
3736 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3737 { ASE_NoMemory, "Not enough memory to complete the request." }
3740 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3741 if ( m[i].value == result ) return m[i].message;
3743 return "Unknown error.";
3746 //******************** End of __WINDOWS_ASIO__ *********************//
3750 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3752 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3753 // - Introduces support for the Windows WASAPI API
3754 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3755 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3756 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3761 #include <audioclient.h>
3763 #include <mmdeviceapi.h>
3764 #include <functiondiscoverykeys_devpkey.h>
3767 //=============================================================================
3769 #define SAFE_RELEASE( objectPtr )\
3772 objectPtr->Release();\
3776 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3778 //-----------------------------------------------------------------------------
3780 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3781 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3782 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3783 // provide intermediate storage for read / write synchronization.
3797 // sets the length of the internal ring buffer
3798 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3801 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3803 bufferSize_ = bufferSize;
3808 // attempt to push a buffer into the ring buffer at the current "in" index
// Copies bufferSize samples of the given format from `buffer` into the ring,
// wrapping at the end of internal storage.  Returns false (copying nothing)
// when the input is null, empty, larger than the whole ring, or would overrun
// the unread "out" index; returns true after the write index is advanced.
3809 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3811 if ( !buffer || // incoming buffer is NULL
3812 bufferSize == 0 || // incoming buffer has no data
3813 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the reader position so the overlap test below also works when the
// write span [inIndex_, inIndex_ + bufferSize) crosses the wrap point.
3818 unsigned int relOutIndex = outIndex_;
3819 unsigned int inIndexEnd = inIndex_ + bufferSize;
3820 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3821 relOutIndex += bufferSize_;
3824 // "in" index can end on the "out" index but cannot begin at it
3825 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3826 return false; // not enough space between "in" index and "out" index
3829 // copy buffer from external to internal
// Split the write: fromInSize samples fit before the end of storage,
// fromZeroSize samples wrap around to the start.
3830 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3831 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3832 int fromInSize = bufferSize - fromZeroSize;
// Per-format copies: inIndex_ / fromInSize are sample counts, so casting the
// char* storage to the sample type scales the byte offsets by the sample width.
3837 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3838 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3840 case RTAUDIO_SINT16:
3841 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3842 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3844 case RTAUDIO_SINT24:
3845 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3846 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3848 case RTAUDIO_SINT32:
3849 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3850 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3852 case RTAUDIO_FLOAT32:
3853 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3854 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3856 case RTAUDIO_FLOAT64:
3857 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3858 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3862 // update "in" index
3863 inIndex_ += bufferSize;
3864 inIndex_ %= bufferSize_;
3869 // attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror image of pushBuffer(): copies bufferSize samples of the given format
// out of the ring into `buffer`, wrapping at the end of storage.  Returns
// false when the request is null, empty, larger than the ring, or would read
// past the writer's "in" index; returns true after advancing the read index.
3870 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3872 if ( !buffer || // incoming buffer is NULL
3873 bufferSize == 0 || // incoming buffer has no data
3874 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the writer position so the overlap test below also works when the
// read span [outIndex_, outIndex_ + bufferSize) crosses the wrap point.
3879 unsigned int relInIndex = inIndex_;
3880 unsigned int outIndexEnd = outIndex_ + bufferSize;
3881 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3882 relInIndex += bufferSize_;
3885 // "out" index can begin at and end on the "in" index
3886 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3887 return false; // not enough space between "out" index and "in" index
3890 // copy buffer from internal to external
// Split the read: fromOutSize samples come from before the end of storage,
// fromZeroSize samples wrap around from the start.
3891 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3892 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3893 int fromOutSize = bufferSize - fromZeroSize;
// Per-format copies: outIndex_ / fromOutSize are sample counts, so casting the
// char* storage to the sample type scales the byte offsets by the sample width.
3898 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3899 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3901 case RTAUDIO_SINT16:
3902 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3903 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3905 case RTAUDIO_SINT24:
3906 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3907 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3909 case RTAUDIO_SINT32:
3910 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3911 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3913 case RTAUDIO_FLOAT32:
3914 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3915 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3917 case RTAUDIO_FLOAT64:
3918 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3919 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3923 // update "out" index
3924 outIndex_ += bufferSize;
3925 outIndex_ %= bufferSize_;
// Ring-buffer bookkeeping (all counts are in samples, not bytes):
3932 unsigned int bufferSize_; // total ring capacity
3933 unsigned int inIndex_; // next write (producer) position
3934 unsigned int outIndex_; // next read (consumer) position
3937 //-----------------------------------------------------------------------------
3939 // A structure to hold various information related to the WASAPI implementation.
// Per-stream COM interfaces and event handles.  They are populated lazily by
// probeDeviceOpen() / wasapiThread() and released in closeStream().
3942 IAudioClient* captureAudioClient;
3943 IAudioClient* renderAudioClient;
3944 IAudioCaptureClient* captureClient;
3945 IAudioRenderClient* renderClient;
3946 HANDLE captureEvent;
// Default-initialize everything to NULL so teardown can safely release/close
// whichever members were actually created.
3950 : captureAudioClient( NULL ),
3951 renderAudioClient( NULL ),
3952 captureClient( NULL ),
3953 renderClient( NULL ),
3954 captureEvent( NULL ),
3955 renderEvent( NULL ) {}
3958 //=============================================================================
// Constructor: initializes COM on this thread and creates the
// IMMDeviceEnumerator used by all subsequent device queries.  Failure to
// create the enumerator is reported as a DRIVER_ERROR.
3960 RtApiWasapi::RtApiWasapi()
3961 : coInitialized_( false ), deviceEnumerator_( NULL )
3963 // WASAPI can run either apartment or multi-threaded
3964 HRESULT hr = CoInitialize( NULL );
// Remember whether *we* initialized COM, so the destructor only balances a
// CoInitialize() this object actually performed.
3965 if ( !FAILED( hr ) )
3966 coInitialized_ = true;
3968 // Instantiate device enumerator
3969 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
3970 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
3971 ( void** ) &deviceEnumerator_ );
3973 if ( FAILED( hr ) ) {
3974 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
3975 error( RtAudioError::DRIVER_ERROR );
3979 //-----------------------------------------------------------------------------
// Destructor: shuts down any stream that is still open, releases the device
// enumerator, and balances the constructor's CoInitialize() when applicable.
3981 RtApiWasapi::~RtApiWasapi()
// A still-open stream must be torn down before COM state goes away.
3983 if ( stream_.state != STREAM_CLOSED )
3986 SAFE_RELEASE( deviceEnumerator_ );
3988 // If this object previously called CoInitialize()
3989 if ( coInitialized_ )
3993 //=============================================================================
// Returns the number of active WASAPI endpoints: render + capture combined.
// Device indices across this API place render devices first, then capture
// devices (see getDeviceInfo / probeDeviceOpen).  Any enumeration failure is
// reported as a DRIVER_ERROR.
3995 unsigned int RtApiWasapi::getDeviceCount( void )
3997 unsigned int captureDeviceCount = 0;
3998 unsigned int renderDeviceCount = 0;
4000 IMMDeviceCollection* captureDevices = NULL;
4001 IMMDeviceCollection* renderDevices = NULL;
4003 // Count capture devices
4005 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4006 if ( FAILED( hr ) ) {
4007 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4011 hr = captureDevices->GetCount( &captureDeviceCount );
4012 if ( FAILED( hr ) ) {
4013 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4017 // Count render devices
4018 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4019 if ( FAILED( hr ) ) {
4020 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4024 hr = renderDevices->GetCount( &renderDeviceCount );
4025 if ( FAILED( hr ) ) {
4026 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4031 // release all references
4032 SAFE_RELEASE( captureDevices );
4033 SAFE_RELEASE( renderDevices );
// An empty errorText_ means every step above succeeded; otherwise report the
// accumulated failure as a driver error.
4035 if ( errorText_.empty() )
4036 return captureDeviceCount + renderDeviceCount;
4038 error( RtAudioError::DRIVER_ERROR );
4042 //-----------------------------------------------------------------------------
// Probes one WASAPI endpoint and fills an RtAudio::DeviceInfo with its name,
// default-device flags, channel counts, native sample rate, and native sample
// formats.  Index mapping: [0, renderDeviceCount) are render devices; capture
// devices follow, addressed as (device - renderDeviceCount).  All COM
// references acquired along the way are released before returning.
4044 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4046 RtAudio::DeviceInfo info;
4047 unsigned int captureDeviceCount = 0;
4048 unsigned int renderDeviceCount = 0;
4049 std::string defaultDeviceName;
4050 bool isCaptureDevice = false;
4052 PROPVARIANT deviceNameProp;
4053 PROPVARIANT defaultDeviceNameProp;
4055 IMMDeviceCollection* captureDevices = NULL;
4056 IMMDeviceCollection* renderDevices = NULL;
4057 IMMDevice* devicePtr = NULL;
4058 IMMDevice* defaultDevicePtr = NULL;
4059 IAudioClient* audioClient = NULL;
4060 IPropertyStore* devicePropStore = NULL;
4061 IPropertyStore* defaultDevicePropStore = NULL;
4063 WAVEFORMATEX* deviceFormat = NULL;
4064 WAVEFORMATEX* closestMatchFormat = NULL;
// Pessimistic default until the probe succeeds.
4067 info.probed = false;
4069 // Count capture devices
4071 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4072 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4073 if ( FAILED( hr ) ) {
4074 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4078 hr = captureDevices->GetCount( &captureDeviceCount );
4079 if ( FAILED( hr ) ) {
4080 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4084 // Count render devices
4085 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4086 if ( FAILED( hr ) ) {
4087 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4091 hr = renderDevices->GetCount( &renderDeviceCount );
4092 if ( FAILED( hr ) ) {
4093 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4097 // validate device index
4098 if ( device >= captureDeviceCount + renderDeviceCount ) {
4099 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4100 errorType = RtAudioError::INVALID_USE;
4104 // determine whether index falls within capture or render devices
// Render devices occupy the low indices; capture devices are offset past them.
4105 if ( device >= renderDeviceCount ) {
4106 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4107 if ( FAILED( hr ) ) {
4108 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4111 isCaptureDevice = true;
4114 hr = renderDevices->Item( device, &devicePtr );
4115 if ( FAILED( hr ) ) {
4116 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4119 isCaptureDevice = false;
4122 // get default device name
4123 if ( isCaptureDevice ) {
4124 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4125 if ( FAILED( hr ) ) {
4126 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4131 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4132 if ( FAILED( hr ) ) {
4133 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4138 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4139 if ( FAILED( hr ) ) {
4140 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4143 PropVariantInit( &defaultDeviceNameProp );
4145 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4146 if ( FAILED( hr ) ) {
4147 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4151 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4154 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4155 if ( FAILED( hr ) ) {
4156 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4160 PropVariantInit( &deviceNameProp );
4162 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4163 if ( FAILED( hr ) ) {
4164 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4168 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default-device detection compares friendly names rather than endpoint IDs.
4171 if ( isCaptureDevice ) {
4172 info.isDefaultInput = info.name == defaultDeviceName;
4173 info.isDefaultOutput = false;
4176 info.isDefaultInput = false;
4177 info.isDefaultOutput = info.name == defaultDeviceName;
// Channel counts and formats come from the endpoint's shared-mode mix format.
4181 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4182 if ( FAILED( hr ) ) {
4183 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4187 hr = audioClient->GetMixFormat( &deviceFormat );
4188 if ( FAILED( hr ) ) {
4189 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4193 if ( isCaptureDevice ) {
4194 info.inputChannels = deviceFormat->nChannels;
4195 info.outputChannels = 0;
4196 info.duplexChannels = 0;
4199 info.inputChannels = 0;
4200 info.outputChannels = deviceFormat->nChannels;
4201 info.duplexChannels = 0;
4204 // sample rates (WASAPI only supports the one native sample rate)
4205 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4207 info.sampleRates.clear();
4208 info.sampleRates.push_back( deviceFormat->nSamplesPerSec );
// Translate the WAVEFORMATEX(TENSIBLE) tag + bit depth into RtAudio format flags.
4211 info.nativeFormats = 0;
4213 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4214 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4215 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4217 if ( deviceFormat->wBitsPerSample == 32 ) {
4218 info.nativeFormats |= RTAUDIO_FLOAT32;
4220 else if ( deviceFormat->wBitsPerSample == 64 ) {
4221 info.nativeFormats |= RTAUDIO_FLOAT64;
4224 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4225 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4226 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4228 if ( deviceFormat->wBitsPerSample == 8 ) {
4229 info.nativeFormats |= RTAUDIO_SINT8;
4231 else if ( deviceFormat->wBitsPerSample == 16 ) {
4232 info.nativeFormats |= RTAUDIO_SINT16;
4234 else if ( deviceFormat->wBitsPerSample == 24 ) {
4235 info.nativeFormats |= RTAUDIO_SINT24;
4237 else if ( deviceFormat->wBitsPerSample == 32 ) {
4238 info.nativeFormats |= RTAUDIO_SINT32;
4246 // release all references
4247 PropVariantClear( &deviceNameProp );
4248 PropVariantClear( &defaultDeviceNameProp );
4250 SAFE_RELEASE( captureDevices );
4251 SAFE_RELEASE( renderDevices );
4252 SAFE_RELEASE( devicePtr );
4253 SAFE_RELEASE( defaultDevicePtr );
4254 SAFE_RELEASE( audioClient );
4255 SAFE_RELEASE( devicePropStore );
4256 SAFE_RELEASE( defaultDevicePropStore );
// Mix-format buffers were allocated by COM (GetMixFormat) and must be freed
// with CoTaskMemFree, not free()/delete.
4258 CoTaskMemFree( deviceFormat );
4259 CoTaskMemFree( closestMatchFormat );
4261 if ( !errorText_.empty() )
4266 //-----------------------------------------------------------------------------
// Scans all devices and returns the index of the one flagged as the system
// default output.  Note: getDeviceCount() is re-evaluated every iteration and
// each getDeviceInfo() call performs a full COM probe, so this scan is costly.
4268 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4270 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4271 if ( getDeviceInfo( i ).isDefaultOutput ) {
4279 //-----------------------------------------------------------------------------
// Scans all devices and returns the index of the one flagged as the system
// default input.  Same per-iteration probing cost as getDefaultOutputDevice().
4281 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4283 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4284 if ( getDeviceInfo( i ).isDefaultInput ) {
4292 //-----------------------------------------------------------------------------
// Tears down the open stream: releases the WasapiHandle's COM clients, closes
// its event handles, deletes the handle itself, frees the user/device
// conversion buffers, and marks the stream CLOSED.  Calling with no open
// stream only raises a WARNING.
4294 void RtApiWasapi::closeStream( void )
4296 if ( stream_.state == STREAM_CLOSED ) {
4297 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4298 error( RtAudioError::WARNING );
// A running stream must be stopped before its resources are destroyed.
4302 if ( stream_.state != STREAM_STOPPED )
4305 // clean up stream memory
4306 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4307 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4309 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4310 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4312 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4313 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4315 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4316 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4318 delete ( WasapiHandle* ) stream_.apiHandle;
4319 stream_.apiHandle = NULL;
// Free the per-direction user buffers (index 0 = OUTPUT, 1 = INPUT).
4321 for ( int i = 0; i < 2; i++ ) {
4322 if ( stream_.userBuffer[i] ) {
4323 free( stream_.userBuffer[i] );
4324 stream_.userBuffer[i] = 0;
4328 if ( stream_.deviceBuffer ) {
4329 free( stream_.deviceBuffer );
4330 stream_.deviceBuffer = 0;
4333 // update stream state
4334 stream_.state = STREAM_CLOSED;
4337 //-----------------------------------------------------------------------------
// Starts the stream by spawning the WASAPI worker thread (wasapiThread via the
// runWasapiThread trampoline).  The thread is created suspended so its
// priority can be applied before any audio work runs, then resumed.
4339 void RtApiWasapi::startStream( void )
4343 if ( stream_.state == STREAM_RUNNING ) {
4344 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4345 error( RtAudioError::WARNING );
4349 // update stream state
// State is flipped to RUNNING before the thread exists; the worker thread
// polls this field to know it should keep processing.
4350 stream_.state = STREAM_RUNNING;
4352 // create WASAPI stream thread
4353 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4355 if ( !stream_.callbackInfo.thread ) {
4356 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4357 error( RtAudioError::THREAD_ERROR );
// Apply the priority requested at open time (see probeDeviceOpen), then let
// the suspended thread run.
4360 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4361 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4365 //-----------------------------------------------------------------------------
// Gracefully stops the stream: signals the worker thread via the STOPPING
// state, waits for it to acknowledge, lets the final buffer drain, stops the
// WASAPI capture/render clients, and closes the worker thread handle.
4367 void RtApiWasapi::stopStream( void )
4371 if ( stream_.state == STREAM_STOPPED ) {
4372 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4373 error( RtAudioError::WARNING );
4377 // inform stream thread by setting stream state to STREAM_STOPPING
4378 stream_.state = STREAM_STOPPING;
4380 // wait until stream thread is stopped
// Poll until wasapiThread() observes STOPPING and flips the state to STOPPED.
4381 while( stream_.state != STREAM_STOPPED ) {
4385 // Wait for the last buffer to play before stopping.
// Sleep one buffer's duration in milliseconds (1000 * frames / rate).
4386 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4388 // stop capture client if applicable
4389 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4390 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4391 if ( FAILED( hr ) ) {
4392 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4393 error( RtAudioError::DRIVER_ERROR );
4398 // stop render client if applicable
4399 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4400 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4401 if ( FAILED( hr ) ) {
4402 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4403 error( RtAudioError::DRIVER_ERROR );
4408 // close thread handle
4409 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4410 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4411 error( RtAudioError::THREAD_ERROR );
4415 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4418 //-----------------------------------------------------------------------------
// Immediately stops the stream.  Identical to stopStream() except it does NOT
// sleep to let the final buffer finish playing before stopping the clients.
4420 void RtApiWasapi::abortStream( void )
4424 if ( stream_.state == STREAM_STOPPED ) {
4425 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4426 error( RtAudioError::WARNING );
4430 // inform stream thread by setting stream state to STREAM_STOPPING
4431 stream_.state = STREAM_STOPPING;
4433 // wait until stream thread is stopped
// Poll until wasapiThread() observes STOPPING and flips the state to STOPPED.
4434 while ( stream_.state != STREAM_STOPPED ) {
4438 // stop capture client if applicable
4439 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4440 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4441 if ( FAILED( hr ) ) {
4442 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4443 error( RtAudioError::DRIVER_ERROR );
4448 // stop render client if applicable
4449 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4450 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4451 if ( FAILED( hr ) ) {
4452 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4453 error( RtAudioError::DRIVER_ERROR );
4458 // close thread handle
4459 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4460 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4461 error( RtAudioError::THREAD_ERROR );
4465 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4468 //-----------------------------------------------------------------------------
// Opens one direction (INPUT or OUTPUT) of a WASAPI stream on `device`:
// validates the index and sample rate, activates the endpoint's IAudioClient
// into the shared WasapiHandle, records stream parameters, decides whether
// buffer conversion is needed, and allocates the user buffer.  Returns
// SUCCESS/FAILURE.  Device indices: render devices first, capture after.
// WASAPI shared mode dictates the sample rate — only the device's native
// (mix-format) rate is accepted.
4470 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4471 unsigned int firstChannel, unsigned int sampleRate,
4472 RtAudioFormat format, unsigned int* bufferSize,
4473 RtAudio::StreamOptions* options )
4475 bool methodResult = FAILURE;
4476 unsigned int captureDeviceCount = 0;
4477 unsigned int renderDeviceCount = 0;
4479 IMMDeviceCollection* captureDevices = NULL;
4480 IMMDeviceCollection* renderDevices = NULL;
4481 IMMDevice* devicePtr = NULL;
4482 WAVEFORMATEX* deviceFormat = NULL;
4483 unsigned int bufferBytes;
4484 stream_.state = STREAM_STOPPED;
4485 RtAudio::DeviceInfo deviceInfo;
4487 // create API Handle if not already created
// A single WasapiHandle is shared by both directions of a duplex stream.
4488 if ( !stream_.apiHandle )
4489 stream_.apiHandle = ( void* ) new WasapiHandle();
4491 // Count capture devices
4493 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4494 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4495 if ( FAILED( hr ) ) {
4496 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4500 hr = captureDevices->GetCount( &captureDeviceCount );
4501 if ( FAILED( hr ) ) {
4502 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4506 // Count render devices
4507 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4508 if ( FAILED( hr ) ) {
4509 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4513 hr = renderDevices->GetCount( &renderDeviceCount );
4514 if ( FAILED( hr ) ) {
4515 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4519 // validate device index
4520 if ( device >= captureDeviceCount + renderDeviceCount ) {
4521 errorType = RtAudioError::INVALID_USE;
4522 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4526 deviceInfo = getDeviceInfo( device );
4528 // validate sample rate
// Shared-mode WASAPI runs at the endpoint's mix-format rate only.
4529 if ( sampleRate != deviceInfo.preferredSampleRate )
4531 errorType = RtAudioError::INVALID_USE;
4532 std::stringstream ss;
4533 ss << "RtApiWasapi::probeDeviceOpen: " << sampleRate
4534 << "Hz sample rate not supported. This device only supports "
4535 << deviceInfo.preferredSampleRate << "Hz.";
4536 errorText_ = ss.str();
4540 // determine whether index falls within capture or render devices
4541 if ( device >= renderDeviceCount ) {
4542 if ( mode != INPUT ) {
4543 errorType = RtAudioError::INVALID_USE;
4544 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4548 // retrieve captureAudioClient from devicePtr
4549 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4551 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4552 if ( FAILED( hr ) ) {
4553 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4557 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4558 NULL, ( void** ) &captureAudioClient );
4559 if ( FAILED( hr ) ) {
4560 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4564 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4565 if ( FAILED( hr ) ) {
4566 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4570 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
// NOTE(review): GetStreamLatency writes a 64-bit REFERENCE_TIME through a
// cast of &stream_.latency[mode] — verify that field is actually 64 bits
// wide, otherwise this overwrites adjacent memory.
4571 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4574 if ( mode != OUTPUT ) {
4575 errorType = RtAudioError::INVALID_USE;
4576 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4580 // retrieve renderAudioClient from devicePtr
4581 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4583 hr = renderDevices->Item( device, &devicePtr );
4584 if ( FAILED( hr ) ) {
4585 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4589 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4590 NULL, ( void** ) &renderAudioClient );
4591 if ( FAILED( hr ) ) {
4592 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4596 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4597 if ( FAILED( hr ) ) {
4598 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4602 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4603 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// A second open in the opposite direction upgrades the stream to DUPLEX.
4607 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4608 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4609 stream_.mode = DUPLEX;
4612 stream_.mode = mode;
4615 stream_.device[mode] = device;
4616 stream_.doByteSwap[mode] = false;
4617 stream_.sampleRate = sampleRate;
4618 stream_.bufferSize = *bufferSize;
4619 stream_.nBuffers = 1;
4620 stream_.nUserChannels[mode] = channels;
4621 stream_.channelOffset[mode] = firstChannel;
4622 stream_.userFormat = format;
4623 stream_.deviceFormat[mode] = deviceInfo.nativeFormats;
4625 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4626 stream_.userInterleaved = false;
4628 stream_.userInterleaved = true;
4629 stream_.deviceInterleaved[mode] = true;
4631 // Set flags for buffer conversion.
4632 stream_.doConvertBuffer[mode] = false;
// NOTE(review): if nUserChannels / nDeviceChannels are arrays, the comparison
// below compares decayed pointers (always unequal) rather than the per-mode
// element — likely intended as nUserChannels[mode] != nDeviceChannels[mode];
// verify against the declarations.
4633 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4634 stream_.nUserChannels != stream_.nDeviceChannels )
4635 stream_.doConvertBuffer[mode] = true;
4636 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4637 stream_.nUserChannels[mode] > 1 )
4638 stream_.doConvertBuffer[mode] = true;
4640 if ( stream_.doConvertBuffer[mode] )
4641 setConvertInfo( mode, 0 );
4643 // Allocate necessary internal buffers
4644 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4646 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4647 if ( !stream_.userBuffer[mode] ) {
4648 errorType = RtAudioError::MEMORY_ERROR;
4649 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
// Priority 15 corresponds to the highest callback priority; applied to the
// worker thread in startStream().
4653 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4654 stream_.callbackInfo.priority = 15;
4656 stream_.callbackInfo.priority = 0;
4658 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4659 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4661 methodResult = SUCCESS;
4665 SAFE_RELEASE( captureDevices );
4666 SAFE_RELEASE( renderDevices );
4667 SAFE_RELEASE( devicePtr );
4668 CoTaskMemFree( deviceFormat );
4670 // if method failed, close the stream
4671 if ( methodResult == FAILURE )
4674 if ( !errorText_.empty() )
4676 return methodResult;
4679 //=============================================================================
// Static thread entry point passed to CreateThread(); trampolines into the
// instance's wasapiThread() using the RtApiWasapi* smuggled through the
// thread parameter.
4681 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4684 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
// Static trampoline that invokes stopStream() on the RtApiWasapi instance
// passed via the thread parameter.
4689 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4692 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
// Static trampoline that invokes abortStream() on the RtApiWasapi instance
// passed via the thread parameter.
4697 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4700 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4705 //-----------------------------------------------------------------------------
4707 void RtApiWasapi::wasapiThread()
4709 // as this is a new thread, we must CoInitialize it
4710 CoInitialize( NULL );
4714 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4715 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4716 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4717 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4718 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4719 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4721 WAVEFORMATEX* captureFormat = NULL;
4722 WAVEFORMATEX* renderFormat = NULL;
4723 WasapiBuffer captureBuffer;
4724 WasapiBuffer renderBuffer;
4726 // declare local stream variables
4727 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4728 BYTE* streamBuffer = NULL;
4729 unsigned long captureFlags = 0;
4730 unsigned int bufferFrameCount = 0;
4731 unsigned int numFramesPadding = 0;
4732 bool callbackPushed = false;
4733 bool callbackPulled = false;
4734 bool callbackStopped = false;
4735 int callbackResult = 0;
4737 unsigned int deviceBuffSize = 0;
4740 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4742 // Attempt to assign "Pro Audio" characteristic to thread
4743 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4745 DWORD taskIndex = 0;
4746 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4747 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4748 FreeLibrary( AvrtDll );
4751 // start capture stream if applicable
4752 if ( captureAudioClient ) {
4753 hr = captureAudioClient->GetMixFormat( &captureFormat );
4754 if ( FAILED( hr ) ) {
4755 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4759 // initialize capture stream according to desired buffer size
4760 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / captureFormat->nSamplesPerSec );
4762 if ( !captureClient ) {
4763 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4764 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4765 desiredBufferPeriod,
4766 desiredBufferPeriod,
4769 if ( FAILED( hr ) ) {
4770 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4774 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4775 ( void** ) &captureClient );
4776 if ( FAILED( hr ) ) {
4777 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4781 // configure captureEvent to trigger on every available capture buffer
4782 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4783 if ( !captureEvent ) {
4784 errorType = RtAudioError::SYSTEM_ERROR;
4785 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4789 hr = captureAudioClient->SetEventHandle( captureEvent );
4790 if ( FAILED( hr ) ) {
4791 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
4795 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4796 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4799 unsigned int inBufferSize = 0;
4800 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4801 if ( FAILED( hr ) ) {
4802 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4806 // scale outBufferSize according to stream->user sample rate ratio
4807 unsigned int outBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT];
4808 inBufferSize *= stream_.nDeviceChannels[INPUT];
4810 // set captureBuffer size
4811 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4813 // reset the capture stream
4814 hr = captureAudioClient->Reset();
4815 if ( FAILED( hr ) ) {
4816 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4820 // start the capture stream
4821 hr = captureAudioClient->Start();
4822 if ( FAILED( hr ) ) {
4823 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4828 // start render stream if applicable
4829 if ( renderAudioClient ) {
4830 hr = renderAudioClient->GetMixFormat( &renderFormat );
4831 if ( FAILED( hr ) ) {
4832 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4836 // initialize render stream according to desired buffer size
4837 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / renderFormat->nSamplesPerSec );
4839 if ( !renderClient ) {
4840 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4841 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4842 desiredBufferPeriod,
4843 desiredBufferPeriod,
4846 if ( FAILED( hr ) ) {
4847 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4851 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4852 ( void** ) &renderClient );
4853 if ( FAILED( hr ) ) {
4854 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4858 // configure renderEvent to trigger on every available render buffer
4859 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4860 if ( !renderEvent ) {
4861 errorType = RtAudioError::SYSTEM_ERROR;
4862 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4866 hr = renderAudioClient->SetEventHandle( renderEvent );
4867 if ( FAILED( hr ) ) {
4868 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4872 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4873 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4876 unsigned int outBufferSize = 0;
4877 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4878 if ( FAILED( hr ) ) {
4879 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4883 // scale inBufferSize according to user->stream sample rate ratio
4884 unsigned int inBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[OUTPUT];
4885 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4887 // set renderBuffer size
4888 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4890 // reset the render stream
4891 hr = renderAudioClient->Reset();
4892 if ( FAILED( hr ) ) {
4893 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4897 // start the render stream
4898 hr = renderAudioClient->Start();
4899 if ( FAILED( hr ) ) {
4900 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
4905 if ( stream_.mode == INPUT ) {
4906 using namespace std; // for roundf
4907 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4909 else if ( stream_.mode == OUTPUT ) {
4910 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4912 else if ( stream_.mode == DUPLEX ) {
4913 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4914 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4917 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4918 if ( !stream_.deviceBuffer ) {
4919 errorType = RtAudioError::MEMORY_ERROR;
4920 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
4924 // stream process loop
4925 while ( stream_.state != STREAM_STOPPING ) {
4926 if ( !callbackPulled ) {
4929 // 1. Pull callback buffer from inputBuffer
4930 // 2. If 1. was successful: Convert callback buffer to user format
4932 if ( captureAudioClient ) {
4933 // Pull callback buffer from inputBuffer
4934 callbackPulled = captureBuffer.pullBuffer( stream_.deviceBuffer,
4935 ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT],
4936 stream_.deviceFormat[INPUT] );
4938 if ( callbackPulled ) {
4939 if ( stream_.doConvertBuffer[INPUT] ) {
4940 // Convert callback buffer to user format
4941 convertBuffer( stream_.userBuffer[INPUT],
4942 stream_.deviceBuffer,
4943 stream_.convertInfo[INPUT] );
4946 // no further conversion, simple copy deviceBuffer to userBuffer
4947 memcpy( stream_.userBuffer[INPUT],
4948 stream_.deviceBuffer,
4949 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
4954 // if there is no capture stream, set callbackPulled flag
4955 callbackPulled = true;
4960 // 1. Execute user callback method
4961 // 2. Handle return value from callback
4963 // if callback has not requested the stream to stop
4964 if ( callbackPulled && !callbackStopped ) {
4965 // Execute user callback method
4966 callbackResult = callback( stream_.userBuffer[OUTPUT],
4967 stream_.userBuffer[INPUT],
4970 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
4971 stream_.callbackInfo.userData );
4973 // Handle return value from callback
4974 if ( callbackResult == 1 ) {
4975 // instantiate a thread to stop this thread
4976 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
4977 if ( !threadHandle ) {
4978 errorType = RtAudioError::THREAD_ERROR;
4979 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
4982 else if ( !CloseHandle( threadHandle ) ) {
4983 errorType = RtAudioError::THREAD_ERROR;
4984 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
4988 callbackStopped = true;
4990 else if ( callbackResult == 2 ) {
4991 // instantiate a thread to stop this thread
4992 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
4993 if ( !threadHandle ) {
4994 errorType = RtAudioError::THREAD_ERROR;
4995 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
4998 else if ( !CloseHandle( threadHandle ) ) {
4999 errorType = RtAudioError::THREAD_ERROR;
5000 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5004 callbackStopped = true;
5011 // 1. Convert callback buffer to stream format
5012 // 2. Push callback buffer into outputBuffer
5014 if ( renderAudioClient && callbackPulled ) {
5015 if ( stream_.doConvertBuffer[OUTPUT] ) {
5016 // Convert callback buffer to stream format
5017 convertBuffer( stream_.deviceBuffer,
5018 stream_.userBuffer[OUTPUT],
5019 stream_.convertInfo[OUTPUT] );
5023 // Push callback buffer into outputBuffer
5024 callbackPushed = renderBuffer.pushBuffer( stream_.deviceBuffer,
5025 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT],
5026 stream_.deviceFormat[OUTPUT] );
5029 // if there is no render stream, set callbackPushed flag
5030 callbackPushed = true;
5035 // 1. Get capture buffer from stream
5036 // 2. Push capture buffer into inputBuffer
5037 // 3. If 2. was successful: Release capture buffer
5039 if ( captureAudioClient ) {
5040 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5041 if ( !callbackPulled ) {
5042 WaitForSingleObject( captureEvent, INFINITE );
5045 // Get capture buffer from stream
5046 hr = captureClient->GetBuffer( &streamBuffer,
5048 &captureFlags, NULL, NULL );
5049 if ( FAILED( hr ) ) {
5050 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5054 if ( bufferFrameCount != 0 ) {
5055 // Push capture buffer into inputBuffer
5056 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5057 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5058 stream_.deviceFormat[INPUT] ) )
5060 // Release capture buffer
5061 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5062 if ( FAILED( hr ) ) {
5063 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5069 // Inform WASAPI that capture was unsuccessful
5070 hr = captureClient->ReleaseBuffer( 0 );
5071 if ( FAILED( hr ) ) {
5072 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5079 // Inform WASAPI that capture was unsuccessful
5080 hr = captureClient->ReleaseBuffer( 0 );
5081 if ( FAILED( hr ) ) {
5082 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5090 // 1. Get render buffer from stream
5091 // 2. Pull next buffer from outputBuffer
5092 // 3. If 2. was successful: Fill render buffer with next buffer
5093 // Release render buffer
5095 if ( renderAudioClient ) {
5096 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5097 if ( callbackPulled && !callbackPushed ) {
5098 WaitForSingleObject( renderEvent, INFINITE );
5101 // Get render buffer from stream
5102 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5103 if ( FAILED( hr ) ) {
5104 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5108 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5109 if ( FAILED( hr ) ) {
5110 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5114 bufferFrameCount -= numFramesPadding;
5116 if ( bufferFrameCount != 0 ) {
5117 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5118 if ( FAILED( hr ) ) {
5119 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5123 // Pull next buffer from outputBuffer
5124 // Fill render buffer with next buffer
5125 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5126 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5127 stream_.deviceFormat[OUTPUT] ) )
5129 // Release render buffer
5130 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5131 if ( FAILED( hr ) ) {
5132 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5138 // Inform WASAPI that render was unsuccessful
5139 hr = renderClient->ReleaseBuffer( 0, 0 );
5140 if ( FAILED( hr ) ) {
5141 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5148 // Inform WASAPI that render was unsuccessful
5149 hr = renderClient->ReleaseBuffer( 0, 0 );
5150 if ( FAILED( hr ) ) {
5151 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5157 // if the callback buffer was pushed to renderBuffer, reset the callbackPulled flag
5158 if ( callbackPushed ) {
5159 callbackPulled = false;
5161 RtApi::tickStreamTime();
5168 CoTaskMemFree( captureFormat );
5169 CoTaskMemFree( renderFormat );
5173 // update stream state
5174 stream_.state = STREAM_STOPPED;
5176 if ( errorText_.empty() )
5182 //******************** End of __WINDOWS_WASAPI__ *********************//
5186 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5188 // Modified by Robin Davies, October 2005
5189 // - Improvements to DirectX pointer chasing.
5190 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5191 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5192 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5193 // Changed device query structure for RtAudio 4.0.7, January 2010
5195 #include <windows.h>
5196 #include <process.h>
5197 #include <mmsystem.h>
5201 #include <algorithm>
5203 #if defined(__MINGW32__)
5204 // missing from latest mingw winapi
5205 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5206 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5207 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5208 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5211 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5213 #ifdef _MSC_VER // if Microsoft Visual C++
5214 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5217 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5219 if ( pointer > bufferSize ) pointer -= bufferSize;
5220 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5221 if ( pointer < earlierPointer ) pointer += bufferSize;
5222 return pointer >= earlierPointer && pointer < laterPointer;
5225 // A structure to hold various information related to the DirectSound
5226 // API implementation.
5228 unsigned int drainCounter; // Tracks callback counts when draining
5229 bool internalDrain; // Indicates if stop is initiated from callback or not.
5233 UINT bufferPointer[2];
5234 DWORD dsBufferSize[2];
5235 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5239 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5242 // Declarations for utility functions, callbacks, and structures
5243 // specific to the DirectSound implementation.
5244 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5245 LPCTSTR description,
5249 static const char* getErrorString( int code );
5251 static unsigned __stdcall callbackHandler( void *ptr );
5260 : found(false) { validId[0] = false; validId[1] = false; }
5263 struct DsProbeData {
5265 std::vector<struct DsDevice>* dsDevices;
5268 RtApiDs :: RtApiDs()
5270 // Dsound will run both-threaded. If CoInitialize fails, then just
5271 // accept whatever the mainline chose for a threading model.
5272 coInitialized_ = false;
5273 HRESULT hr = CoInitialize( NULL );
5274 if ( !FAILED( hr ) ) coInitialized_ = true;
5277 RtApiDs :: ~RtApiDs()
5279 if ( stream_.state != STREAM_CLOSED ) closeStream();
5280 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5283 // The DirectSound default output is always the first device.
5284 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5289 // The DirectSound default input is always the first input device,
5290 // which is the first capture device enumerated.
5291 unsigned int RtApiDs :: getDefaultInputDevice( void )
5296 unsigned int RtApiDs :: getDeviceCount( void )
5298 // Set query flag for previously found devices to false, so that we
5299 // can check for any devices that have disappeared.
5300 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5301 dsDevices[i].found = false;
5303 // Query DirectSound devices.
5304 struct DsProbeData probeInfo;
5305 probeInfo.isInput = false;
5306 probeInfo.dsDevices = &dsDevices;
5307 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5308 if ( FAILED( result ) ) {
5309 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5310 errorText_ = errorStream_.str();
5311 error( RtAudioError::WARNING );
5314 // Query DirectSoundCapture devices.
5315 probeInfo.isInput = true;
5316 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5317 if ( FAILED( result ) ) {
5318 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5319 errorText_ = errorStream_.str();
5320 error( RtAudioError::WARNING );
5323 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5324 for ( unsigned int i=0; i<dsDevices.size(); ) {
5325 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5329 return static_cast<unsigned int>(dsDevices.size());
5332 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5334 RtAudio::DeviceInfo info;
5335 info.probed = false;
5337 if ( dsDevices.size() == 0 ) {
5338 // Force a query of all devices
5340 if ( dsDevices.size() == 0 ) {
5341 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5342 error( RtAudioError::INVALID_USE );
5347 if ( device >= dsDevices.size() ) {
5348 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5349 error( RtAudioError::INVALID_USE );
5354 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5356 LPDIRECTSOUND output;
5358 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5359 if ( FAILED( result ) ) {
5360 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5361 errorText_ = errorStream_.str();
5362 error( RtAudioError::WARNING );
5366 outCaps.dwSize = sizeof( outCaps );
5367 result = output->GetCaps( &outCaps );
5368 if ( FAILED( result ) ) {
5370 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5371 errorText_ = errorStream_.str();
5372 error( RtAudioError::WARNING );
5376 // Get output channel information.
5377 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5379 // Get sample rate information.
5380 info.sampleRates.clear();
5381 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5382 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5383 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5384 info.sampleRates.push_back( SAMPLE_RATES[k] );
5386 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5387 info.preferredSampleRate = SAMPLE_RATES[k];
5391 // Get format information.
5392 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5393 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5397 if ( getDefaultOutputDevice() == device )
5398 info.isDefaultOutput = true;
5400 if ( dsDevices[ device ].validId[1] == false ) {
5401 info.name = dsDevices[ device ].name;
5408 LPDIRECTSOUNDCAPTURE input;
5409 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5410 if ( FAILED( result ) ) {
5411 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5412 errorText_ = errorStream_.str();
5413 error( RtAudioError::WARNING );
5418 inCaps.dwSize = sizeof( inCaps );
5419 result = input->GetCaps( &inCaps );
5420 if ( FAILED( result ) ) {
5422 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5423 errorText_ = errorStream_.str();
5424 error( RtAudioError::WARNING );
5428 // Get input channel information.
5429 info.inputChannels = inCaps.dwChannels;
5431 // Get sample rate and format information.
5432 std::vector<unsigned int> rates;
5433 if ( inCaps.dwChannels >= 2 ) {
5434 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5435 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5436 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5437 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5438 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5439 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5440 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5441 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5443 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5444 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5445 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5446 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5447 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5449 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5450 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5451 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5452 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5453 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5456 else if ( inCaps.dwChannels == 1 ) {
5457 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5458 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5459 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5460 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5461 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5462 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5463 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5464 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5466 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5467 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5468 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5469 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5470 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5472 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5473 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5474 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5475 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5476 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5479 else info.inputChannels = 0; // technically, this would be an error
5483 if ( info.inputChannels == 0 ) return info;
5485 // Copy the supported rates to the info structure but avoid duplication.
5487 for ( unsigned int i=0; i<rates.size(); i++ ) {
5489 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5490 if ( rates[i] == info.sampleRates[j] ) {
5495 if ( found == false ) info.sampleRates.push_back( rates[i] );
5497 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5499 // If device opens for both playback and capture, we determine the channels.
5500 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5501 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5503 if ( device == 0 ) info.isDefaultInput = true;
5505 // Copy name and return.
5506 info.name = dsDevices[ device ].name;
5511 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5512 unsigned int firstChannel, unsigned int sampleRate,
5513 RtAudioFormat format, unsigned int *bufferSize,
5514 RtAudio::StreamOptions *options )
5516 if ( channels + firstChannel > 2 ) {
5517 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5521 size_t nDevices = dsDevices.size();
5522 if ( nDevices == 0 ) {
5523 // This should not happen because a check is made before this function is called.
5524 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5528 if ( device >= nDevices ) {
5529 // This should not happen because a check is made before this function is called.
5530 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5534 if ( mode == OUTPUT ) {
5535 if ( dsDevices[ device ].validId[0] == false ) {
5536 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5537 errorText_ = errorStream_.str();
5541 else { // mode == INPUT
5542 if ( dsDevices[ device ].validId[1] == false ) {
5543 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5544 errorText_ = errorStream_.str();
5549 // According to a note in PortAudio, using GetDesktopWindow()
5550 // instead of GetForegroundWindow() is supposed to avoid problems
5551 // that occur when the application's window is not the foreground
5552 // window. Also, if the application window closes before the
5553 // DirectSound buffer, DirectSound can crash. In the past, I had
5554 // problems when using GetDesktopWindow() but it seems fine now
5555 // (January 2010). I'll leave it commented here.
5556 // HWND hWnd = GetForegroundWindow();
5557 HWND hWnd = GetDesktopWindow();
5559 // Check the numberOfBuffers parameter and limit the lowest value to
5560 // two. This is a judgement call and a value of two is probably too
5561 // low for capture, but it should work for playback.
5563 if ( options ) nBuffers = options->numberOfBuffers;
5564 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5565 if ( nBuffers < 2 ) nBuffers = 3;
5567 // Check the lower range of the user-specified buffer size and set
5568 // (arbitrarily) to a lower bound of 32.
5569 if ( *bufferSize < 32 ) *bufferSize = 32;
5571 // Create the wave format structure. The data format setting will
5572 // be determined later.
5573 WAVEFORMATEX waveFormat;
5574 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5575 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5576 waveFormat.nChannels = channels + firstChannel;
5577 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5579 // Determine the device buffer size. By default, we'll use the value
5580 // defined above (32K), but we will grow it to make allowances for
5581 // very large software buffer sizes.
5582 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5583 DWORD dsPointerLeadTime = 0;
5585 void *ohandle = 0, *bhandle = 0;
5587 if ( mode == OUTPUT ) {
5589 LPDIRECTSOUND output;
5590 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5591 if ( FAILED( result ) ) {
5592 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5593 errorText_ = errorStream_.str();
5598 outCaps.dwSize = sizeof( outCaps );
5599 result = output->GetCaps( &outCaps );
5600 if ( FAILED( result ) ) {
5602 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5603 errorText_ = errorStream_.str();
5607 // Check channel information.
5608 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5609 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5610 errorText_ = errorStream_.str();
5614 // Check format information. Use 16-bit format unless not
5615 // supported or user requests 8-bit.
5616 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5617 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5618 waveFormat.wBitsPerSample = 16;
5619 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5622 waveFormat.wBitsPerSample = 8;
5623 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5625 stream_.userFormat = format;
5627 // Update wave format structure and buffer information.
5628 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5629 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5630 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5632 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5633 while ( dsPointerLeadTime * 2U > dsBufferSize )
5636 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5637 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5638 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5639 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5640 if ( FAILED( result ) ) {
5642 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5643 errorText_ = errorStream_.str();
5647 // Even though we will write to the secondary buffer, we need to
5648 // access the primary buffer to set the correct output format
5649 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5650 // buffer description.
5651 DSBUFFERDESC bufferDescription;
5652 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5653 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5654 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5656 // Obtain the primary buffer
5657 LPDIRECTSOUNDBUFFER buffer;
5658 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5659 if ( FAILED( result ) ) {
5661 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5662 errorText_ = errorStream_.str();
5666 // Set the primary DS buffer sound format.
5667 result = buffer->SetFormat( &waveFormat );
5668 if ( FAILED( result ) ) {
5670 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5671 errorText_ = errorStream_.str();
5675 // Setup the secondary DS buffer description.
5676 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5677 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5678 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5679 DSBCAPS_GLOBALFOCUS |
5680 DSBCAPS_GETCURRENTPOSITION2 |
5681 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5682 bufferDescription.dwBufferBytes = dsBufferSize;
5683 bufferDescription.lpwfxFormat = &waveFormat;
5685 // Try to create the secondary DS buffer. If that doesn't work,
5686 // try to use software mixing. Otherwise, there's a problem.
5687 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5688 if ( FAILED( result ) ) {
5689 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5690 DSBCAPS_GLOBALFOCUS |
5691 DSBCAPS_GETCURRENTPOSITION2 |
5692 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5693 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5694 if ( FAILED( result ) ) {
5696 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5697 errorText_ = errorStream_.str();
5702 // Get the buffer size ... might be different from what we specified.
5704 dsbcaps.dwSize = sizeof( DSBCAPS );
5705 result = buffer->GetCaps( &dsbcaps );
5706 if ( FAILED( result ) ) {
5709 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5710 errorText_ = errorStream_.str();
5714 dsBufferSize = dsbcaps.dwBufferBytes;
5716 // Lock the DS buffer
5719 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5720 if ( FAILED( result ) ) {
5723 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5724 errorText_ = errorStream_.str();
5728 // Zero the DS buffer
5729 ZeroMemory( audioPtr, dataLen );
5731 // Unlock the DS buffer
5732 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5733 if ( FAILED( result ) ) {
5736 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5737 errorText_ = errorStream_.str();
5741 ohandle = (void *) output;
5742 bhandle = (void *) buffer;
5745 if ( mode == INPUT ) {
5747 LPDIRECTSOUNDCAPTURE input;
5748 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5749 if ( FAILED( result ) ) {
5750 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5751 errorText_ = errorStream_.str();
5756 inCaps.dwSize = sizeof( inCaps );
5757 result = input->GetCaps( &inCaps );
5758 if ( FAILED( result ) ) {
5760 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5761 errorText_ = errorStream_.str();
5765 // Check channel information.
5766 if ( inCaps.dwChannels < channels + firstChannel ) {
5767 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5771 // Check format information. Use 16-bit format unless the user requests 8-bit.
5773 DWORD deviceFormats;
5774 if ( channels + firstChannel == 2 ) {
5775 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5776 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5777 waveFormat.wBitsPerSample = 8;
5778 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5780 else { // assume 16-bit is supported
5781 waveFormat.wBitsPerSample = 16;
5782 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5785 else { // channel == 1
5786 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5787 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5788 waveFormat.wBitsPerSample = 8;
5789 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5791 else { // assume 16-bit is supported
5792 waveFormat.wBitsPerSample = 16;
5793 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5796 stream_.userFormat = format;
5798 // Update wave format structure and buffer information.
5799 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5800 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5801 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5803 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5804 while ( dsPointerLeadTime * 2U > dsBufferSize )
5807 // Setup the secondary DS buffer description.
5808 DSCBUFFERDESC bufferDescription;
5809 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5810 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5811 bufferDescription.dwFlags = 0;
5812 bufferDescription.dwReserved = 0;
5813 bufferDescription.dwBufferBytes = dsBufferSize;
5814 bufferDescription.lpwfxFormat = &waveFormat;
5816 // Create the capture buffer.
5817 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5818 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5819 if ( FAILED( result ) ) {
5821 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5822 errorText_ = errorStream_.str();
5826 // Get the buffer size ... might be different from what we specified.
5828 dscbcaps.dwSize = sizeof( DSCBCAPS );
5829 result = buffer->GetCaps( &dscbcaps );
5830 if ( FAILED( result ) ) {
5833 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5834 errorText_ = errorStream_.str();
5838 dsBufferSize = dscbcaps.dwBufferBytes;
5840 // NOTE: We could have a problem here if this is a duplex stream
5841 // and the play and capture hardware buffer sizes are different
5842 // (I'm actually not sure if that is a problem or not).
5843 // Currently, we are not verifying that.
5845 // Lock the capture buffer
5848 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5849 if ( FAILED( result ) ) {
5852 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5853 errorText_ = errorStream_.str();
5858 ZeroMemory( audioPtr, dataLen );
5860 // Unlock the buffer
5861 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5862 if ( FAILED( result ) ) {
5865 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5866 errorText_ = errorStream_.str();
5870 ohandle = (void *) input;
5871 bhandle = (void *) buffer;
5874 // Set various stream parameters
5875 DsHandle *handle = 0;
5876 stream_.nDeviceChannels[mode] = channels + firstChannel;
5877 stream_.nUserChannels[mode] = channels;
5878 stream_.bufferSize = *bufferSize;
5879 stream_.channelOffset[mode] = firstChannel;
5880 stream_.deviceInterleaved[mode] = true;
5881 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5882 else stream_.userInterleaved = true;
5884 // Set flag for buffer conversion
5885 stream_.doConvertBuffer[mode] = false;
5886 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5887 stream_.doConvertBuffer[mode] = true;
5888 if (stream_.userFormat != stream_.deviceFormat[mode])
5889 stream_.doConvertBuffer[mode] = true;
5890 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5891 stream_.nUserChannels[mode] > 1 )
5892 stream_.doConvertBuffer[mode] = true;
5894 // Allocate necessary internal buffers
5895 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5896 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5897 if ( stream_.userBuffer[mode] == NULL ) {
5898 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
5902 if ( stream_.doConvertBuffer[mode] ) {
5904 bool makeBuffer = true;
5905 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
5906 if ( mode == INPUT ) {
5907 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
5908 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
5909 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
5914 bufferBytes *= *bufferSize;
5915 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
5916 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
5917 if ( stream_.deviceBuffer == NULL ) {
5918 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
5924 // Allocate our DsHandle structures for the stream.
5925 if ( stream_.apiHandle == 0 ) {
5927 handle = new DsHandle;
5929 catch ( std::bad_alloc& ) {
5930 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
5934 // Create a manual-reset event.
5935 handle->condition = CreateEvent( NULL, // no security
5936 TRUE, // manual-reset
5937 FALSE, // non-signaled initially
5939 stream_.apiHandle = (void *) handle;
5942 handle = (DsHandle *) stream_.apiHandle;
5943 handle->id[mode] = ohandle;
5944 handle->buffer[mode] = bhandle;
5945 handle->dsBufferSize[mode] = dsBufferSize;
5946 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
5948 stream_.device[mode] = device;
5949 stream_.state = STREAM_STOPPED;
5950 if ( stream_.mode == OUTPUT && mode == INPUT )
5951 // We had already set up an output stream.
5952 stream_.mode = DUPLEX;
5954 stream_.mode = mode;
5955 stream_.nBuffers = nBuffers;
5956 stream_.sampleRate = sampleRate;
5958 // Setup the buffer conversion information structure.
5959 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
5961 // Setup the callback thread.
5962 if ( stream_.callbackInfo.isRunning == false ) {
5964 stream_.callbackInfo.isRunning = true;
5965 stream_.callbackInfo.object = (void *) this;
5966 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
5967 &stream_.callbackInfo, 0, &threadId );
5968 if ( stream_.callbackInfo.thread == 0 ) {
5969 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
5973 // Boost DS thread priority
5974 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
5980 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5981 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5982 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5983 if ( buffer ) buffer->Release();
5986 if ( handle->buffer[1] ) {
5987 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5988 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
5989 if ( buffer ) buffer->Release();
5992 CloseHandle( handle->condition );
5994 stream_.apiHandle = 0;
5997 for ( int i=0; i<2; i++ ) {
5998 if ( stream_.userBuffer[i] ) {
5999 free( stream_.userBuffer[i] );
6000 stream_.userBuffer[i] = 0;
6004 if ( stream_.deviceBuffer ) {
6005 free( stream_.deviceBuffer );
6006 stream_.deviceBuffer = 0;
6009 stream_.state = STREAM_CLOSED;
6013 void RtApiDs :: closeStream()
// Tear down an open DirectSound stream: stop the callback thread,
// release the playback/capture buffers and device objects, close the
// signalling event, and free all internal buffers.  Warns (and should
// return) if no stream is open.
// NOTE(review): this extract is missing interior lines (braces, a
// 'return;', and the buffer/object Release() calls around original
// lines 6031-6045) -- compare against the canonical RtAudio.cpp.
6015   if ( stream_.state == STREAM_CLOSED ) {
6016     errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6017     error( RtAudioError::WARNING );
6021   // Stop the callback thread.
6022   stream_.callbackInfo.isRunning = false;
// Wait for the callback thread to exit its loop before closing its handle.
6023   WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6024   CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6026   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6028     if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6029       LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6030       LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6037     if ( handle->buffer[1] ) {
6038       LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6039       LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Close the manual-reset event created in probeDeviceOpen.
6046     CloseHandle( handle->condition );
6048     stream_.apiHandle = 0;
// Free the user buffers for both directions (0 = output, 1 = input).
6051   for ( int i=0; i<2; i++ ) {
6052     if ( stream_.userBuffer[i] ) {
6053       free( stream_.userBuffer[i] );
6054       stream_.userBuffer[i] = 0;
6058   if ( stream_.deviceBuffer ) {
6059     free( stream_.deviceBuffer );
6060     stream_.deviceBuffer = 0;
6063   stream_.mode = UNINITIALIZED;
6064   stream_.state = STREAM_CLOSED;
6067 void RtApiDs :: startStream()
// Start playback and/or capture on an open stream.  Resets the drain
// state and condition event, then flips the stream to STREAM_RUNNING.
// Warns (and should return) if already running.
// NOTE(review): interior lines are missing from this extract (e.g. the
// 'HRESULT result = DS_OK;' declaration and error-path gotos) --
// compare against the canonical RtAudio.cpp.
6070   if ( stream_.state == STREAM_RUNNING ) {
6071     errorText_ = "RtApiDs::startStream(): the stream is already running!";
6072     error( RtAudioError::WARNING );
6076   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6078   // Increase scheduler frequency on lesser windows (a side-effect of
6079   // increasing timer accuracy). On greater windows (Win2K or later),
6080   // this is already in effect.
6081   timeBeginPeriod( 1 );
6083   buffersRolling = false;
6084   duplexPrerollBytes = 0;
6086   if ( stream_.mode == DUPLEX ) {
6087     // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6088     duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start the playback buffer looping (output or duplex streams).
6092   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6094     LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6095     result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6096     if ( FAILED( result ) ) {
6097       errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6098       errorText_ = errorStream_.str();
// Start the capture buffer looping (input or duplex streams).
6103   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6105     LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6106     result = buffer->Start( DSCBSTART_LOOPING );
6107     if ( FAILED( result ) ) {
6108       errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6109       errorText_ = errorStream_.str();
6114   handle->drainCounter = 0;
6115   handle->internalDrain = false;
// Clear the manual-reset event so stopStream() can wait on it later.
6116   ResetEvent( handle->condition );
6117   stream_.state = STREAM_RUNNING;
6120   if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6123 void RtApiDs :: stopStream()
// Stop a running stream, draining any queued output first.  For each
// active direction it stops the DirectSound buffer, then locks, zeroes
// and unlocks it so stale audio is not heard on the next start, and
// resets the ring-buffer pointer to the beginning.
// NOTE(review): interior lines are missing from this extract (the
// 'HRESULT result' / 'LPVOID audioPtr' / 'DWORD dataLen' declarations
// and several braces) -- compare against the canonical RtAudio.cpp.
6126   if ( stream_.state == STREAM_STOPPED ) {
6127     errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6128     error( RtAudioError::WARNING );
6135   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6136   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Request a drain (drainCounter = 2) and wait until callbackEvent()
// signals that the last buffers have played out.
6137     if ( handle->drainCounter == 0 ) {
6138       handle->drainCounter = 2;
6139       WaitForSingleObject( handle->condition, INFINITE );  // block until signaled
6142     stream_.state = STREAM_STOPPED;
6144     MUTEX_LOCK( &stream_.mutex );
6146     // Stop the buffer and clear memory
6147     LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6148     result = buffer->Stop();
6149     if ( FAILED( result ) ) {
6150       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6151       errorText_ = errorStream_.str();
6155     // Lock the buffer and clear it so that if we start to play again,
6156     // we won't have old data playing.
6157     result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6158     if ( FAILED( result ) ) {
6159       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6160       errorText_ = errorStream_.str();
6164     // Zero the DS buffer
6165     ZeroMemory( audioPtr, dataLen );
6167     // Unlock the DS buffer
6168     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6169     if ( FAILED( result ) ) {
6170       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6171       errorText_ = errorStream_.str();
6175     // If we start playing again, we must begin at beginning of buffer.
6176     handle->bufferPointer[0] = 0;
6179   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6180     LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6184     stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above.
6186     if ( stream_.mode != DUPLEX )
6187       MUTEX_LOCK( &stream_.mutex );
6189     result = buffer->Stop();
6190     if ( FAILED( result ) ) {
6191       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6192       errorText_ = errorStream_.str();
6196     // Lock the buffer and clear it so that if we start to play again,
6197     // we won't have old data playing.
6198     result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6199     if ( FAILED( result ) ) {
6200       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6201       errorText_ = errorStream_.str();
6205     // Zero the DS buffer
6206     ZeroMemory( audioPtr, dataLen );
6208     // Unlock the DS buffer
6209     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6210     if ( FAILED( result ) ) {
6211       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6212       errorText_ = errorStream_.str();
6216     // If we start recording again, we must begin at beginning of buffer.
6217     handle->bufferPointer[1] = 0;
// Restore normal timer resolution (paired with timeBeginPeriod in startStream).
6221   timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6222   MUTEX_UNLOCK( &stream_.mutex );
6224   if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6227 void RtApiDs :: abortStream()
// Abort a running stream immediately: set drainCounter = 2 so the
// callback writes silence instead of invoking the user callback, then
// (in the canonical source) delegates the actual shutdown to
// stopStream().  Warns if the stream is already stopped.
// NOTE(review): the closing 'stopStream();' call and braces are missing
// from this extract -- compare against the canonical RtAudio.cpp.
6230   if ( stream_.state == STREAM_STOPPED ) {
6231     errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6232     error( RtAudioError::WARNING );
6236   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6237   handle->drainCounter = 2;
6242 void RtApiDs :: callbackEvent()
// Core DirectSound servicing routine, executed repeatedly by the
// callback thread (see callbackHandler below).  One pass does:
//   1. drain bookkeeping -- signal stopStream() when a drain completes;
//   2. invoke the user callback for fresh data (unless draining);
//   3. playback: copy/convert output into the DS ring buffer ahead of
//      the safe-write cursor, sleeping until enough space is free;
//   4. capture: copy/convert input from the DS ring buffer behind the
//      safe-read cursor, with duplex pre-roll and resync handling.
// NOTE(review): this extract is missing interior lines (braces,
// 'return;' statements, and declarations such as 'HRESULT result;' and
// 'char *buffer;') -- compare against the canonical RtAudio.cpp.
// NOTE(review): '¤tWritePointer' / '¤tReadPointer' below are
// mis-encoded forms of '&currentWritePointer' / '&currentReadPointer'
// (an '&curren;' HTML-entity substitution) -- repair before compiling.
6244   if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6245     Sleep( 50 ); // sleep 50 milliseconds
6249   if ( stream_.state == STREAM_CLOSED ) {
6250     errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6251     error( RtAudioError::WARNING );
6255   CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6256   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6258   // Check if we were draining the stream and signal is finished.
6259   if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6261     stream_.state = STREAM_STOPPING;
// Only signal the condition when an external stopStream() is waiting;
// an internal drain (callback returned 1) has no waiter.
6262     if ( handle->internalDrain == false )
6263       SetEvent( handle->condition );
6269   // Invoke user callback to get fresh output data UNLESS we are
6271   if ( handle->drainCounter == 0 ) {
6272     RtAudioCallback callback = (RtAudioCallback) info->callback;
6273     double streamTime = getStreamTime();
6274     RtAudioStreamStatus status = 0;
// Report (and clear) any underflow/overflow flags set on earlier passes.
6275     if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6276       status |= RTAUDIO_OUTPUT_UNDERFLOW;
6277       handle->xrun[0] = false;
6279     if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6280       status |= RTAUDIO_INPUT_OVERFLOW;
6281       handle->xrun[1] = false;
6283     int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6284 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort now; 1 = drain then stop; 0 = continue.
6285     if ( cbReturnValue == 2 ) {
6286       stream_.state = STREAM_STOPPING;
6287       handle->drainCounter = 2;
6291     else if ( cbReturnValue == 1 ) {
6292       handle->drainCounter = 1;
6293       handle->internalDrain = true;
6298   DWORD currentWritePointer, safeWritePointer;
6299   DWORD currentReadPointer, safeReadPointer;
6300   UINT nextWritePointer;
6302   LPVOID buffer1 = NULL;
6303   LPVOID buffer2 = NULL;
6304   DWORD bufferSize1 = 0;
6305   DWORD bufferSize2 = 0;
6310   MUTEX_LOCK( &stream_.mutex );
// The stream could have been stopped while we were waiting above.
6311   if ( stream_.state == STREAM_STOPPED ) {
6312     MUTEX_UNLOCK( &stream_.mutex );
// First pass after startStream(): wait for the device cursors to start
// moving, then initialize our ring-buffer pointers from them.
6316   if ( buffersRolling == false ) {
6317     if ( stream_.mode == DUPLEX ) {
6318       //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6320       // It takes a while for the devices to get rolling. As a result,
6321       // there's no guarantee that the capture and write device pointers
6322       // will move in lockstep. Wait here for both devices to start
6323       // rolling, and then set our buffer pointers accordingly.
6324       // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6325       // bytes later than the write buffer.
6327       // Stub: a serious risk of having a pre-emptive scheduling round
6328       // take place between the two GetCurrentPosition calls... but I'm
6329       // really not sure how to solve the problem. Temporarily boost to
6330       // Realtime priority, maybe; but I'm not sure what priority the
6331       // DirectSound service threads run at. We *should* be roughly
6332       // within a ms or so of correct.
6334       LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6335       LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6337       DWORD startSafeWritePointer, startSafeReadPointer;
6339       result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6340       if ( FAILED( result ) ) {
6341         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6342         errorText_ = errorStream_.str();
6343         MUTEX_UNLOCK( &stream_.mutex );
6344         error( RtAudioError::SYSTEM_ERROR );
6347       result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6348       if ( FAILED( result ) ) {
6349         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6350         errorText_ = errorStream_.str();
6351         MUTEX_UNLOCK( &stream_.mutex );
6352         error( RtAudioError::SYSTEM_ERROR );
// Poll (in a loop in the canonical source) until both cursors have moved.
6356       result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6357       if ( FAILED( result ) ) {
6358         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6359         errorText_ = errorStream_.str();
6360         MUTEX_UNLOCK( &stream_.mutex );
6361         error( RtAudioError::SYSTEM_ERROR );
6364       result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6365       if ( FAILED( result ) ) {
6366         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6367         errorText_ = errorStream_.str();
6368         MUTEX_UNLOCK( &stream_.mutex );
6369         error( RtAudioError::SYSTEM_ERROR );
6372       if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6376       //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
// Place our write pointer one lead time ahead of the safe-write cursor.
6378       handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6379       if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6380       handle->bufferPointer[1] = safeReadPointer;
6382     else if ( stream_.mode == OUTPUT ) {
6384       // Set the proper nextWritePosition after initial startup.
6385       LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6386       result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6387       if ( FAILED( result ) ) {
6388         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6389         errorText_ = errorStream_.str();
6390         MUTEX_UNLOCK( &stream_.mutex );
6391         error( RtAudioError::SYSTEM_ERROR );
6394       handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6395       if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6398     buffersRolling = true;
// ---- Playback half: push output data into the DS ring buffer. ----
6401   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6403     LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6405     if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6406       bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6407       bufferBytes *= formatBytes( stream_.userFormat );
6408       memset( stream_.userBuffer[0], 0, bufferBytes );
6411     // Setup parameters and do buffer conversion if necessary.
6412     if ( stream_.doConvertBuffer[0] ) {
6413       buffer = stream_.deviceBuffer;
6414       convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6415       bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6416       bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6419       buffer = stream_.userBuffer[0];
6420       bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6421       bufferBytes *= formatBytes( stream_.userFormat );
6424     // No byte swapping necessary in DirectSound implementation.
6426     // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6427     // unsigned. So, we need to convert our signed 8-bit data here to
6429     if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6430       for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6432     DWORD dsBufferSize = handle->dsBufferSize[0];
6433     nextWritePointer = handle->bufferPointer[0];
6435     DWORD endWrite, leadPointer;
6437     // Find out where the read and "safe write" pointers are.
6438     result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6439     if ( FAILED( result ) ) {
6440       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6441       errorText_ = errorStream_.str();
6442       MUTEX_UNLOCK( &stream_.mutex );
6443       error( RtAudioError::SYSTEM_ERROR );
6447     // We will copy our output buffer into the region between
6448     // safeWritePointer and leadPointer. If leadPointer is not
6449     // beyond the next endWrite position, wait until it is.
6450     leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6451     //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6452     if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6453     if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6454     endWrite = nextWritePointer + bufferBytes;
6456     // Check whether the entire write region is behind the play pointer.
6457     if ( leadPointer >= endWrite ) break;
6459     // If we are here, then we must wait until the leadPointer advances
6460     // beyond the end of our next write region. We use the
6461     // Sleep() function to suspend operation until that happens.
6462     double millis = ( endWrite - leadPointer ) * 1000.0;
6463     millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6464     if ( millis < 1.0 ) millis = 1.0;
6465     Sleep( (DWORD) millis );
// If the write region has caught up with the play/write cursors, we
// underflowed: flag the xrun and resynchronize the write pointer.
6468     if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6469          || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6470       // We've strayed into the forbidden zone ... resync the read pointer.
6471       handle->xrun[0] = true;
6472       nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6473       if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6474       handle->bufferPointer[0] = nextWritePointer;
6475       endWrite = nextWritePointer + bufferBytes;
6478     // Lock free space in the buffer
6479     result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6480                              &bufferSize1, &buffer2, &bufferSize2, 0 );
6481     if ( FAILED( result ) ) {
6482       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6483       errorText_ = errorStream_.str();
6484       MUTEX_UNLOCK( &stream_.mutex );
6485       error( RtAudioError::SYSTEM_ERROR );
6489     // Copy our buffer into the DS buffer
// Lock may return two regions when the range wraps the ring buffer end.
6490     CopyMemory( buffer1, buffer, bufferSize1 );
6491     if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6493     // Update our buffer offset and unlock sound buffer
6494     dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6495     if ( FAILED( result ) ) {
6496       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6497       errorText_ = errorStream_.str();
6498       MUTEX_UNLOCK( &stream_.mutex );
6499       error( RtAudioError::SYSTEM_ERROR );
6502     nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6503     handle->bufferPointer[0] = nextWritePointer;
6506   // Don't bother draining input
6507   if ( handle->drainCounter ) {
6508     handle->drainCounter++;
// ---- Capture half: pull input data from the DS ring buffer. ----
6512   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6514     // Setup parameters.
6515     if ( stream_.doConvertBuffer[1] ) {
6516       buffer = stream_.deviceBuffer;
6517       bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6518       bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6521       buffer = stream_.userBuffer[1];
6522       bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6523       bufferBytes *= formatBytes( stream_.userFormat );
6526     LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6527     long nextReadPointer = handle->bufferPointer[1];
6528     DWORD dsBufferSize = handle->dsBufferSize[1];
6530     // Find out where the write and "safe read" pointers are.
6531     result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6532     if ( FAILED( result ) ) {
6533       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6534       errorText_ = errorStream_.str();
6535       MUTEX_UNLOCK( &stream_.mutex );
6536       error( RtAudioError::SYSTEM_ERROR );
6540     if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6541     DWORD endRead = nextReadPointer + bufferBytes;
6543     // Handling depends on whether we are INPUT or DUPLEX.
6544     // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6545     // then a wait here will drag the write pointers into the forbidden zone.
6547     // In DUPLEX mode, rather than wait, we will back off the read pointer until
6548     // it's in a safe position. This causes dropouts, but it seems to be the only
6549     // practical way to sync up the read and write pointers reliably, given
6550     // the very complex relationship between phase and increment of the read and write
6553     // In order to minimize audible dropouts in DUPLEX mode, we will
6554     // provide a pre-roll period of 0.5 seconds in which we return
6555     // zeros from the read buffer while the pointers sync up.
6557     if ( stream_.mode == DUPLEX ) {
6558       if ( safeReadPointer < endRead ) {
6559         if ( duplexPrerollBytes <= 0 ) {
6560           // Pre-roll time over. Be more aggressive.
6561           int adjustment = endRead-safeReadPointer;
6563           handle->xrun[1] = true;
6565           // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6566           //   and perform fine adjustments later.
6567           // - small adjustments: back off by twice as much.
6568           if ( adjustment >= 2*bufferBytes )
6569             nextReadPointer = safeReadPointer-2*bufferBytes;
6571             nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6573           if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6577           // In pre-roll time. Just do it.
6578           nextReadPointer = safeReadPointer - bufferBytes;
6579           while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6581         endRead = nextReadPointer + bufferBytes;
6584     else { // mode == INPUT
6585       while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6586         // See comments for playback.
6587         double millis = (endRead - safeReadPointer) * 1000.0;
6588         millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6589         if ( millis < 1.0 ) millis = 1.0;
6590         Sleep( (DWORD) millis );
6592         // Wake up and find out where we are now.
6593         result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6594         if ( FAILED( result ) ) {
6595           errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6596           errorText_ = errorStream_.str();
6597           MUTEX_UNLOCK( &stream_.mutex );
6598           error( RtAudioError::SYSTEM_ERROR );
6602         if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6606     // Lock free space in the buffer
6607     result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6608                              &bufferSize1, &buffer2, &bufferSize2, 0 );
6609     if ( FAILED( result ) ) {
6610       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6611       errorText_ = errorStream_.str();
6612       MUTEX_UNLOCK( &stream_.mutex );
6613       error( RtAudioError::SYSTEM_ERROR );
// During duplex pre-roll we deliver silence instead of captured data.
6617     if ( duplexPrerollBytes <= 0 ) {
6618       // Copy our buffer into the DS buffer
6619       CopyMemory( buffer, buffer1, bufferSize1 );
6620       if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6623       memset( buffer, 0, bufferSize1 );
6624       if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6625       duplexPrerollBytes -= bufferSize1 + bufferSize2;
6628     // Update our buffer offset and unlock sound buffer
6629     nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6630     dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6631     if ( FAILED( result ) ) {
6632       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6633       errorText_ = errorStream_.str();
6634       MUTEX_UNLOCK( &stream_.mutex );
6635       error( RtAudioError::SYSTEM_ERROR );
6638     handle->bufferPointer[1] = nextReadPointer;
6640     // No byte swapping necessary in DirectSound implementation.
6642     // If necessary, convert 8-bit data from unsigned to signed.
6643     if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6644       for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6646     // Do buffer conversion if necessary.
6647     if ( stream_.doConvertBuffer[1] )
6648       convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6652   MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream time by one buffer's worth of frames.
6653   RtApi::tickStreamTime();
6656 // Definitions for utility functions and callbacks
6657 // specific to the DirectSound implementation.
6659 static unsigned __stdcall callbackHandler( void *ptr )
// Thread entry point created by probeDeviceOpen via _beginthreadex.
// Repeatedly services the stream until closeStream() clears
// info->isRunning; the canonical source then calls _endthreadex( 0 )
// and returns 0 (those lines are missing from this extract).
6661   CallbackInfo *info = (CallbackInfo *) ptr;
6662   RtApiDs *object = (RtApiDs *) info->object;
6663   bool* isRunning = &info->isRunning;
6665   while ( *isRunning == true ) {
6666     object->callbackEvent();
6673 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6674                                           LPCTSTR description,
// DirectSound device-enumeration callback (DSEnumCallback signature).
// Validates each enumerated device by opening it and checking its
// capabilities, then records its name/GUID in the shared dsDevices
// vector -- updating an existing entry of the same name or appending a
// new one.  Returning TRUE continues enumeration.
// NOTE(review): interior lines are missing from this extract (the third
// parameter / lpContext declaration, 'HRESULT hr;', caps struct
// declarations and several braces) -- compare against the canonical
// RtAudio.cpp.
6678   struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6679   std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6682   bool validDevice = false;
6683   if ( probeInfo.isInput == true ) {
// Input path: probe with a capture object; valid if it reports at
// least one channel and a nonzero format mask.
6685     LPDIRECTSOUNDCAPTURE object;
6687     hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6688     if ( hr != DS_OK ) return TRUE;
6690     caps.dwSize = sizeof(caps);
6691     hr = object->GetCaps( &caps );
6692     if ( hr == DS_OK ) {
6693       if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
// Output path: probe with a playback object; valid if it supports a
// mono or stereo primary buffer.
6700     LPDIRECTSOUND object;
6701     hr = DirectSoundCreate( lpguid, &object, NULL );
6702     if ( hr != DS_OK ) return TRUE;
6704     caps.dwSize = sizeof(caps);
6705     hr = object->GetCaps( &caps );
6706     if ( hr == DS_OK ) {
6707       if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6713   // If good device, then save its name and guid.
6714   std::string name = convertCharPointerToStdString( description );
6715   //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
// A NULL GUID denotes the system default device.
6716   if ( lpguid == NULL )
6717     name = "Default Device";
6718   if ( validDevice ) {
// Merge with an existing entry of the same name so a device that is
// both an input and an output occupies a single dsDevices slot
// (id[0]/validId[0] = output, id[1]/validId[1] = input).
6719     for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6720       if ( dsDevices[i].name == name ) {
6721         dsDevices[i].found = true;
6722         if ( probeInfo.isInput ) {
6723           dsDevices[i].id[1] = lpguid;
6724           dsDevices[i].validId[1] = true;
6727           dsDevices[i].id[0] = lpguid;
6728           dsDevices[i].validId[0] = true;
// No existing entry matched: append a fresh DsDevice record.
6736     device.found = true;
6737     if ( probeInfo.isInput ) {
6738       device.id[1] = lpguid;
6739       device.validId[1] = true;
6742       device.id[0] = lpguid;
6743       device.validId[0] = true;
6745     dsDevices.push_back( device );
6751 static const char* getErrorString( int code )
// Map a DirectSound HRESULT error code to a short human-readable
// string for RtApiDs error messages.  Falls through to a generic
// "unknown error" string for unrecognized codes.
// NOTE(review): the 'switch ( code ) {' line, several 'case' labels
// (e.g. DSERR_GENERIC before "Generic error") and the DSERR_NODRIVER
// return string are missing from this extract -- compare against the
// canonical RtAudio.cpp.
6755   case DSERR_ALLOCATED:
6756     return "Already allocated";
6758   case DSERR_CONTROLUNAVAIL:
6759     return "Control unavailable";
6761   case DSERR_INVALIDPARAM:
6762     return "Invalid parameter";
6764   case DSERR_INVALIDCALL:
6765     return "Invalid call";
6768     return "Generic error";
6770   case DSERR_PRIOLEVELNEEDED:
6771     return "Priority level needed";
6773   case DSERR_OUTOFMEMORY:
6774     return "Out of memory";
6776   case DSERR_BADFORMAT:
6777     return "The sample rate or the channel format is not supported";
6779   case DSERR_UNSUPPORTED:
6780     return "Not supported";
6782   case DSERR_NODRIVER:
6785   case DSERR_ALREADYINITIALIZED:
6786     return "Already initialized";
6788   case DSERR_NOAGGREGATION:
6789     return "No aggregation";
6791   case DSERR_BUFFERLOST:
6792     return "Buffer lost";
6794   case DSERR_OTHERAPPHASPRIO:
6795     return "Another application already has priority";
6797   case DSERR_UNINITIALIZED:
6798     return "Uninitialized";
6801     return "DirectSound unknown error";
6804 //******************** End of __WINDOWS_DS__ *********************//
6808 #if defined(__LINUX_ALSA__)
6810 #include <alsa/asoundlib.h>
6813 // A structure to hold various information related to the ALSA API
// implementation: the two PCM handles, link/run state flags, and the
// condition variable used to wake the callback thread.
6816 snd_pcm_t *handles[2]; // [0] = playback, [1] = capture
6819 pthread_cond_t runnable_cv; // signaled to wake the callback thread (see start/closeStream)
// Constructor: streams start unlinked (not synchronized), not runnable,
// and with both xrun flags cleared.
6823 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
6826 static void *alsaCallbackHandler( void * ptr );
// Default constructor: all real initialization happens in probeDeviceOpen().
6828 RtApiAlsa :: RtApiAlsa()
6830 // Nothing to do here.
// Destructor: make sure any open stream is torn down (thread joined,
// PCM handles closed, buffers freed) before the object goes away.
6833 RtApiAlsa :: ~RtApiAlsa()
6835 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Return the number of ALSA PCM devices found by walking every sound card
// ("hw:N") and each card's PCM subdevices, plus the "default" virtual
// device if it can be opened. Probe failures are reported as warnings
// and the offending card is skipped rather than aborting the count.
6838 unsigned int RtApiAlsa :: getDeviceCount( void )
6840 unsigned nDevices = 0;
6841 int result, subdevice, card;
6845 // Count cards and devices
6847 snd_card_next( &card );
6848 while ( card >= 0 ) {
6849 sprintf( name, "hw:%d", card );
6850 result = snd_ctl_open( &handle, name, 0 );
// Control-open failure: warn and move on to the next card.
6852 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6853 errorText_ = errorStream_.str();
6854 error( RtAudioError::WARNING );
6859 result = snd_ctl_pcm_next_device( handle, &subdevice );
6861 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6862 errorText_ = errorStream_.str();
6863 error( RtAudioError::WARNING );
// A negative subdevice index means no more PCM devices on this card.
6866 if ( subdevice < 0 )
6871 snd_ctl_close( handle );
6872 snd_card_next( &card );
// Count the "default" virtual device too, if it opens successfully.
6875 result = snd_ctl_open( &handle, "default", 0 );
6878 snd_ctl_close( handle );
// Probe and return a DeviceInfo record for ALSA device index `device`:
// output/input/duplex channel counts, supported sample rates, the
// preferred rate, native data formats, and the device name. `info.probed`
// stays false on any fatal probe failure. Enumeration order matches
// getDeviceCount(): all "hw:card,subdevice" entries first, then "default".
6884 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6886 RtAudio::DeviceInfo info;
6887 info.probed = false;
6889 unsigned nDevices = 0;
6890 int result, subdevice, card;
6894 // Count cards and devices
6897 snd_card_next( &card );
6898 while ( card >= 0 ) {
6899 sprintf( name, "hw:%d", card );
6900 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
6902 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6903 errorText_ = errorStream_.str();
6904 error( RtAudioError::WARNING );
6909 result = snd_ctl_pcm_next_device( chandle, &subdevice );
6911 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6912 errorText_ = errorStream_.str();
6913 error( RtAudioError::WARNING );
6916 if ( subdevice < 0 ) break;
// Found the requested device: remember its "hw:card,subdevice" name.
6917 if ( nDevices == device ) {
6918 sprintf( name, "hw:%d,%d", card, subdevice );
6924 snd_ctl_close( chandle );
6925 snd_card_next( &card );
// The "default" virtual device is enumerated last (subdevice stays -1).
6928 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
6929 if ( result == 0 ) {
6930 if ( nDevices == device ) {
6931 strcpy( name, "default" );
6937 if ( nDevices == 0 ) {
6938 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
6939 error( RtAudioError::INVALID_USE );
6943 if ( device >= nDevices ) {
6944 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
6945 error( RtAudioError::INVALID_USE );
6951 // If a stream is already open, we cannot probe the stream devices.
6952 // Thus, use the saved results.
6953 if ( stream_.state != STREAM_CLOSED &&
6954 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
6955 snd_ctl_close( chandle );
6956 if ( device >= devices_.size() ) {
6957 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
6958 error( RtAudioError::WARNING );
6961 return devices_[ device ];
6964 int openMode = SND_PCM_ASYNC;
6965 snd_pcm_stream_t stream;
6966 snd_pcm_info_t *pcminfo;
6967 snd_pcm_info_alloca( &pcminfo );
6969 snd_pcm_hw_params_t *params;
// NOTE(review): "¶ms" below looks like a mis-encoded "&params"
// (HTML-entity corruption) — fix the encoding.
6970 snd_pcm_hw_params_alloca( ¶ms );
6972 // First try for playback unless default device (which has subdev -1)
6973 stream = SND_PCM_STREAM_PLAYBACK;
6974 snd_pcm_info_set_stream( pcminfo, stream );
6975 if ( subdevice != -1 ) {
6976 snd_pcm_info_set_device( pcminfo, subdevice );
6977 snd_pcm_info_set_subdevice( pcminfo, 0 );
6979 result = snd_ctl_pcm_info( chandle, pcminfo );
6981 // Device probably doesn't support playback.
6986 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
6988 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
6989 errorText_ = errorStream_.str();
6990 error( RtAudioError::WARNING );
6994 // The device is open ... fill the parameter structure.
6995 result = snd_pcm_hw_params_any( phandle, params );
6997 snd_pcm_close( phandle );
6998 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
6999 errorText_ = errorStream_.str();
7000 error( RtAudioError::WARNING );
7004 // Get output channel information.
7006 result = snd_pcm_hw_params_get_channels_max( params, &value );
7008 snd_pcm_close( phandle );
7009 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7010 errorText_ = errorStream_.str();
7011 error( RtAudioError::WARNING );
7014 info.outputChannels = value;
7015 snd_pcm_close( phandle );
// Repeat the probe for the capture direction.
7018 stream = SND_PCM_STREAM_CAPTURE;
7019 snd_pcm_info_set_stream( pcminfo, stream );
7021 // Now try for capture unless default device (with subdev = -1)
7022 if ( subdevice != -1 ) {
7023 result = snd_ctl_pcm_info( chandle, pcminfo );
7024 snd_ctl_close( chandle );
7026 // Device probably doesn't support capture.
7027 if ( info.outputChannels == 0 ) return info;
7028 goto probeParameters;
7032 snd_ctl_close( chandle );
7034 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7036 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7037 errorText_ = errorStream_.str();
7038 error( RtAudioError::WARNING );
7039 if ( info.outputChannels == 0 ) return info;
// If capture fails but playback worked, still probe rates/formats below.
7040 goto probeParameters;
7043 // The device is open ... fill the parameter structure.
7044 result = snd_pcm_hw_params_any( phandle, params );
7046 snd_pcm_close( phandle );
7047 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7048 errorText_ = errorStream_.str();
7049 error( RtAudioError::WARNING );
7050 if ( info.outputChannels == 0 ) return info;
7051 goto probeParameters;
7054 result = snd_pcm_hw_params_get_channels_max( params, &value );
7056 snd_pcm_close( phandle );
7057 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7058 errorText_ = errorStream_.str();
7059 error( RtAudioError::WARNING );
7060 if ( info.outputChannels == 0 ) return info;
7061 goto probeParameters;
7063 info.inputChannels = value;
7064 snd_pcm_close( phandle );
7066 // If device opens for both playback and capture, we determine the channels.
7067 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7068 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7070 // ALSA doesn't provide default devices so we'll use the first available one.
7071 if ( device == 0 && info.outputChannels > 0 )
7072 info.isDefaultOutput = true;
7073 if ( device == 0 && info.inputChannels > 0 )
7074 info.isDefaultInput = true;
// --- probeParameters section (goto target) ---
7077 // At this point, we just need to figure out the supported data
7078 // formats and sample rates. We'll proceed by opening the device in
7079 // the direction with the maximum number of channels, or playback if
7080 // they are equal. This might limit our sample rate options, but so
7083 if ( info.outputChannels >= info.inputChannels )
7084 stream = SND_PCM_STREAM_PLAYBACK;
7086 stream = SND_PCM_STREAM_CAPTURE;
7087 snd_pcm_info_set_stream( pcminfo, stream );
7089 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7091 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7092 errorText_ = errorStream_.str();
7093 error( RtAudioError::WARNING );
7097 // The device is open ... fill the parameter structure.
7098 result = snd_pcm_hw_params_any( phandle, params );
7100 snd_pcm_close( phandle );
7101 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7102 errorText_ = errorStream_.str();
7103 error( RtAudioError::WARNING );
7107 // Test our discrete set of sample rate values.
7108 info.sampleRates.clear();
7109 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7110 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7111 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz.
7113 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7114 info.preferredSampleRate = SAMPLE_RATES[i];
7117 if ( info.sampleRates.size() == 0 ) {
7118 snd_pcm_close( phandle );
7119 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7120 errorText_ = errorStream_.str();
7121 error( RtAudioError::WARNING );
7125 // Probe the supported data formats ... we don't care about endian-ness just yet
7126 snd_pcm_format_t format;
7127 info.nativeFormats = 0;
7128 format = SND_PCM_FORMAT_S8;
7129 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7130 info.nativeFormats |= RTAUDIO_SINT8;
7131 format = SND_PCM_FORMAT_S16;
7132 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7133 info.nativeFormats |= RTAUDIO_SINT16;
7134 format = SND_PCM_FORMAT_S24;
7135 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7136 info.nativeFormats |= RTAUDIO_SINT24;
7137 format = SND_PCM_FORMAT_S32;
7138 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7139 info.nativeFormats |= RTAUDIO_SINT32;
7140 format = SND_PCM_FORMAT_FLOAT;
7141 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7142 info.nativeFormats |= RTAUDIO_FLOAT32;
7143 format = SND_PCM_FORMAT_FLOAT64;
7144 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7145 info.nativeFormats |= RTAUDIO_FLOAT64;
7147 // Check that we have at least one supported format
7148 if ( info.nativeFormats == 0 ) {
7149 snd_pcm_close( phandle );
7150 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7151 errorText_ = errorStream_.str();
7152 error( RtAudioError::WARNING );
7156 // Get the device name
7158 result = snd_card_get_name( card, &cardname );
7159 if ( result >= 0 ) {
7160 sprintf( name, "hw:%s,%d", cardname, subdevice );
7165 // That's all ... close the device and return
7166 snd_pcm_close( phandle );
// Snapshot DeviceInfo for every device into devices_. Called before a
// stream is opened so getDeviceInfo() can serve cached results for
// devices that can no longer be probed while in use.
7171 void RtApiAlsa :: saveDeviceInfo( void )
7175 unsigned int nDevices = getDeviceCount();
7176 devices_.resize( nDevices );
7177 for ( unsigned int i=0; i<nDevices; i++ )
7178 devices_[i] = getDeviceInfo( i );
// Open and configure one direction (OUTPUT or INPUT) of an ALSA stream:
// locate the device name, open the PCM, negotiate access/format/rate/
// channels/period size, install hw and sw parameters, allocate user and
// conversion buffers, and (on the first direction) create the callback
// thread. Returns true on success; on failure, releases everything
// acquired so far (see the cleanup section at the end) and returns via
// the elided FAILURE_RETURN paths.
//   bufferSize is in/out: updated to the period size actually granted.
7181 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7182 unsigned int firstChannel, unsigned int sampleRate,
7183 RtAudioFormat format, unsigned int *bufferSize,
7184 RtAudio::StreamOptions *options )
7187 #if defined(__RTAUDIO_DEBUG__)
7189 snd_output_stdio_attach(&out, stderr, 0);
7192 // I'm not using the "plug" interface ... too much inconsistent behavior.
7194 unsigned nDevices = 0;
7195 int result, subdevice, card;
// The RTAUDIO_ALSA_USE_DEFAULT flag forces the "default" virtual device.
7199 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7200 snprintf(name, sizeof(name), "%s", "default");
7202 // Count cards and devices
7204 snd_card_next( &card );
7205 while ( card >= 0 ) {
7206 sprintf( name, "hw:%d", card );
7207 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7209 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7210 errorText_ = errorStream_.str();
7215 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7216 if ( result < 0 ) break;
7217 if ( subdevice < 0 ) break;
7218 if ( nDevices == device ) {
7219 sprintf( name, "hw:%d,%d", card, subdevice );
7220 snd_ctl_close( chandle );
7225 snd_ctl_close( chandle );
7226 snd_card_next( &card );
7229 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7230 if ( result == 0 ) {
7231 if ( nDevices == device ) {
7232 strcpy( name, "default" );
7238 if ( nDevices == 0 ) {
7239 // This should not happen because a check is made before this function is called.
7240 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7244 if ( device >= nDevices ) {
7245 // This should not happen because a check is made before this function is called.
7246 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7253 // The getDeviceInfo() function will not work for a device that is
7254 // already open. Thus, we'll probe the system before opening a
7255 // stream and save the results for use by getDeviceInfo().
7256 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7257 this->saveDeviceInfo();
7259 snd_pcm_stream_t stream;
7260 if ( mode == OUTPUT )
7261 stream = SND_PCM_STREAM_PLAYBACK;
7263 stream = SND_PCM_STREAM_CAPTURE;
7266 int openMode = SND_PCM_ASYNC;
7267 result = snd_pcm_open( &phandle, name, stream, openMode );
7269 if ( mode == OUTPUT )
7270 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7272 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7273 errorText_ = errorStream_.str();
7277 // Fill the parameter structure.
7278 snd_pcm_hw_params_t *hw_params;
7279 snd_pcm_hw_params_alloca( &hw_params );
7280 result = snd_pcm_hw_params_any( phandle, hw_params );
7282 snd_pcm_close( phandle );
7283 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7284 errorText_ = errorStream_.str();
7288 #if defined(__RTAUDIO_DEBUG__)
7289 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7290 snd_pcm_hw_params_dump( hw_params, out );
7293 // Set access ... check user preference.
// Try the user's preferred interleaving first, fall back to the other
// and record what the device actually supports in deviceInterleaved[].
7294 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7295 stream_.userInterleaved = false;
7296 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7298 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7299 stream_.deviceInterleaved[mode] = true;
7302 stream_.deviceInterleaved[mode] = false;
7305 stream_.userInterleaved = true;
7306 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7308 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7309 stream_.deviceInterleaved[mode] = false;
7312 stream_.deviceInterleaved[mode] = true;
7316 snd_pcm_close( phandle );
7317 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7318 errorText_ = errorStream_.str();
7322 // Determine how to set the device format.
7323 stream_.userFormat = format;
7324 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7326 if ( format == RTAUDIO_SINT8 )
7327 deviceFormat = SND_PCM_FORMAT_S8;
7328 else if ( format == RTAUDIO_SINT16 )
7329 deviceFormat = SND_PCM_FORMAT_S16;
7330 else if ( format == RTAUDIO_SINT24 )
7331 deviceFormat = SND_PCM_FORMAT_S24;
7332 else if ( format == RTAUDIO_SINT32 )
7333 deviceFormat = SND_PCM_FORMAT_S32;
7334 else if ( format == RTAUDIO_FLOAT32 )
7335 deviceFormat = SND_PCM_FORMAT_FLOAT;
7336 else if ( format == RTAUDIO_FLOAT64 )
7337 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7339 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7340 stream_.deviceFormat[mode] = format;
7344 // The user requested format is not natively supported by the device.
// Fall back through formats from widest to narrowest; a mismatch with
// userFormat triggers buffer conversion below.
7345 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7346 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7347 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7351 deviceFormat = SND_PCM_FORMAT_FLOAT;
7352 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7353 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7357 deviceFormat = SND_PCM_FORMAT_S32;
7358 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7359 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7363 deviceFormat = SND_PCM_FORMAT_S24;
7364 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7365 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7369 deviceFormat = SND_PCM_FORMAT_S16;
7370 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7371 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7375 deviceFormat = SND_PCM_FORMAT_S8;
7376 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7377 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7381 // If we get here, no supported format was found.
7382 snd_pcm_close( phandle );
7383 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7384 errorText_ = errorStream_.str();
7388 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7390 snd_pcm_close( phandle );
7391 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7392 errorText_ = errorStream_.str();
7396 // Determine whether byte-swapping is necessary.
7397 stream_.doByteSwap[mode] = false;
7398 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7399 result = snd_pcm_format_cpu_endian( deviceFormat );
7401 stream_.doByteSwap[mode] = true;
7402 else if (result < 0) {
7403 snd_pcm_close( phandle );
7404 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7405 errorText_ = errorStream_.str();
7410 // Set the sample rate.
7411 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7413 snd_pcm_close( phandle );
7414 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7415 errorText_ = errorStream_.str();
7419 // Determine the number of channels for this device. We support a possible
7420 // minimum device channel number > than the value requested by the user.
7421 stream_.nUserChannels[mode] = channels;
7423 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7424 unsigned int deviceChannels = value;
7425 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7426 snd_pcm_close( phandle );
7427 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7428 errorText_ = errorStream_.str();
7432 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7434 snd_pcm_close( phandle );
7435 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7436 errorText_ = errorStream_.str();
7439 deviceChannels = value;
7440 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7441 stream_.nDeviceChannels[mode] = deviceChannels;
7443 // Set the device channels.
7444 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7446 snd_pcm_close( phandle );
7447 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7448 errorText_ = errorStream_.str();
7452 // Set the buffer (or period) size.
7454 snd_pcm_uframes_t periodSize = *bufferSize;
7455 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7457 snd_pcm_close( phandle );
7458 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7459 errorText_ = errorStream_.str();
// Report the granted period size back to the caller.
7462 *bufferSize = periodSize;
7464 // Set the buffer number, which in ALSA is referred to as the "period".
7465 unsigned int periods = 0;
7466 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7467 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7468 if ( periods < 2 ) periods = 4; // a fairly safe default value
7469 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7471 snd_pcm_close( phandle );
7472 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7473 errorText_ = errorStream_.str();
7477 // If attempting to setup a duplex stream, the bufferSize parameter
7478 // MUST be the same in both directions!
7479 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7480 snd_pcm_close( phandle );
7481 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7482 errorText_ = errorStream_.str();
7486 stream_.bufferSize = *bufferSize;
7488 // Install the hardware configuration
7489 result = snd_pcm_hw_params( phandle, hw_params );
7491 snd_pcm_close( phandle );
7492 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7493 errorText_ = errorStream_.str();
7497 #if defined(__RTAUDIO_DEBUG__)
7498 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7499 snd_pcm_hw_params_dump( hw_params, out );
7502 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7503 snd_pcm_sw_params_t *sw_params = NULL;
7504 snd_pcm_sw_params_alloca( &sw_params );
7505 snd_pcm_sw_params_current( phandle, sw_params );
7506 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7507 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7508 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7510 // The following two settings were suggested by Theo Veenker
7511 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7512 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7514 // here are two options for a fix
7515 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
// Use the ring-buffer boundary as the silence size so xruns play silence.
7516 snd_pcm_uframes_t val;
7517 snd_pcm_sw_params_get_boundary( sw_params, &val );
7518 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7520 result = snd_pcm_sw_params( phandle, sw_params );
7522 snd_pcm_close( phandle );
7523 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7524 errorText_ = errorStream_.str();
7528 #if defined(__RTAUDIO_DEBUG__)
7529 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7530 snd_pcm_sw_params_dump( sw_params, out );
7533 // Set flags for buffer conversion
7534 stream_.doConvertBuffer[mode] = false;
7535 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7536 stream_.doConvertBuffer[mode] = true;
7537 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7538 stream_.doConvertBuffer[mode] = true;
7539 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7540 stream_.nUserChannels[mode] > 1 )
7541 stream_.doConvertBuffer[mode] = true;
7543 // Allocate the ApiHandle if necessary and then save.
7544 AlsaHandle *apiInfo = 0;
7545 if ( stream_.apiHandle == 0 ) {
7547 apiInfo = (AlsaHandle *) new AlsaHandle;
7549 catch ( std::bad_alloc& ) {
7550 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7554 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7555 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7559 stream_.apiHandle = (void *) apiInfo;
7560 apiInfo->handles[0] = 0;
7561 apiInfo->handles[1] = 0;
7564 apiInfo = (AlsaHandle *) stream_.apiHandle;
7566 apiInfo->handles[mode] = phandle;
7569 // Allocate necessary internal buffers.
7570 unsigned long bufferBytes;
7571 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7572 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7573 if ( stream_.userBuffer[mode] == NULL ) {
7574 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7578 if ( stream_.doConvertBuffer[mode] ) {
7580 bool makeBuffer = true;
7581 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
// Reuse an existing (larger) output-side device buffer for duplex input.
7582 if ( mode == INPUT ) {
7583 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7584 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7585 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7590 bufferBytes *= *bufferSize;
7591 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7592 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7593 if ( stream_.deviceBuffer == NULL ) {
7594 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7600 stream_.sampleRate = sampleRate;
7601 stream_.nBuffers = periods;
7602 stream_.device[mode] = device;
7603 stream_.state = STREAM_STOPPED;
7605 // Setup the buffer conversion information structure.
7606 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7608 // Setup thread if necessary.
7609 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7610 // We had already set up an output stream.
7611 stream_.mode = DUPLEX;
7612 // Link the streams if possible.
7613 apiInfo->synchronized = false;
7614 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7615 apiInfo->synchronized = true;
7617 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7618 error( RtAudioError::WARNING );
7622 stream_.mode = mode;
7624 // Setup callback thread.
7625 stream_.callbackInfo.object = (void *) this;
7627 // Set the thread attributes for joinable and realtime scheduling
7628 // priority (optional). The higher priority will only take effect
7629 // if the program is run as root or suid. Note, under Linux
7630 // processes with CAP_SYS_NICE privilege, a user can change
7631 // scheduling policy and priority (thus need not be root). See
7632 // POSIX "capabilities".
7633 pthread_attr_t attr;
7634 pthread_attr_init( &attr );
7635 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7636 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7637 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7638 stream_.callbackInfo.doRealtime = true;
7639 struct sched_param param;
7640 int priority = options->priority;
7641 int min = sched_get_priority_min( SCHED_RR );
7642 int max = sched_get_priority_max( SCHED_RR );
7643 if ( priority < min ) priority = min;
7644 else if ( priority > max ) priority = max;
7645 param.sched_priority = priority;
7647 // Set the policy BEFORE the priority. Otherwise it fails.
7648 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7649 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7650 // This is definitely required. Otherwise it fails.
7651 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): "¶m" below looks like a mis-encoded "&param"
// (HTML-entity corruption) — fix the encoding.
7652 pthread_attr_setschedparam(&attr, ¶m);
7655 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7657 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7660 stream_.callbackInfo.isRunning = true;
7661 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7662 pthread_attr_destroy( &attr );
7664 // Failed. Try instead with default attributes.
7665 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7667 stream_.callbackInfo.isRunning = false;
7668 errorText_ = "RtApiAlsa::error creating callback thread!";
// --- error cleanup: release everything acquired before failing ---
7678 pthread_cond_destroy( &apiInfo->runnable_cv );
7679 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7680 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7682 stream_.apiHandle = 0;
7685 if ( phandle) snd_pcm_close( phandle );
7687 for ( int i=0; i<2; i++ ) {
7688 if ( stream_.userBuffer[i] ) {
7689 free( stream_.userBuffer[i] );
7690 stream_.userBuffer[i] = 0;
7694 if ( stream_.deviceBuffer ) {
7695 free( stream_.deviceBuffer );
7696 stream_.deviceBuffer = 0;
7699 stream_.state = STREAM_CLOSED;
// Close the open stream: stop the callback thread (waking it first if the
// stream is merely stopped), drop any running PCMs, then destroy the
// condition variable, close both PCM handles, free the AlsaHandle and all
// user/device buffers, and reset the stream to UNINITIALIZED/CLOSED.
7703 void RtApiAlsa :: closeStream()
7705 if ( stream_.state == STREAM_CLOSED ) {
7706 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7707 error( RtAudioError::WARNING );
7711 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7712 stream_.callbackInfo.isRunning = false;
7713 MUTEX_LOCK( &stream_.mutex );
// A stopped callback thread is blocked on runnable_cv; wake it so the
// pthread_join below can complete.
7714 if ( stream_.state == STREAM_STOPPED ) {
7715 apiInfo->runnable = true;
7716 pthread_cond_signal( &apiInfo->runnable_cv );
7718 MUTEX_UNLOCK( &stream_.mutex );
7719 pthread_join( stream_.callbackInfo.thread, NULL );
7721 if ( stream_.state == STREAM_RUNNING ) {
7722 stream_.state = STREAM_STOPPED;
7723 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7724 snd_pcm_drop( apiInfo->handles[0] );
7725 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7726 snd_pcm_drop( apiInfo->handles[1] );
// Release ALSA/pthread resources and internal buffers.
7730 pthread_cond_destroy( &apiInfo->runnable_cv );
7731 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7732 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7734 stream_.apiHandle = 0;
7737 for ( int i=0; i<2; i++ ) {
7738 if ( stream_.userBuffer[i] ) {
7739 free( stream_.userBuffer[i] );
7740 stream_.userBuffer[i] = 0;
7744 if ( stream_.deviceBuffer ) {
7745 free( stream_.deviceBuffer );
7746 stream_.deviceBuffer = 0;
7749 stream_.mode = UNINITIALIZED;
7750 stream_.state = STREAM_CLOSED;
// Start a stopped stream: prepare the playback and/or capture PCMs as
// needed, mark the stream RUNNING, and signal the callback thread via
// runnable_cv. Reports SYSTEM_ERROR if any ALSA call failed.
7753 void RtApiAlsa :: startStream()
7755 // This method calls snd_pcm_prepare if the device isn't already in that state.
7758 if ( stream_.state == STREAM_RUNNING ) {
7759 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7760 error( RtAudioError::WARNING );
7764 MUTEX_LOCK( &stream_.mutex );
7767 snd_pcm_state_t state;
7768 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7769 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7770 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7771 state = snd_pcm_state( handle[0] );
7772 if ( state != SND_PCM_STATE_PREPARED ) {
7773 result = snd_pcm_prepare( handle[0] );
7775 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7776 errorText_ = errorStream_.str();
// The input side only needs separate handling when not linked to output.
7782 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7783 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7784 state = snd_pcm_state( handle[1] );
7785 if ( state != SND_PCM_STATE_PREPARED ) {
7786 result = snd_pcm_prepare( handle[1] );
7788 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7789 errorText_ = errorStream_.str();
7795 stream_.state = STREAM_RUNNING;
// Wake the callback thread, which waits on runnable_cv while stopped.
7798 apiInfo->runnable = true;
7799 pthread_cond_signal( &apiInfo->runnable_cv );
7800 MUTEX_UNLOCK( &stream_.mutex );
7802 if ( result >= 0 ) return;
7803 error( RtAudioError::SYSTEM_ERROR );
// RtApiAlsa::stopStream(): stops the stream, letting queued output samples
// play out (snd_pcm_drain) unless handles are link-synchronized, in which
// case output is dropped immediately. Input is always dropped.
// NOTE(review): listing elides `result` declaration, else branches, error
// returns and closing braces; code kept verbatim.
7806 void RtApiAlsa :: stopStream()
7809 if ( stream_.state == STREAM_STOPPED ) {
7810 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
7811 error( RtAudioError::WARNING );
// State is flipped before taking the mutex so the callback thread sees it.
7815 stream_.state = STREAM_STOPPED;
7816 MUTEX_LOCK( &stream_.mutex );
7819 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7820 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7821 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked handles cannot drain independently, so drop instead.
7822 if ( apiInfo->synchronized )
7823 result = snd_pcm_drop( handle[0] );
7825 result = snd_pcm_drain( handle[0] );
7827 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
7828 errorText_ = errorStream_.str();
7833 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7834 result = snd_pcm_drop( handle[1] );
7836 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
7837 errorText_ = errorStream_.str();
7843 apiInfo->runnable = false; // fixes high CPU usage when stopped
7844 MUTEX_UNLOCK( &stream_.mutex );
7846 if ( result >= 0 ) return;
7847 error( RtAudioError::SYSTEM_ERROR );
// RtApiAlsa::abortStream(): like stopStream() but always discards pending
// samples immediately (snd_pcm_drop on both handles) — nothing is drained.
// NOTE(review): listing elides `result` declaration, error returns and
// closing braces; code kept verbatim.
7850 void RtApiAlsa :: abortStream()
7853 if ( stream_.state == STREAM_STOPPED ) {
7854 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
7855 error( RtAudioError::WARNING );
7859 stream_.state = STREAM_STOPPED;
7860 MUTEX_LOCK( &stream_.mutex );
7863 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7864 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7865 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7866 result = snd_pcm_drop( handle[0] );
7868 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
7869 errorText_ = errorStream_.str();
// Input handle skipped when link-synchronized with output (dropped together).
7874 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7875 result = snd_pcm_drop( handle[1] );
7877 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
7878 errorText_ = errorStream_.str();
7884 apiInfo->runnable = false; // fixes high CPU usage when stopped
7885 MUTEX_UNLOCK( &stream_.mutex );
7887 if ( result >= 0 ) return;
7888 error( RtAudioError::SYSTEM_ERROR );
// RtApiAlsa::callbackEvent(): one iteration of the audio duty cycle, driven
// by alsaCallbackHandler's loop. Sequence: (1) if stopped, block on
// runnable_cv until startStream() signals; (2) invoke the user callback with
// xrun status flags; (3) read input from ALSA, byte-swap/convert as needed;
// (4) convert/byte-swap and write output to ALSA; (5) update latency, tick
// stream time, and honor the callback's stop/abort request.
// NOTE(review): listing elides declarations (`result`, `buffer`, `channels`,
// `handle`), else branches, the `unlock:` label, and closing braces; code
// kept verbatim.
7891 void RtApiAlsa :: callbackEvent()
7893 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// When stopped, park the callback thread here instead of spinning.
7894 if ( stream_.state == STREAM_STOPPED ) {
7895 MUTEX_LOCK( &stream_.mutex );
7896 while ( !apiInfo->runnable )
7897 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
7899 if ( stream_.state != STREAM_RUNNING ) {
7900 MUTEX_UNLOCK( &stream_.mutex );
7903 MUTEX_UNLOCK( &stream_.mutex );
7906 if ( stream_.state == STREAM_CLOSED ) {
7907 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
7908 error( RtAudioError::WARNING );
// Report any under/overflow since the last cycle to the user callback,
// then clear the flags.
7912 int doStopStream = 0;
7913 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
7914 double streamTime = getStreamTime();
7915 RtAudioStreamStatus status = 0;
7916 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
7917 status |= RTAUDIO_OUTPUT_UNDERFLOW;
7918 apiInfo->xrun[0] = false;
7920 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
7921 status |= RTAUDIO_INPUT_OVERFLOW;
7922 apiInfo->xrun[1] = false;
7924 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
7925 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Return value 2 requests an immediate abort (elided branch presumably calls
// abortStream() — confirm against upstream RtAudio).
7927 if ( doStopStream == 2 ) {
7932 MUTEX_LOCK( &stream_.mutex );
7934 // The state might change while waiting on a mutex.
7935 if ( stream_.state == STREAM_STOPPED ) goto unlock;
7941 snd_pcm_sframes_t frames;
7942 RtAudioFormat format;
7943 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Input side: pull a buffer of frames from the capture device. ----
7945 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
7947 // Setup parameters.
7948 if ( stream_.doConvertBuffer[1] ) {
7949 buffer = stream_.deviceBuffer;
7950 channels = stream_.nDeviceChannels[1];
7951 format = stream_.deviceFormat[1];
7954 buffer = stream_.userBuffer[1];
7955 channels = stream_.nUserChannels[1];
7956 format = stream_.userFormat;
7959 // Read samples from device in interleaved/non-interleaved format.
7960 if ( stream_.deviceInterleaved[1] )
7961 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved path: build per-channel pointer array (VLA) into `buffer`.
7963 void *bufs[channels];
7964 size_t offset = stream_.bufferSize * formatBytes( format );
7965 for ( int i=0; i<channels; i++ )
7966 bufs[i] = (void *) (buffer + (i * offset));
7967 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
// Short read: -EPIPE signals an overrun; re-prepare the device to recover.
7970 if ( result < (int) stream_.bufferSize ) {
7971 // Either an error or overrun occured.
7972 if ( result == -EPIPE ) {
7973 snd_pcm_state_t state = snd_pcm_state( handle[1] );
7974 if ( state == SND_PCM_STATE_XRUN ) {
7975 apiInfo->xrun[1] = true;
7976 result = snd_pcm_prepare( handle[1] );
7978 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
7979 errorText_ = errorStream_.str();
7983 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
7984 errorText_ = errorStream_.str();
7988 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
7989 errorText_ = errorStream_.str();
// Read failures are warnings; the stream keeps running.
7991 error( RtAudioError::WARNING );
7995 // Do byte swapping if necessary.
7996 if ( stream_.doByteSwap[1] )
7997 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
7999 // Do buffer conversion if necessary.
8000 if ( stream_.doConvertBuffer[1] )
8001 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8003 // Check stream latency
8004 result = snd_pcm_delay( handle[1], &frames );
8005 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Output side: push the callback's buffer to the playback device. ----
8010 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8012 // Setup parameters and do buffer conversion if necessary.
8013 if ( stream_.doConvertBuffer[0] ) {
8014 buffer = stream_.deviceBuffer;
8015 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8016 channels = stream_.nDeviceChannels[0];
8017 format = stream_.deviceFormat[0];
8020 buffer = stream_.userBuffer[0];
8021 channels = stream_.nUserChannels[0];
8022 format = stream_.userFormat;
8025 // Do byte swapping if necessary.
8026 if ( stream_.doByteSwap[0] )
8027 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8029 // Write samples to device in interleaved/non-interleaved format.
8030 if ( stream_.deviceInterleaved[0] )
8031 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8033 void *bufs[channels];
8034 size_t offset = stream_.bufferSize * formatBytes( format );
8035 for ( int i=0; i<channels; i++ )
8036 bufs[i] = (void *) (buffer + (i * offset));
8037 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
// Short write: -EPIPE signals an underrun; re-prepare to recover.
8040 if ( result < (int) stream_.bufferSize ) {
8041 // Either an error or underrun occured.
8042 if ( result == -EPIPE ) {
8043 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8044 if ( state == SND_PCM_STATE_XRUN ) {
8045 apiInfo->xrun[0] = true;
8046 result = snd_pcm_prepare( handle[0] );
8048 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8049 errorText_ = errorStream_.str();
8052 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8055 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8056 errorText_ = errorStream_.str();
8060 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8061 errorText_ = errorStream_.str();
8063 error( RtAudioError::WARNING );
8067 // Check stream latency
8068 result = snd_pcm_delay( handle[0], &frames );
8069 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
// The elided `unlock:` label target sits before this unlock (goto above).
8073 MUTEX_UNLOCK( &stream_.mutex );
8075 RtApi::tickStreamTime();
8076 if ( doStopStream == 1 ) this->stopStream();
// alsaCallbackHandler(): entry point for the ALSA callback thread. Loops
// calling RtApiAlsa::callbackEvent() until CallbackInfo::isRunning goes
// false, testing for pthread cancellation each iteration.
// NOTE(review): listing elides braces/blank lines; code kept verbatim.
8079 static void *alsaCallbackHandler( void *ptr )
8081 CallbackInfo *info = (CallbackInfo *) ptr;
8082 RtApiAlsa *object = (RtApiAlsa *) info->object;
8083 bool *isRunning = &info->isRunning;
8085 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: reports whether realtime scheduling actually took effect.
8086 if ( info->doRealtime ) {
8087 std::cerr << "RtAudio alsa: " <<
8088 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8089 "running realtime scheduling" << std::endl;
8093 while ( *isRunning == true ) {
8094 pthread_testcancel();
8095 object->callbackEvent();
8098 pthread_exit( NULL );
8101 //******************** End of __LINUX_ALSA__ *********************//
8104 #if defined(__LINUX_PULSE__)
8106 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8107 // and Tristan Matthews.
8109 #include <pulse/error.h>
8110 #include <pulse/simple.h>
// File-scope data for the PulseAudio backend:
// - SUPPORTED_SAMPLERATES: zero-terminated list of rates offered to clients.
// - rtaudio_pa_format_mapping_t / supported_sampleformats: RtAudio-format ->
//   pa_sample_format_t translation table, terminated by {0, PA_SAMPLE_INVALID}.
// - PulseAudioHandle: per-stream state (playback/record pa_simple handles,
//   callback thread, runnable flag + condvar).
// NOTE(review): listing elides struct members (s_play, s_rec, thread,
// runnable declarations) and closing braces; code kept verbatim.
8113 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8114 44100, 48000, 96000, 0};
8116 struct rtaudio_pa_format_mapping_t {
8117 RtAudioFormat rtaudio_format;
8118 pa_sample_format_t pa_format;
8121 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8122 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8123 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8124 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8125 {0, PA_SAMPLE_INVALID}};
8127 struct PulseAudioHandle {
8131 pthread_cond_t runnable_cv;
8133 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: tears down any stream still open (the elided body line
// presumably calls closeStream() — confirm against upstream RtAudio).
8136 RtApiPulse::~RtApiPulse()
8138 if ( stream_.state != STREAM_CLOSED )
8142 unsigned int RtApiPulse::getDeviceCount( void )
// getDeviceInfo(): returns a fixed description of the single "PulseAudio"
// device — stereo in/out/duplex, default for both directions, all rates in
// SUPPORTED_SAMPLERATES, preferred 48 kHz, S16/S32/F32 native formats.
// The device index argument is ignored. NOTE(review): listing elides the
// `return info;` and braces; code kept verbatim.
8147 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8149 RtAudio::DeviceInfo info;
8151 info.name = "PulseAudio";
8152 info.outputChannels = 2;
8153 info.inputChannels = 2;
8154 info.duplexChannels = 2;
8155 info.isDefaultOutput = true;
8156 info.isDefaultInput = true;
// SUPPORTED_SAMPLERATES is zero-terminated; copy every entry.
8158 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8159 info.sampleRates.push_back( *sr );
8161 info.preferredSampleRate = 48000;
8162 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// pulseaudio_callback(): entry point for the PulseAudio callback thread —
// same shape as alsaCallbackHandler: loop calling callbackEvent() until
// CallbackInfo::isRunning clears, honoring pthread cancellation points.
// NOTE(review): listing elides braces/blank lines; code kept verbatim.
8167 static void *pulseaudio_callback( void * user )
8169 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8170 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8171 volatile bool *isRunning = &cbi->isRunning;
8173 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8174 if (cbi->doRealtime) {
8175 std::cerr << "RtAudio pulse: " <<
8176 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8177 "running realtime scheduling" << std::endl;
8181 while ( *isRunning ) {
8182 pthread_testcancel();
8183 context->callbackEvent();
8186 pthread_exit( NULL );
// closeStream(): full teardown — stop the callback thread (waking it first
// if the stream is merely stopped), join it, flush/free both pa_simple
// connections, destroy the condvar, and release user buffers before marking
// the stream CLOSED. NOTE(review): listing elides the `delete pah` /
// s_rec null-check lines and closing braces; code kept verbatim.
8189 void RtApiPulse::closeStream( void )
8191 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8193 stream_.callbackInfo.isRunning = false;
8195 MUTEX_LOCK( &stream_.mutex );
// A stopped callback thread is parked on runnable_cv; wake it so it can
// observe isRunning == false and exit.
8196 if ( stream_.state == STREAM_STOPPED ) {
8197 pah->runnable = true;
8198 pthread_cond_signal( &pah->runnable_cv );
8200 MUTEX_UNLOCK( &stream_.mutex );
8202 pthread_join( pah->thread, 0 );
8203 if ( pah->s_play ) {
// Flush pending playback samples before freeing the connection.
8204 pa_simple_flush( pah->s_play, NULL );
8205 pa_simple_free( pah->s_play );
8208 pa_simple_free( pah->s_rec );
8210 pthread_cond_destroy( &pah->runnable_cv );
8212 stream_.apiHandle = 0;
8215 if ( stream_.userBuffer[0] ) {
8216 free( stream_.userBuffer[0] );
8217 stream_.userBuffer[0] = 0;
8219 if ( stream_.userBuffer[1] ) {
8220 free( stream_.userBuffer[1] );
8221 stream_.userBuffer[1] = 0;
8224 stream_.state = STREAM_CLOSED;
8225 stream_.mode = UNINITIALIZED;
// RtApiPulse::callbackEvent(): one duty cycle on the PulseAudio callback
// thread. Blocks on runnable_cv while stopped, invokes the user callback,
// then does blocking pa_simple_write (output, with optional user->device
// conversion first) and pa_simple_read (input, with optional device->user
// conversion after). NOTE(review): listing elides declarations (`bytes`,
// `pa_error`), else keywords, the `unlock:` label and closing braces; code
// kept verbatim.
8228 void RtApiPulse::callbackEvent( void )
8230 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8232 if ( stream_.state == STREAM_STOPPED ) {
8233 MUTEX_LOCK( &stream_.mutex );
// Park here until startStream() (or closeStream()) signals runnable.
8234 while ( !pah->runnable )
8235 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8237 if ( stream_.state != STREAM_RUNNING ) {
8238 MUTEX_UNLOCK( &stream_.mutex );
8241 MUTEX_UNLOCK( &stream_.mutex );
8244 if ( stream_.state == STREAM_CLOSED ) {
8245 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8246 "this shouldn't happen!";
8247 error( RtAudioError::WARNING );
8251 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8252 double streamTime = getStreamTime();
// Pulse backend never reports xrun status to the callback (status stays 0).
8253 RtAudioStreamStatus status = 0;
8254 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8255 stream_.bufferSize, streamTime, status,
8256 stream_.callbackInfo.userData );
8258 if ( doStopStream == 2 ) {
8263 MUTEX_LOCK( &stream_.mutex );
// Select the buffer actually handed to pa_simple: deviceBuffer when format/
// channel conversion is in play, userBuffer otherwise.
8264 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8265 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8267 if ( stream_.state != STREAM_RUNNING )
8272 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8273 if ( stream_.doConvertBuffer[OUTPUT] ) {
8274 convertBuffer( stream_.deviceBuffer,
8275 stream_.userBuffer[OUTPUT],
8276 stream_.convertInfo[OUTPUT] );
8277 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8278 formatBytes( stream_.deviceFormat[OUTPUT] );
8280 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8281 formatBytes( stream_.userFormat );
// Blocking write; failures are warnings, the stream keeps running.
8283 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8284 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8285 pa_strerror( pa_error ) << ".";
8286 errorText_ = errorStream_.str();
8287 error( RtAudioError::WARNING );
8291 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8292 if ( stream_.doConvertBuffer[INPUT] )
8293 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8294 formatBytes( stream_.deviceFormat[INPUT] );
8296 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8297 formatBytes( stream_.userFormat );
8299 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8300 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8301 pa_strerror( pa_error ) << ".";
8302 errorText_ = errorStream_.str();
8303 error( RtAudioError::WARNING );
8305 if ( stream_.doConvertBuffer[INPUT] ) {
8306 convertBuffer( stream_.userBuffer[INPUT],
8307 stream_.deviceBuffer,
8308 stream_.convertInfo[INPUT] );
8313 MUTEX_UNLOCK( &stream_.mutex );
8314 RtApi::tickStreamTime();
// doStopStream == 1: elided line presumably calls stopStream() — confirm.
8316 if ( doStopStream == 1 )
// startStream(): flags the stream RUNNING and wakes the parked callback
// thread via runnable_cv. Closed stream -> INVALID_USE; already running ->
// WARNING. NOTE(review): listing elides returns after error() and closing
// braces; code kept verbatim.
8320 void RtApiPulse::startStream( void )
8322 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8324 if ( stream_.state == STREAM_CLOSED ) {
8325 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8326 error( RtAudioError::INVALID_USE );
8329 if ( stream_.state == STREAM_RUNNING ) {
8330 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8331 error( RtAudioError::WARNING );
8335 MUTEX_LOCK( &stream_.mutex );
8337 stream_.state = STREAM_RUNNING;
8339 pah->runnable = true;
8340 pthread_cond_signal( &pah->runnable_cv );
8341 MUTEX_UNLOCK( &stream_.mutex );
// stopStream(): marks the stream STOPPED, then drains the playback
// connection so queued samples finish playing. On drain failure the mutex
// is released before raising SYSTEM_ERROR. NOTE(review): listing elides
// the `pa_error` declaration, returns and braces; code kept verbatim.
8344 void RtApiPulse::stopStream( void )
8346 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8348 if ( stream_.state == STREAM_CLOSED ) {
8349 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8350 error( RtAudioError::INVALID_USE );
8353 if ( stream_.state == STREAM_STOPPED ) {
8354 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8355 error( RtAudioError::WARNING );
8359 stream_.state = STREAM_STOPPED;
8360 MUTEX_LOCK( &stream_.mutex );
8362 if ( pah && pah->s_play ) {
// Drain blocks until all queued playback samples have been played.
8364 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8365 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8366 pa_strerror( pa_error ) << ".";
8367 errorText_ = errorStream_.str();
8368 MUTEX_UNLOCK( &stream_.mutex );
8369 error( RtAudioError::SYSTEM_ERROR );
8374 stream_.state = STREAM_STOPPED;
8375 MUTEX_UNLOCK( &stream_.mutex );
// abortStream(): same as stopStream() except queued playback samples are
// discarded (pa_simple_flush) instead of drained. NOTE(review): listing
// elides the `pa_error` declaration, returns and braces; code kept verbatim.
8378 void RtApiPulse::abortStream( void )
8380 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8382 if ( stream_.state == STREAM_CLOSED ) {
8383 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8384 error( RtAudioError::INVALID_USE );
8387 if ( stream_.state == STREAM_STOPPED ) {
8388 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8389 error( RtAudioError::WARNING );
8393 stream_.state = STREAM_STOPPED;
8394 MUTEX_LOCK( &stream_.mutex );
8396 if ( pah && pah->s_play ) {
8398 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8399 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8400 pa_strerror( pa_error ) << ".";
8401 errorText_ = errorStream_.str();
8402 MUTEX_UNLOCK( &stream_.mutex );
8403 error( RtAudioError::SYSTEM_ERROR );
8408 stream_.state = STREAM_STOPPED;
8409 MUTEX_UNLOCK( &stream_.mutex );
// probeDeviceOpen(): validates parameters against the fixed PulseAudio
// capabilities (device 0 only, 1-2 channels, listed sample rates), picks a
// native pa_sample_format_t or falls back to FLOAT32 with internal
// conversion, allocates user/device buffers, opens pa_simple record and/or
// playback connections, and spawns the callback thread (optionally with
// SCHED_RR realtime attributes). Returns false on any failure after
// cleaning up via the elided error path at the bottom.
// NOTE(review): listing elides many lines (pa_sample_spec `ss`, `sf_found`,
// `error`, goto labels, returns, braces); code kept verbatim.
8412 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8413 unsigned int channels, unsigned int firstChannel,
8414 unsigned int sampleRate, RtAudioFormat format,
8415 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8417 PulseAudioHandle *pah = 0;
8418 unsigned long bufferBytes = 0;
// Only the single virtual device 0 exists; DUPLEX is reached by opening
// OUTPUT then INPUT (mode must be one of the two here).
8421 if ( device != 0 ) return false;
8422 if ( mode != INPUT && mode != OUTPUT ) return false;
8423 if ( channels != 1 && channels != 2 ) {
8424 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8427 ss.channels = channels;
8429 if ( firstChannel != 0 ) return false;
// Sample rate must match the zero-terminated SUPPORTED_SAMPLERATES list.
8431 bool sr_found = false;
8432 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8433 if ( sampleRate == *sr ) {
8435 stream_.sampleRate = sampleRate;
8436 ss.rate = sampleRate;
8441 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Try to match the requested format to a native Pulse sample format.
8446 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8447 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8448 if ( format == sf->rtaudio_format ) {
8450 stream_.userFormat = sf->rtaudio_format;
8451 stream_.deviceFormat[mode] = stream_.userFormat;
8452 ss.format = sf->pa_format;
8456 if ( !sf_found ) { // Use internal data format conversion.
8457 stream_.userFormat = format;
8458 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8459 ss.format = PA_SAMPLE_FLOAT32LE;
8462 // Set other stream parameters.
8463 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8464 else stream_.userInterleaved = true;
8465 stream_.deviceInterleaved[mode] = true;
8466 stream_.nBuffers = 1;
8467 stream_.doByteSwap[mode] = false;
8468 stream_.nUserChannels[mode] = channels;
8469 stream_.nDeviceChannels[mode] = channels + firstChannel;
8470 stream_.channelOffset[mode] = 0;
8471 std::string streamName = "RtAudio";
8473 // Set flags for buffer conversion.
8474 stream_.doConvertBuffer[mode] = false;
8475 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8476 stream_.doConvertBuffer[mode] = true;
8477 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8478 stream_.doConvertBuffer[mode] = true;
8480 // Allocate necessary internal buffers.
8481 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8482 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8483 if ( stream_.userBuffer[mode] == NULL ) {
8484 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8487 stream_.bufferSize = *bufferSize;
8489 if ( stream_.doConvertBuffer[mode] ) {
// Reuse an existing (larger) device buffer from the OUTPUT open when
// opening the INPUT side of a duplex stream.
8491 bool makeBuffer = true;
8492 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8493 if ( mode == INPUT ) {
8494 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8495 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8496 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8501 bufferBytes *= *bufferSize;
8502 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8503 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8504 if ( stream_.deviceBuffer == NULL ) {
8505 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8511 stream_.device[mode] = device;
8513 // Setup the buffer conversion information structure.
8514 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// First open of the stream: create the handle + condvar; second open
// (duplex) reuses the existing handle.
8516 if ( !stream_.apiHandle ) {
8517 PulseAudioHandle *pah = new PulseAudioHandle;
8519 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8523 stream_.apiHandle = pah;
8524 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8525 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8529 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8532 if ( options && !options->streamName.empty() ) streamName = options->streamName;
// Record side gets an explicit fragsize so capture latency tracks the
// requested buffer size; playback uses server defaults.
8535 pa_buffer_attr buffer_attr;
8536 buffer_attr.fragsize = bufferBytes;
8537 buffer_attr.maxlength = -1;
8539 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8540 if ( !pah->s_rec ) {
8541 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8546 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8547 if ( !pah->s_play ) {
8548 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8556 if ( stream_.mode == UNINITIALIZED )
8557 stream_.mode = mode;
8558 else if ( stream_.mode == mode )
8561 stream_.mode = DUPLEX;
8563 if ( !stream_.callbackInfo.isRunning ) {
8564 stream_.callbackInfo.object = this;
8566 stream_.state = STREAM_STOPPED;
8567 // Set the thread attributes for joinable and realtime scheduling
8568 // priority (optional). The higher priority will only take affect
8569 // if the program is run as root or suid. Note, under Linux
8570 // processes with CAP_SYS_NICE privilege, a user can change
8571 // scheduling policy and priority (thus need not be root). See
8572 // POSIX "capabilities".
8573 pthread_attr_t attr;
8574 pthread_attr_init( &attr );
8575 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8576 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8577 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8578 stream_.callbackInfo.doRealtime = true;
8579 struct sched_param param;
8580 int priority = options->priority;
8581 int min = sched_get_priority_min( SCHED_RR );
8582 int max = sched_get_priority_max( SCHED_RR );
8583 if ( priority < min ) priority = min;
8584 else if ( priority > max ) priority = max;
8585 param.sched_priority = priority;
8587 // Set the policy BEFORE the priority. Otherwise it fails.
8588 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8589 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8590 // This is definitely required. Otherwise it fails.
8591 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): "¶m" below appears to be a mis-encoding of "&param"
// ("&para" swallowed as an HTML entity) — confirm against upstream RtAudio.
8592 pthread_attr_setschedparam(&attr, ¶m);
8595 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8597 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8600 stream_.callbackInfo.isRunning = true;
8601 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8602 pthread_attr_destroy(&attr);
8604 // Failed. Try instead with default attributes.
8605 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8607 stream_.callbackInfo.isRunning = false;
8608 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
// ---- Shared error-cleanup path (target of elided gotos above). ----
8617 if ( pah && stream_.callbackInfo.isRunning ) {
8618 pthread_cond_destroy( &pah->runnable_cv );
8620 stream_.apiHandle = 0;
8623 for ( int i=0; i<2; i++ ) {
8624 if ( stream_.userBuffer[i] ) {
8625 free( stream_.userBuffer[i] );
8626 stream_.userBuffer[i] = 0;
8630 if ( stream_.deviceBuffer ) {
8631 free( stream_.deviceBuffer );
8632 stream_.deviceBuffer = 0;
8635 stream_.state = STREAM_CLOSED;
8639 //******************** End of __LINUX_PULSE__ *********************//
8642 #if defined(__LINUX_OSS__)
8645 #include <sys/ioctl.h>
8648 #include <sys/soundcard.h>
// OSS backend file-scope declarations: the callback-thread entry forward
// declaration and the per-stream OssHandle (device file descriptors, xrun
// flags, triggered state, runnable condvar).
// NOTE(review): listing elides several struct members (xrun, triggered) and
// the struct name/braces; code kept verbatim.
8652 static void *ossCallbackHandler(void * ptr);
8654 // A structure to hold various information related to the OSS API
8657 int id[2]; // device ids
8660 pthread_cond_t runnable;
8663 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor — no OSS-specific initialization required.
8666 RtApiOss :: RtApiOss()
8668 // Nothing to do here.
// Destructor: closes any stream still open so devices/buffers are released.
8671 RtApiOss :: ~RtApiOss()
8673 if ( stream_.state != STREAM_CLOSED ) closeStream();
// getDeviceCount(): queries /dev/mixer with SNDCTL_SYSINFO (OSS >= 4.0) and
// returns sysinfo.numaudios. Failures raise a WARNING; the elided lines
// presumably return 0 and close the mixer fd — confirm against upstream.
8676 unsigned int RtApiOss :: getDeviceCount( void )
8678 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8679 if ( mixerfd == -1 ) {
8680 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8681 error( RtAudioError::WARNING );
8685 oss_sysinfo sysinfo;
8686 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8688 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8689 error( RtAudioError::WARNING );
8694 return sysinfo.numaudios;
// getDeviceInfo(): probes one OSS device via /dev/mixer SNDCTL_SYSINFO +
// SNDCTL_AUDIOINFO: channel capabilities (output/input/duplex), native data
// formats from the iformats mask, and supported sample rates (either the
// device's explicit rate list or the SAMPLE_RATES table filtered by
// min/max). Probe failures raise WARNING (invalid device index raises
// INVALID_USE) and leave info.probed false.
// NOTE(review): listing elides fd closes, `info.probed = true`, the return
// statement, else keywords and braces; code kept verbatim.
8697 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8699 RtAudio::DeviceInfo info;
8700 info.probed = false;
8702 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8703 if ( mixerfd == -1 ) {
8704 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8705 error( RtAudioError::WARNING );
8709 oss_sysinfo sysinfo;
8710 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8711 if ( result == -1 ) {
8713 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8714 error( RtAudioError::WARNING );
8718 unsigned nDevices = sysinfo.numaudios;
8719 if ( nDevices == 0 ) {
8721 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8722 error( RtAudioError::INVALID_USE );
8726 if ( device >= nDevices ) {
8728 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8729 error( RtAudioError::INVALID_USE );
8733 oss_audioinfo ainfo;
8735 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8737 if ( result == -1 ) {
8738 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8739 errorText_ = errorStream_.str();
8740 error( RtAudioError::WARNING );
// Channel capabilities from the caps bitmask.
8745 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8746 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8747 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8748 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8749 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8752 // Probe data formats ... do for input
8753 unsigned long mask = ainfo.iformats;
8754 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8755 info.nativeFormats |= RTAUDIO_SINT16;
8756 if ( mask & AFMT_S8 )
8757 info.nativeFormats |= RTAUDIO_SINT8;
8758 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8759 info.nativeFormats |= RTAUDIO_SINT32;
8761 if ( mask & AFMT_FLOAT )
8762 info.nativeFormats |= RTAUDIO_FLOAT32;
8764 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8765 info.nativeFormats |= RTAUDIO_SINT24;
8767 // Check that we have at least one supported format
8768 if ( info.nativeFormats == 0 ) {
8769 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8770 errorText_ = errorStream_.str();
8771 error( RtAudioError::WARNING );
8775 // Probe the supported sample rates.
8776 info.sampleRates.clear();
// If the driver supplies an explicit rate list, intersect it with
// RtAudio's SAMPLE_RATES table; otherwise use the device's min/max range.
8777 if ( ainfo.nrates ) {
8778 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8779 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8780 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8781 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate not exceeding 48 kHz.
8783 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8784 info.preferredSampleRate = SAMPLE_RATES[k];
8792 // Check min and max rate values;
8793 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8794 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8795 info.sampleRates.push_back( SAMPLE_RATES[k] );
8797 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8798 info.preferredSampleRate = SAMPLE_RATES[k];
8803 if ( info.sampleRates.size() == 0 ) {
8804 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8805 errorText_ = errorStream_.str();
8806 error( RtAudioError::WARNING );
8810 info.name = ainfo.name;
8817 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8818 unsigned int firstChannel, unsigned int sampleRate,
8819 RtAudioFormat format, unsigned int *bufferSize,
8820 RtAudio::StreamOptions *options )
8822 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8823 if ( mixerfd == -1 ) {
8824 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
8828 oss_sysinfo sysinfo;
8829 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8830 if ( result == -1 ) {
8832 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8836 unsigned nDevices = sysinfo.numaudios;
8837 if ( nDevices == 0 ) {
8838 // This should not happen because a check is made before this function is called.
8840 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8844 if ( device >= nDevices ) {
8845 // This should not happen because a check is made before this function is called.
8847 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8851 oss_audioinfo ainfo;
8853 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8855 if ( result == -1 ) {
8856 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8857 errorText_ = errorStream_.str();
8861 // Check if device supports input or output
8862 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8863 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8864 if ( mode == OUTPUT )
8865 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8867 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8868 errorText_ = errorStream_.str();
8873 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8874 if ( mode == OUTPUT )
8876 else { // mode == INPUT
8877 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8878 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8879 close( handle->id[0] );
8881 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8882 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8883 errorText_ = errorStream_.str();
8886 // Check that the number previously set channels is the same.
8887 if ( stream_.nUserChannels[0] != channels ) {
8888 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8889 errorText_ = errorStream_.str();
8898 // Set exclusive access if specified.
8899 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8901 // Try to open the device.
8903 fd = open( ainfo.devnode, flags, 0 );
8905 if ( errno == EBUSY )
8906 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8908 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8909 errorText_ = errorStream_.str();
8913 // For duplex operation, specifically set this mode (this doesn't seem to work).
8915 if ( flags | O_RDWR ) {
8916 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8917 if ( result == -1) {
8918 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8919 errorText_ = errorStream_.str();
8925 // Check the device channel support.
8926 stream_.nUserChannels[mode] = channels;
8927 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8929 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
8930 errorText_ = errorStream_.str();
8934 // Set the number of channels.
8935 int deviceChannels = channels + firstChannel;
8936 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
8937 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
8939 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
8940 errorText_ = errorStream_.str();
8943 stream_.nDeviceChannels[mode] = deviceChannels;
8945 // Get the data format mask
8947 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
8948 if ( result == -1 ) {
8950 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
8951 errorText_ = errorStream_.str();
8955 // Determine how to set the device format.
8956 stream_.userFormat = format;
8957 int deviceFormat = -1;
8958 stream_.doByteSwap[mode] = false;
8959 if ( format == RTAUDIO_SINT8 ) {
8960 if ( mask & AFMT_S8 ) {
8961 deviceFormat = AFMT_S8;
8962 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8965 else if ( format == RTAUDIO_SINT16 ) {
8966 if ( mask & AFMT_S16_NE ) {
8967 deviceFormat = AFMT_S16_NE;
8968 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8970 else if ( mask & AFMT_S16_OE ) {
8971 deviceFormat = AFMT_S16_OE;
8972 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8973 stream_.doByteSwap[mode] = true;
8976 else if ( format == RTAUDIO_SINT24 ) {
8977 if ( mask & AFMT_S24_NE ) {
8978 deviceFormat = AFMT_S24_NE;
8979 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8981 else if ( mask & AFMT_S24_OE ) {
8982 deviceFormat = AFMT_S24_OE;
8983 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8984 stream_.doByteSwap[mode] = true;
8987 else if ( format == RTAUDIO_SINT32 ) {
8988 if ( mask & AFMT_S32_NE ) {
8989 deviceFormat = AFMT_S32_NE;
8990 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8992 else if ( mask & AFMT_S32_OE ) {
8993 deviceFormat = AFMT_S32_OE;
8994 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8995 stream_.doByteSwap[mode] = true;
8999 if ( deviceFormat == -1 ) {
9000 // The user requested format is not natively supported by the device.
9001 if ( mask & AFMT_S16_NE ) {
9002 deviceFormat = AFMT_S16_NE;
9003 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9005 else if ( mask & AFMT_S32_NE ) {
9006 deviceFormat = AFMT_S32_NE;
9007 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9009 else if ( mask & AFMT_S24_NE ) {
9010 deviceFormat = AFMT_S24_NE;
9011 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9013 else if ( mask & AFMT_S16_OE ) {
9014 deviceFormat = AFMT_S16_OE;
9015 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9016 stream_.doByteSwap[mode] = true;
9018 else if ( mask & AFMT_S32_OE ) {
9019 deviceFormat = AFMT_S32_OE;
9020 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9021 stream_.doByteSwap[mode] = true;
9023 else if ( mask & AFMT_S24_OE ) {
9024 deviceFormat = AFMT_S24_OE;
9025 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9026 stream_.doByteSwap[mode] = true;
9028 else if ( mask & AFMT_S8) {
9029 deviceFormat = AFMT_S8;
9030 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9034 if ( stream_.deviceFormat[mode] == 0 ) {
9035 // This really shouldn't happen ...
9037 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9038 errorText_ = errorStream_.str();
9042 // Set the data format.
9043 int temp = deviceFormat;
9044 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9045 if ( result == -1 || deviceFormat != temp ) {
9047 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9048 errorText_ = errorStream_.str();
9052 // Attempt to set the buffer size. According to OSS, the minimum
9053 // number of buffers is two. The supposed minimum buffer size is 16
9054 // bytes, so that will be our lower bound. The argument to this
9055 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9056 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9057 // We'll check the actual value used near the end of the setup
9059 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9060 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9062 if ( options ) buffers = options->numberOfBuffers;
9063 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9064 if ( buffers < 2 ) buffers = 3;
9065 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9066 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9067 if ( result == -1 ) {
9069 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9070 errorText_ = errorStream_.str();
9073 stream_.nBuffers = buffers;
9075 // Save buffer size (in sample frames).
9076 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9077 stream_.bufferSize = *bufferSize;
9079 // Set the sample rate.
9080 int srate = sampleRate;
9081 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9082 if ( result == -1 ) {
9084 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9085 errorText_ = errorStream_.str();
9089 // Verify the sample rate setup worked.
9090 if ( abs( srate - (int)sampleRate ) > 100 ) {
9092 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9093 errorText_ = errorStream_.str();
9096 stream_.sampleRate = sampleRate;
9098 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9099 // We're doing duplex setup here.
9100 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9101 stream_.nDeviceChannels[0] = deviceChannels;
9104 // Set interleaving parameters.
9105 stream_.userInterleaved = true;
9106 stream_.deviceInterleaved[mode] = true;
9107 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9108 stream_.userInterleaved = false;
9110 // Set flags for buffer conversion
9111 stream_.doConvertBuffer[mode] = false;
9112 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9113 stream_.doConvertBuffer[mode] = true;
9114 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9115 stream_.doConvertBuffer[mode] = true;
9116 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9117 stream_.nUserChannels[mode] > 1 )
9118 stream_.doConvertBuffer[mode] = true;
9120 // Allocate the stream handles if necessary and then save.
9121 if ( stream_.apiHandle == 0 ) {
9123 handle = new OssHandle;
9125 catch ( std::bad_alloc& ) {
9126 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9130 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9131 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9135 stream_.apiHandle = (void *) handle;
9138 handle = (OssHandle *) stream_.apiHandle;
9140 handle->id[mode] = fd;
9142 // Allocate necessary internal buffers.
9143 unsigned long bufferBytes;
9144 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9145 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9146 if ( stream_.userBuffer[mode] == NULL ) {
9147 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9151 if ( stream_.doConvertBuffer[mode] ) {
9153 bool makeBuffer = true;
9154 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9155 if ( mode == INPUT ) {
9156 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9157 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9158 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9163 bufferBytes *= *bufferSize;
9164 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9165 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9166 if ( stream_.deviceBuffer == NULL ) {
9167 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9173 stream_.device[mode] = device;
9174 stream_.state = STREAM_STOPPED;
9176 // Setup the buffer conversion information structure.
9177 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9179 // Setup thread if necessary.
9180 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9181 // We had already set up an output stream.
9182 stream_.mode = DUPLEX;
9183 if ( stream_.device[0] == device ) handle->id[0] = fd;
9186 stream_.mode = mode;
9188 // Setup callback thread.
9189 stream_.callbackInfo.object = (void *) this;
9191 // Set the thread attributes for joinable and realtime scheduling
9192 // priority. The higher priority will only take affect if the
9193 // program is run as root or suid.
9194 pthread_attr_t attr;
9195 pthread_attr_init( &attr );
9196 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9197 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9198 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9199 stream_.callbackInfo.doRealtime = true;
9200 struct sched_param param;
9201 int priority = options->priority;
9202 int min = sched_get_priority_min( SCHED_RR );
9203 int max = sched_get_priority_max( SCHED_RR );
9204 if ( priority < min ) priority = min;
9205 else if ( priority > max ) priority = max;
9206 param.sched_priority = priority;
9208 // Set the policy BEFORE the priority. Otherwise it fails.
9209 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9210 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9211 // This is definitely required. Otherwise it fails.
9212 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9213 pthread_attr_setschedparam(&attr, ¶m);
9216 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9218 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9221 stream_.callbackInfo.isRunning = true;
9222 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9223 pthread_attr_destroy( &attr );
9225 // Failed. Try instead with default attributes.
9226 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9228 stream_.callbackInfo.isRunning = false;
9229 errorText_ = "RtApiOss::error creating callback thread!";
9239 pthread_cond_destroy( &handle->runnable );
9240 if ( handle->id[0] ) close( handle->id[0] );
9241 if ( handle->id[1] ) close( handle->id[1] );
9243 stream_.apiHandle = 0;
9246 for ( int i=0; i<2; i++ ) {
9247 if ( stream_.userBuffer[i] ) {
9248 free( stream_.userBuffer[i] );
9249 stream_.userBuffer[i] = 0;
9253 if ( stream_.deviceBuffer ) {
9254 free( stream_.deviceBuffer );
9255 stream_.deviceBuffer = 0;
9258 stream_.state = STREAM_CLOSED;
9262 void RtApiOss :: closeStream()
// Tear down an open stream: wake and join the callback thread, halt any
// in-flight device I/O, release the OSS handle, and free all buffers.
9264 if ( stream_.state == STREAM_CLOSED ) {
9265 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9266 error( RtAudioError::WARNING );
// Tell the callback thread to exit its loop, wake it in case it is
// blocked on the condition variable (stream stopped), then join it.
9270 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9271 stream_.callbackInfo.isRunning = false;
9272 MUTEX_LOCK( &stream_.mutex );
9273 if ( stream_.state == STREAM_STOPPED )
9274 pthread_cond_signal( &handle->runnable );
9275 MUTEX_UNLOCK( &stream_.mutex );
9276 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, halt the output and/or input DSP channels before
// closing their descriptors.
9278 if ( stream_.state == STREAM_RUNNING ) {
9279 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9280 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9282 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9283 stream_.state = STREAM_STOPPED;
// Destroy the condition variable and close any open device descriptors.
9287 pthread_cond_destroy( &handle->runnable );
9288 if ( handle->id[0] ) close( handle->id[0] );
9289 if ( handle->id[1] ) close( handle->id[1] );
9291 stream_.apiHandle = 0;
// Free the per-direction user buffers (index 0 = output, 1 = input)
// and the shared device conversion buffer.
9294 for ( int i=0; i<2; i++ ) {
9295 if ( stream_.userBuffer[i] ) {
9296 free( stream_.userBuffer[i] );
9297 stream_.userBuffer[i] = 0;
9301 if ( stream_.deviceBuffer ) {
9302 free( stream_.deviceBuffer );
9303 stream_.deviceBuffer = 0;
// Mark the stream object fully closed.
9306 stream_.mode = UNINITIALIZED;
9307 stream_.state = STREAM_CLOSED;
9310 void RtApiOss :: startStream()
// Transition a stopped stream to RUNNING and wake the callback thread.
9313 if ( stream_.state == STREAM_RUNNING ) {
9314 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9315 error( RtAudioError::WARNING );
// Flip the state under the stream mutex; the callback thread checks
// this state after being signalled.
9319 MUTEX_LOCK( &stream_.mutex );
9321 stream_.state = STREAM_RUNNING;
9323 // No need to do anything else here ... OSS automatically starts
9324 // when fed samples.
9326 MUTEX_UNLOCK( &stream_.mutex );
// Wake the callback thread, which waits on this condition variable
// while the stream is stopped (see callbackEvent()).
9328 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9329 pthread_cond_signal( &handle->runnable );
9332 void RtApiOss :: stopStream()
// Stop a running stream. For output, the device is first fed buffers of
// zeros so playback drains silently before the DSP is halted.
9335 if ( stream_.state == STREAM_STOPPED ) {
9336 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9337 error( RtAudioError::WARNING );
9341 MUTEX_LOCK( &stream_.mutex );
9343 // The state might change while waiting on a mutex.
9344 if ( stream_.state == STREAM_STOPPED ) {
9345 MUTEX_UNLOCK( &stream_.mutex );
9350 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9351 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9353 // Flush the output with zeros a few times.
// Pick whichever buffer is actually written to the device: the
// converted device buffer if conversion is active, else the user buffer.
9356 RtAudioFormat format;
9358 if ( stream_.doConvertBuffer[0] ) {
9359 buffer = stream_.deviceBuffer;
9360 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9361 format = stream_.deviceFormat[0];
9364 buffer = stream_.userBuffer[0];
9365 samples = stream_.bufferSize * stream_.nUserChannels[0];
9366 format = stream_.userFormat;
// Write nBuffers+1 zeroed buffers so every queued fragment is silence.
9369 memset( buffer, 0, samples * formatBytes(format) );
9370 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9371 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9372 if ( result == -1 ) {
9373 errorText_ = "RtApiOss::stopStream: audio write error.";
9374 error( RtAudioError::WARNING );
// Halt the output DSP and clear the duplex trigger flag.
9378 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9379 if ( result == -1 ) {
9380 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9381 errorText_ = errorStream_.str();
9384 handle->triggered = false;
// Halt the input DSP too, unless duplex mode shares one descriptor
// (in which case the halt above already covered it).
9387 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9388 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9389 if ( result == -1 ) {
9390 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9391 errorText_ = errorStream_.str();
9397 stream_.state = STREAM_STOPPED;
9398 MUTEX_UNLOCK( &stream_.mutex );
// Any ioctl failure above falls through to here and is reported after
// the mutex is released.
9400 if ( result != -1 ) return;
9401 error( RtAudioError::SYSTEM_ERROR );
9404 void RtApiOss :: abortStream()
// Stop a running stream immediately: like stopStream(), but without
// first flushing the output with zeros.
9407 if ( stream_.state == STREAM_STOPPED ) {
9408 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9409 error( RtAudioError::WARNING );
9413 MUTEX_LOCK( &stream_.mutex );
9415 // The state might change while waiting on a mutex.
9416 if ( stream_.state == STREAM_STOPPED ) {
9417 MUTEX_UNLOCK( &stream_.mutex );
// Halt the output DSP and clear the duplex trigger flag.
9422 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9423 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9424 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9425 if ( result == -1 ) {
9426 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9427 errorText_ = errorStream_.str();
9430 handle->triggered = false;
// Halt the input DSP too, unless duplex mode shares one descriptor.
9433 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9434 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9435 if ( result == -1 ) {
9436 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9437 errorText_ = errorStream_.str();
9443 stream_.state = STREAM_STOPPED;
9444 MUTEX_UNLOCK( &stream_.mutex );
// Report any ioctl failure after the mutex is released.
9446 if ( result != -1 ) return;
9447 error( RtAudioError::SYSTEM_ERROR );
9450 void RtApiOss :: callbackEvent()
// One iteration of the stream service loop: invoke the user callback,
// then write output samples to and/or read input samples from the OSS
// device, performing format conversion and byte swapping as configured.
// Runs on the dedicated callback thread (see ossCallbackHandler).
9452 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// While the stream is stopped, block on the condition variable until
// startStream()/closeStream() signals it.
9453 if ( stream_.state == STREAM_STOPPED ) {
9454 MUTEX_LOCK( &stream_.mutex );
9455 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9456 if ( stream_.state != STREAM_RUNNING ) {
9457 MUTEX_UNLOCK( &stream_.mutex );
9460 MUTEX_UNLOCK( &stream_.mutex );
9463 if ( stream_.state == STREAM_CLOSED ) {
9464 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9465 error( RtAudioError::WARNING );
9469 // Invoke user callback to get fresh output data.
9470 int doStopStream = 0;
9471 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9472 double streamTime = getStreamTime();
9473 RtAudioStreamStatus status = 0;
// Report (and clear) any under/overflow flags recorded on a previous
// pass so the callback sees each xrun exactly once.
9474 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9475 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9476 handle->xrun[0] = false;
9478 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9479 status |= RTAUDIO_INPUT_OVERFLOW;
9480 handle->xrun[1] = false;
9482 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9483 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return value 2 requests an immediate abort (no drain).
9484 if ( doStopStream == 2 ) {
9485 this->abortStream();
9489 MUTEX_LOCK( &stream_.mutex );
9491 // The state might change while waiting on a mutex.
9492 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9497 RtAudioFormat format;
9499 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9501 // Setup parameters and do buffer conversion if necessary.
9502 if ( stream_.doConvertBuffer[0] ) {
9503 buffer = stream_.deviceBuffer;
9504 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9505 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9506 format = stream_.deviceFormat[0];
9509 buffer = stream_.userBuffer[0];
9510 samples = stream_.bufferSize * stream_.nUserChannels[0];
9511 format = stream_.userFormat;
9514 // Do byte swapping if necessary.
9515 if ( stream_.doByteSwap[0] )
9516 byteSwapBuffer( buffer, samples, format );
// First pass of a duplex stream: prime the device with one buffer
// while triggers are disabled, then enable input+output together so
// both directions start in sync.
9518 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9520 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9521 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9522 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9523 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9524 handle->triggered = true;
9527 // Write samples to device.
9528 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9530 if ( result == -1 ) {
9531 // We'll assume this is an underrun, though there isn't a
9532 // specific means for determining that.
9533 handle->xrun[0] = true;
9534 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9535 error( RtAudioError::WARNING );
9536 // Continue on to input section.
9540 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9542 // Setup parameters.
9543 if ( stream_.doConvertBuffer[1] ) {
9544 buffer = stream_.deviceBuffer;
9545 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9546 format = stream_.deviceFormat[1];
9549 buffer = stream_.userBuffer[1];
9550 samples = stream_.bufferSize * stream_.nUserChannels[1];
9551 format = stream_.userFormat;
9554 // Read samples from device.
9555 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9557 if ( result == -1 ) {
9558 // We'll assume this is an overrun, though there isn't a
9559 // specific means for determining that.
9560 handle->xrun[1] = true;
9561 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9562 error( RtAudioError::WARNING );
9566 // Do byte swapping if necessary.
9567 if ( stream_.doByteSwap[1] )
9568 byteSwapBuffer( buffer, samples, format );
9570 // Do buffer conversion if necessary.
9571 if ( stream_.doConvertBuffer[1] )
9572 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9576 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; callback return value 1 requests a normal
// (draining) stop.
9578 RtApi::tickStreamTime();
9579 if ( doStopStream == 1 ) this->stopStream();
9582 static void *ossCallbackHandler( void *ptr )
// Entry point for the OSS callback thread created in probeDeviceOpen().
// Repeatedly services the stream via RtApiOss::callbackEvent() until
// CallbackInfo::isRunning is cleared (by closeStream()).
9584 CallbackInfo *info = (CallbackInfo *) ptr;
9585 RtApiOss *object = (RtApiOss *) info->object;
9586 bool *isRunning = &info->isRunning;
9588 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether realtime (SCHED_RR) scheduling was
// actually granted for this thread.
9589 if (info->doRealtime) {
9590 std::cerr << "RtAudio oss: " <<
9591 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9592 "running realtime scheduling" << std::endl;
// Service loop; pthread_testcancel() provides a cancellation point
// each iteration.
9596 while ( *isRunning == true ) {
9597 pthread_testcancel();
9598 object->callbackEvent();
9601 pthread_exit( NULL );
9604 //******************** End of __LINUX_OSS__ *********************//
9608 // *************************************************** //
9610 // Protected common (OS-independent) RtAudio methods.
9612 // *************************************************** //
9614 // This method can be modified to control the behavior of error
9615 // message printing.
9616 void RtApi :: error( RtAudioError::Type type )
// Central error dispatcher: routes errorText_ either to a user-supplied
// error callback (if registered) or to stderr/throw, depending on type.
9618 errorStream_.str(""); // clear the ostringstream
9620 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9621 if ( errorCallback ) {
9622 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9624 if ( firstErrorOccurred_ )
9627 firstErrorOccurred_ = true;
// Copy the message before any nested calls can overwrite errorText_.
9628 const std::string errorMessage = errorText_;
// A non-warning error on an active stream asks the callback thread to
// exit before the user callback is invoked.
9630 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9631 stream_.callbackInfo.isRunning = false; // exit from the thread
9635 errorCallback( type, errorMessage );
9636 firstErrorOccurred_ = false;
// No callback registered: warnings go to stderr (when enabled), real
// errors are thrown as RtAudioError exceptions.
9640 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9641 std::cerr << '\n' << errorText_ << "\n\n";
9642 else if ( type != RtAudioError::WARNING )
9643 throw( RtAudioError( errorText_, type ) );
9646 void RtApi :: verifyStream()
// Guard used by public methods: raises INVALID_USE if no stream is open.
9648 if ( stream_.state == STREAM_CLOSED ) {
9649 errorText_ = "RtApi:: a stream is not open!";
9650 error( RtAudioError::INVALID_USE );
9654 void RtApi :: clearStreamInfo()
// Reset every field of the stream structure to its pristine state.
// Called to (re)initialize before opening and after closing a stream.
9656 stream_.mode = UNINITIALIZED;
9657 stream_.state = STREAM_CLOSED;
9658 stream_.sampleRate = 0;
9659 stream_.bufferSize = 0;
9660 stream_.nBuffers = 0;
9661 stream_.userFormat = 0;
9662 stream_.userInterleaved = true;
9663 stream_.streamTime = 0.0;
9664 stream_.apiHandle = 0;
9665 stream_.deviceBuffer = 0;
9666 stream_.callbackInfo.callback = 0;
9667 stream_.callbackInfo.userData = 0;
9668 stream_.callbackInfo.isRunning = false;
9669 stream_.callbackInfo.errorCallback = 0;
// Per-direction fields: index 0 = output, 1 = input.
9670 for ( int i=0; i<2; i++ ) {
// NOTE(review): 11111 appears to be an arbitrary "no device" sentinel
// rather than a valid device index — confirm against callers.
9671 stream_.device[i] = 11111;
9672 stream_.doConvertBuffer[i] = false;
9673 stream_.deviceInterleaved[i] = true;
9674 stream_.doByteSwap[i] = false;
9675 stream_.nUserChannels[i] = 0;
9676 stream_.nDeviceChannels[i] = 0;
9677 stream_.channelOffset[i] = 0;
9678 stream_.deviceFormat[i] = 0;
9679 stream_.latency[i] = 0;
9680 stream_.userBuffer[i] = 0;
9681 stream_.convertInfo[i].channels = 0;
9682 stream_.convertInfo[i].inJump = 0;
9683 stream_.convertInfo[i].outJump = 0;
9684 stream_.convertInfo[i].inFormat = 0;
9685 stream_.convertInfo[i].outFormat = 0;
9686 stream_.convertInfo[i].inOffset.clear();
9687 stream_.convertInfo[i].outOffset.clear();
9691 unsigned int RtApi :: formatBytes( RtAudioFormat format )
// Map an RtAudioFormat to its size in bytes per sample. An unrecognized
// format is reported as a warning. (The return statements are not
// visible in this excerpt; presumably 2/4/8/3/1 bytes for the branches
// below and 0 on the error path — TODO confirm against full source.)
9693 if ( format == RTAUDIO_SINT16 )
9695 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9697 else if ( format == RTAUDIO_FLOAT64 )
9699 else if ( format == RTAUDIO_SINT24 )
9701 else if ( format == RTAUDIO_SINT8 )
9704 errorText_ = "RtApi::formatBytes: undefined format.";
9705 error( RtAudioError::WARNING );
9710 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
// Populate stream_.convertInfo[mode] with the jumps, formats, and
// per-channel offsets that convertBuffer() uses to translate between
// the user buffer and the device buffer (format conversion, channel
// compensation, and interleaving/deinterleaving).
9712 if ( mode == INPUT ) { // convert device to user buffer
9713 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9714 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9715 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9716 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9718 else { // convert user to device buffer
9719 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9720 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9721 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9722 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
// Only the smaller channel count is actually converted; extra device
// channels are handled elsewhere (channel compensation).
9725 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9726 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9728 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9730 // Set up the interleave/deinterleave offsets.
9731 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
// Non-interleaved data uses per-channel planes of bufferSize frames,
// so offsets step by k*bufferSize and the jump collapses to 1.
9732 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9733 ( mode == INPUT && stream_.userInterleaved ) ) {
9734 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9735 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9736 stream_.convertInfo[mode].outOffset.push_back( k );
9737 stream_.convertInfo[mode].inJump = 1;
9741 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9742 stream_.convertInfo[mode].inOffset.push_back( k );
9743 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9744 stream_.convertInfo[mode].outJump = 1;
9748 else { // no (de)interleaving
9749 if ( stream_.userInterleaved ) {
9750 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9751 stream_.convertInfo[mode].inOffset.push_back( k );
9752 stream_.convertInfo[mode].outOffset.push_back( k );
9756 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9757 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9758 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9759 stream_.convertInfo[mode].inJump = 1;
9760 stream_.convertInfo[mode].outJump = 1;
9765 // Add channel offset.
// Shift the device-side offsets by firstChannel: by whole channels for
// interleaved devices, by whole planes (firstChannel * bufferSize) for
// non-interleaved devices.
9766 if ( firstChannel > 0 ) {
9767 if ( stream_.deviceInterleaved[mode] ) {
9768 if ( mode == OUTPUT ) {
9769 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9770 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9773 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9774 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9778 if ( mode == OUTPUT ) {
9779 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9780 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9783 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9784 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
9790 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9792 // This function does format conversion, input/output channel compensation, and
9793 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9794 // the lower three bytes of a 32-bit integer.
9796 // Clear our device buffer when in/out duplex device channels are different
9797 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9798 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9799 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9802 if (info.outFormat == RTAUDIO_FLOAT64) {
9804 Float64 *out = (Float64 *)outBuffer;
9806 if (info.inFormat == RTAUDIO_SINT8) {
9807 signed char *in = (signed char *)inBuffer;
9808 scale = 1.0 / 127.5;
9809 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9810 for (j=0; j<info.channels; j++) {
9811 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9812 out[info.outOffset[j]] += 0.5;
9813 out[info.outOffset[j]] *= scale;
9816 out += info.outJump;
9819 else if (info.inFormat == RTAUDIO_SINT16) {
9820 Int16 *in = (Int16 *)inBuffer;
9821 scale = 1.0 / 32767.5;
9822 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9823 for (j=0; j<info.channels; j++) {
9824 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9825 out[info.outOffset[j]] += 0.5;
9826 out[info.outOffset[j]] *= scale;
9829 out += info.outJump;
9832 else if (info.inFormat == RTAUDIO_SINT24) {
9833 Int24 *in = (Int24 *)inBuffer;
9834 scale = 1.0 / 8388607.5;
9835 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9836 for (j=0; j<info.channels; j++) {
9837 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9838 out[info.outOffset[j]] += 0.5;
9839 out[info.outOffset[j]] *= scale;
9842 out += info.outJump;
9845 else if (info.inFormat == RTAUDIO_SINT32) {
9846 Int32 *in = (Int32 *)inBuffer;
9847 scale = 1.0 / 2147483647.5;
9848 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9849 for (j=0; j<info.channels; j++) {
9850 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9851 out[info.outOffset[j]] += 0.5;
9852 out[info.outOffset[j]] *= scale;
9855 out += info.outJump;
9858 else if (info.inFormat == RTAUDIO_FLOAT32) {
9859 Float32 *in = (Float32 *)inBuffer;
9860 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9861 for (j=0; j<info.channels; j++) {
9862 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9865 out += info.outJump;
9868 else if (info.inFormat == RTAUDIO_FLOAT64) {
9869 // Channel compensation and/or (de)interleaving only.
9870 Float64 *in = (Float64 *)inBuffer;
9871 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9872 for (j=0; j<info.channels; j++) {
9873 out[info.outOffset[j]] = in[info.inOffset[j]];
9876 out += info.outJump;
9880 else if (info.outFormat == RTAUDIO_FLOAT32) {
9882 Float32 *out = (Float32 *)outBuffer;
9884 if (info.inFormat == RTAUDIO_SINT8) {
9885 signed char *in = (signed char *)inBuffer;
9886 scale = (Float32) ( 1.0 / 127.5 );
9887 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9888 for (j=0; j<info.channels; j++) {
9889 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9890 out[info.outOffset[j]] += 0.5;
9891 out[info.outOffset[j]] *= scale;
9894 out += info.outJump;
9897 else if (info.inFormat == RTAUDIO_SINT16) {
9898 Int16 *in = (Int16 *)inBuffer;
9899 scale = (Float32) ( 1.0 / 32767.5 );
9900 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9901 for (j=0; j<info.channels; j++) {
9902 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9903 out[info.outOffset[j]] += 0.5;
9904 out[info.outOffset[j]] *= scale;
9907 out += info.outJump;
9910 else if (info.inFormat == RTAUDIO_SINT24) {
9911 Int24 *in = (Int24 *)inBuffer;
9912 scale = (Float32) ( 1.0 / 8388607.5 );
9913 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9914 for (j=0; j<info.channels; j++) {
9915 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
9916 out[info.outOffset[j]] += 0.5;
9917 out[info.outOffset[j]] *= scale;
9920 out += info.outJump;
9923 else if (info.inFormat == RTAUDIO_SINT32) {
9924 Int32 *in = (Int32 *)inBuffer;
9925 scale = (Float32) ( 1.0 / 2147483647.5 );
9926 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9927 for (j=0; j<info.channels; j++) {
9928 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9929 out[info.outOffset[j]] += 0.5;
9930 out[info.outOffset[j]] *= scale;
9933 out += info.outJump;
9936 else if (info.inFormat == RTAUDIO_FLOAT32) {
9937 // Channel compensation and/or (de)interleaving only.
9938 Float32 *in = (Float32 *)inBuffer;
9939 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9940 for (j=0; j<info.channels; j++) {
9941 out[info.outOffset[j]] = in[info.inOffset[j]];
9944 out += info.outJump;
9947 else if (info.inFormat == RTAUDIO_FLOAT64) {
9948 Float64 *in = (Float64 *)inBuffer;
9949 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9950 for (j=0; j<info.channels; j++) {
9951 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9954 out += info.outJump;
// 32-bit signed integer output section.  Narrower integer formats are
// promoted by left-shifting the value into the high-order bits; float
// formats are scaled by 2^31 - 0.5, the inverse of the read-side mapping.
// NOTE(review): closing braces are missing from this extract; code lines
// below are left byte-identical.
9958 else if (info.outFormat == RTAUDIO_SINT32) {
9959 Int32 *out = (Int32 *)outBuffer;
9960 if (info.inFormat == RTAUDIO_SINT8) {
9961 signed char *in = (signed char *)inBuffer;
9962 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9963 for (j=0; j<info.channels; j++) {
// Promote 8-bit to 32-bit: the sample occupies the top byte.
9964 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9965 out[info.outOffset[j]] <<= 24;
9968 out += info.outJump;
9971 else if (info.inFormat == RTAUDIO_SINT16) {
9972 Int16 *in = (Int16 *)inBuffer;
9973 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9974 for (j=0; j<info.channels; j++) {
9975 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9976 out[info.outOffset[j]] <<= 16;
9979 out += info.outJump;
9982 else if (info.inFormat == RTAUDIO_SINT24) {
9983 Int24 *in = (Int24 *)inBuffer;
9984 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9985 for (j=0; j<info.channels; j++) {
9986 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
9987 out[info.outOffset[j]] <<= 8;
9990 out += info.outJump;
9993 else if (info.inFormat == RTAUDIO_SINT32) {
9994 // Channel compensation and/or (de)interleaving only.
9995 Int32 *in = (Int32 *)inBuffer;
9996 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9997 for (j=0; j<info.channels; j++) {
9998 out[info.outOffset[j]] = in[info.inOffset[j]];
10001 out += info.outJump;
10004 else if (info.inFormat == RTAUDIO_FLOAT32) {
10005 Float32 *in = (Float32 *)inBuffer;
10006 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10007 for (j=0; j<info.channels; j++) {
// in * (2^31 - 0.5) - 0.5 maps [-1.0, 1.0] onto the full Int32 range.
10008 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10011 out += info.outJump;
10014 else if (info.inFormat == RTAUDIO_FLOAT64) {
10015 Float64 *in = (Float64 *)inBuffer;
10016 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10017 for (j=0; j<info.channels; j++) {
10018 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10021 out += info.outJump;
// 24-bit signed integer output section.  The Int24 type stores the value
// via Int32 assignment; narrower formats are shifted up, SINT32 is shifted
// down by 8 (discarding the least-significant byte), and floats are scaled
// by 2^23 - 0.5.
// NOTE(review): closing braces are missing from this extract; code lines
// below are left byte-identical.
10025 else if (info.outFormat == RTAUDIO_SINT24) {
10026 Int24 *out = (Int24 *)outBuffer;
10027 if (info.inFormat == RTAUDIO_SINT8) {
10028 signed char *in = (signed char *)inBuffer;
10029 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10030 for (j=0; j<info.channels; j++) {
10031 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10032 //out[info.outOffset[j]] <<= 16;
10035 out += info.outJump;
10038 else if (info.inFormat == RTAUDIO_SINT16) {
10039 Int16 *in = (Int16 *)inBuffer;
10040 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10041 for (j=0; j<info.channels; j++) {
10042 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10043 //out[info.outOffset[j]] <<= 8;
10046 out += info.outJump;
10049 else if (info.inFormat == RTAUDIO_SINT24) {
10050 // Channel compensation and/or (de)interleaving only.
10051 Int24 *in = (Int24 *)inBuffer;
10052 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10053 for (j=0; j<info.channels; j++) {
10054 out[info.outOffset[j]] = in[info.inOffset[j]];
10057 out += info.outJump;
10060 else if (info.inFormat == RTAUDIO_SINT32) {
10061 Int32 *in = (Int32 *)inBuffer;
10062 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10063 for (j=0; j<info.channels; j++) {
// Truncate to 24 bits: keep the high-order three bytes.
10064 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10065 //out[info.outOffset[j]] >>= 8;
10068 out += info.outJump;
10071 else if (info.inFormat == RTAUDIO_FLOAT32) {
10072 Float32 *in = (Float32 *)inBuffer;
10073 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10074 for (j=0; j<info.channels; j++) {
// 8388607.5 = 2^23 - 0.5 : inverse of the Float32-read scaling.
10075 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10078 out += info.outJump;
10081 else if (info.inFormat == RTAUDIO_FLOAT64) {
10082 Float64 *in = (Float64 *)inBuffer;
10083 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10084 for (j=0; j<info.channels; j++) {
10085 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10088 out += info.outJump;
// 16-bit signed integer output section.  Wider integer formats are
// truncated (no dithering or rounding) by right-shifting to keep the
// high-order 16 bits; floats are scaled by 2^15 - 0.5 (32767.5).
// NOTE(review): closing braces are missing from this extract; code lines
// below are left byte-identical.
10092 else if (info.outFormat == RTAUDIO_SINT16) {
10093 Int16 *out = (Int16 *)outBuffer;
10094 if (info.inFormat == RTAUDIO_SINT8) {
10095 signed char *in = (signed char *)inBuffer;
10096 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10097 for (j=0; j<info.channels; j++) {
10098 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10099 out[info.outOffset[j]] <<= 8;
10102 out += info.outJump;
10105 else if (info.inFormat == RTAUDIO_SINT16) {
10106 // Channel compensation and/or (de)interleaving only.
10107 Int16 *in = (Int16 *)inBuffer;
10108 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10109 for (j=0; j<info.channels; j++) {
10110 out[info.outOffset[j]] = in[info.inOffset[j]];
10113 out += info.outJump;
10116 else if (info.inFormat == RTAUDIO_SINT24) {
10117 Int24 *in = (Int24 *)inBuffer;
10118 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10119 for (j=0; j<info.channels; j++) {
10120 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10123 out += info.outJump;
10126 else if (info.inFormat == RTAUDIO_SINT32) {
10127 Int32 *in = (Int32 *)inBuffer;
10128 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10129 for (j=0; j<info.channels; j++) {
// Mask after the shift keeps only the low 16 bits of the result.
10130 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10133 out += info.outJump;
10136 else if (info.inFormat == RTAUDIO_FLOAT32) {
10137 Float32 *in = (Float32 *)inBuffer;
10138 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10139 for (j=0; j<info.channels; j++) {
10140 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10143 out += info.outJump;
10146 else if (info.inFormat == RTAUDIO_FLOAT64) {
10147 Float64 *in = (Float64 *)inBuffer;
10148 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10149 for (j=0; j<info.channels; j++) {
10150 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10153 out += info.outJump;
// 8-bit signed integer output section.  Wider integer formats keep only
// the most-significant byte; floats are scaled by 2^7 - 0.5 (127.5).
// NOTE(review): closing braces are missing from this extract; code lines
// below are left byte-identical.
10157 else if (info.outFormat == RTAUDIO_SINT8) {
10158 signed char *out = (signed char *)outBuffer;
10159 if (info.inFormat == RTAUDIO_SINT8) {
10160 // Channel compensation and/or (de)interleaving only.
10161 signed char *in = (signed char *)inBuffer;
10162 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10163 for (j=0; j<info.channels; j++) {
10164 out[info.outOffset[j]] = in[info.inOffset[j]];
10167 out += info.outJump;
// NOTE(review): plain `if` here, not `else if` as in every sibling branch.
// Harmless because the inFormat cases are mutually exclusive, but
// inconsistent — worth normalizing in the canonical source.
10170 if (info.inFormat == RTAUDIO_SINT16) {
10171 Int16 *in = (Int16 *)inBuffer;
10172 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10173 for (j=0; j<info.channels; j++) {
10174 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10177 out += info.outJump;
10180 else if (info.inFormat == RTAUDIO_SINT24) {
10181 Int24 *in = (Int24 *)inBuffer;
10182 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10183 for (j=0; j<info.channels; j++) {
10184 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10187 out += info.outJump;
10190 else if (info.inFormat == RTAUDIO_SINT32) {
10191 Int32 *in = (Int32 *)inBuffer;
10192 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10193 for (j=0; j<info.channels; j++) {
10194 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10197 out += info.outJump;
10200 else if (info.inFormat == RTAUDIO_FLOAT32) {
10201 Float32 *in = (Float32 *)inBuffer;
10202 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10203 for (j=0; j<info.channels; j++) {
10204 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10207 out += info.outJump;
10210 else if (info.inFormat == RTAUDIO_FLOAT64) {
10211 Float64 *in = (Float64 *)inBuffer;
10212 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10213 for (j=0; j<info.channels; j++) {
10214 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10217 out += info.outJump;
10223 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10224 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10225 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
// In-place endianness conversion of a sample buffer.  The per-sample byte
// width (and hence the swap pattern and pointer stride) is selected from
// the RtAudioFormat: 2 bytes (SINT16), 4 bytes (SINT32/FLOAT32),
// 3 bytes (SINT24), or 8 bytes (FLOAT64).
// NOTE(review): the actual swap/increment statements are missing from this
// extract (only the guiding comments survive); the surviving lines below
// are left byte-identical — restore the statements from the canonical
// source before compiling.
10227 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10233 if ( format == RTAUDIO_SINT16 ) {
10234 for ( unsigned int i=0; i<samples; i++ ) {
10235 // Swap 1st and 2nd bytes.
10240 // Increment 2 bytes.
10244 else if ( format == RTAUDIO_SINT32 ||
10245 format == RTAUDIO_FLOAT32 ) {
10246 for ( unsigned int i=0; i<samples; i++ ) {
10247 // Swap 1st and 4th bytes.
10252 // Swap 2nd and 3rd bytes.
10258 // Increment 3 more bytes.
10262 else if ( format == RTAUDIO_SINT24 ) {
10263 for ( unsigned int i=0; i<samples; i++ ) {
10264 // Swap 1st and 3rd bytes.
10269 // Increment 2 more bytes.
10273 else if ( format == RTAUDIO_FLOAT64 ) {
10274 for ( unsigned int i=0; i<samples; i++ ) {
10275 // Swap 1st and 8th bytes
10280 // Swap 2nd and 7th bytes
10286 // Swap 3rd and 6th bytes
10292 // Swap 4th and 5th bytes
10298 // Increment 5 more bytes.
10304 // Indentation settings for Vim and Emacs
10306 // Local Variables:
10307 // c-basic-offset: 2
10308 // indent-tabs-mode: nil
10311 // vim: et sts=2 sw=2