1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
51 // Static variable definitions.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform mutex abstraction: on Windows builds the generic MUTEX_*
// macros map onto CRITICAL_SECTION calls so that backend code can guard
// stream state without caring about the underlying threading primitive.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
#define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
#define MUTEX_DESTROY(A) DeleteCriticalSection(A)
#define MUTEX_LOCK(A) EnterCriticalSection(A)
#define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Identity overload: wrap a null-terminated narrow string in a std::string.
// Paired with the wchar_t overload so callers can convert either TCHAR width.
static std::string convertCharPointerToStdString(const char *text)
{
  return std::string(text);
}
71 static std::string convertCharPointerToStdString(const wchar_t *text)
73 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
74 std::string s( length-1, '\0' );
75 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
// POSIX platforms: the same MUTEX_* macros map onto pthread mutex calls.
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
#define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
#define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
#define MUTEX_LOCK(A) pthread_mutex_lock(A)
#define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
// NOTE(review): the two dummy definitions below appear to belong to the
// no-audio-API (#else) branch of this conditional; the #else/#endif
// directives are not visible here -- confirm against the full file.
#define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
#define MUTEX_DESTROY(A) abs(*A) // dummy definitions
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
// Define API names and display names.
// Must be in same order as API enum
// (UNSPECIFIED, LINUX_ALSA, LINUX_PULSE, LINUX_OSS, UNIX_JACK,
//  MACOSX_CORE, WINDOWS_WASAPI, WINDOWS_ASIO, WINDOWS_DS, RTAUDIO_DUMMY).
// Column 0 is the short identifier, column 1 the human-readable name.
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "alsa"        , "ALSA" },
  { "pulse"       , "Pulse" },
  { "oss"         , "OpenSoundSystem" },
  { "jack"        , "Jack" },
  { "core"        , "CoreAudio" },
  { "wasapi"      , "WASAPI" },
  { "asio"        , "ASIO" },
  { "ds"          , "DirectSound" },
  { "dummy"       , "Dummy" },
};

// Number of rows in the name table; checked against RtAudio::NUM_APIS below.
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
119 // The order here will control the order of RtAudio's API search in
121 RtAudio::Api rtaudio_compiled_apis[] = {
122 #if defined(__UNIX_JACK__)
125 #if defined(__LINUX_PULSE__)
126 RtAudio::LINUX_PULSE,
128 #if defined(__LINUX_ALSA__)
131 #if defined(__LINUX_OSS__)
134 #if defined(__WINDOWS_ASIO__)
135 RtAudio::WINDOWS_ASIO,
137 #if defined(__WINDOWS_WASAPI__)
138 RtAudio::WINDOWS_WASAPI,
140 #if defined(__WINDOWS_DS__)
143 #if defined(__MACOSX_CORE__)
144 RtAudio::MACOSX_CORE,
146 #if defined(__RTAUDIO_DUMMY__)
147 RtAudio::RTAUDIO_DUMMY,
149 RtAudio::UNSPECIFIED,
151 const unsigned int rtaudio_num_compiled_apis =
152 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
155 // TODO: replace this with something nicer (C++11)
156 static const std::vector<RtAudio::Api> init_compiledApis() {
157 std::vector<RtAudio::Api> apis;
158 for (unsigned int i=0; i<rtaudio_num_compiled_apis; i++)
159 apis.push_back(rtaudio_compiled_apis[i]);
162 const std::vector<RtAudio::Api> RtAudio::compiledApis(init_compiledApis());
164 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
165 // If the build breaks here, check that they match.
166 template<bool b> class StaticAssert { private: StaticAssert() {} };
167 template<> class StaticAssert<true>{ public: StaticAssert() {} };
168 class StaticAssertions { StaticAssertions() {
169 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
172 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
177 const std::vector<RtAudio::Api>& RtAudio :: getCompiledApi()
182 const std::string RtAudio :: getCompiledApiName( RtAudio::Api api )
184 if (api < 0 || api > RtAudio::NUM_APIS
185 || (std::find(RtAudio::compiledApis.begin(),
186 RtAudio::compiledApis.end(), api) == RtAudio::compiledApis.end()))
188 return rtaudio_api_names[api][0];
191 const std::string RtAudio :: getCompiledApiDisplayName( RtAudio::Api api )
193 if (api < 0 || api > RtAudio::NUM_APIS
194 || (std::find(RtAudio::compiledApis.begin(),
195 RtAudio::compiledApis.end(), api) == RtAudio::compiledApis.end()))
197 return rtaudio_api_names[api][1];
200 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
203 std::vector<RtAudio::Api>::const_iterator it;
204 for (it = compiledApis.begin(); it != compiledApis.end(); ++it, ++i)
205 if (name == rtaudio_api_names[*it][0])
207 return RtAudio::UNSPECIFIED;
210 void RtAudio :: openRtApi( RtAudio::Api api )
216 #if defined(__UNIX_JACK__)
217 if ( api == UNIX_JACK )
218 rtapi_ = new RtApiJack();
220 #if defined(__LINUX_ALSA__)
221 if ( api == LINUX_ALSA )
222 rtapi_ = new RtApiAlsa();
224 #if defined(__LINUX_PULSE__)
225 if ( api == LINUX_PULSE )
226 rtapi_ = new RtApiPulse();
228 #if defined(__LINUX_OSS__)
229 if ( api == LINUX_OSS )
230 rtapi_ = new RtApiOss();
232 #if defined(__WINDOWS_ASIO__)
233 if ( api == WINDOWS_ASIO )
234 rtapi_ = new RtApiAsio();
236 #if defined(__WINDOWS_WASAPI__)
237 if ( api == WINDOWS_WASAPI )
238 rtapi_ = new RtApiWasapi();
240 #if defined(__WINDOWS_DS__)
241 if ( api == WINDOWS_DS )
242 rtapi_ = new RtApiDs();
244 #if defined(__MACOSX_CORE__)
245 if ( api == MACOSX_CORE )
246 rtapi_ = new RtApiCore();
248 #if defined(__RTAUDIO_DUMMY__)
249 if ( api == RTAUDIO_DUMMY )
250 rtapi_ = new RtApiDummy();
254 RtAudio :: RtAudio( RtAudio::Api api )
258 if ( api != UNSPECIFIED ) {
259 // Attempt to open the specified API.
261 if ( rtapi_ ) return;
263 // No compiled support for specified API value. Issue a debug
264 // warning and continue as if no API was specified.
265 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
268 // Iterate through the compiled APIs and return as soon as we find
269 // one with at least one device or we reach the end of the list.
270 std::vector< RtAudio::Api > apis;
271 getCompiledApi( apis );
272 for ( unsigned int i=0; i<apis.size(); i++ ) {
273 openRtApi( apis[i] );
274 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
277 if ( rtapi_ ) return;
279 // It should not be possible to get here because the preprocessor
280 // definition __RTAUDIO_DUMMY__ is automatically defined if no
281 // API-specific definitions are passed to the compiler. But just in
282 // case something weird happens, we'll thow an error.
283 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
284 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
287 RtAudio :: ~RtAudio()
293 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
294 RtAudio::StreamParameters *inputParameters,
295 RtAudioFormat format, unsigned int sampleRate,
296 unsigned int *bufferFrames,
297 RtAudioCallback callback, void *userData,
298 RtAudio::StreamOptions *options,
299 RtAudioErrorCallback errorCallback )
301 return rtapi_->openStream( outputParameters, inputParameters, format,
302 sampleRate, bufferFrames, callback,
303 userData, options, errorCallback );
306 // *************************************************** //
308 // Public RtApi definitions (see end of file for
309 // private or protected utility functions).
311 // *************************************************** //
315 stream_.state = STREAM_CLOSED;
316 stream_.mode = UNINITIALIZED;
317 stream_.apiHandle = 0;
318 stream_.userBuffer[0] = 0;
319 stream_.userBuffer[1] = 0;
320 MUTEX_INITIALIZE( &stream_.mutex );
321 showWarnings_ = true;
322 firstErrorOccurred_ = false;
327 MUTEX_DESTROY( &stream_.mutex );
330 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
331 RtAudio::StreamParameters *iParams,
332 RtAudioFormat format, unsigned int sampleRate,
333 unsigned int *bufferFrames,
334 RtAudioCallback callback, void *userData,
335 RtAudio::StreamOptions *options,
336 RtAudioErrorCallback errorCallback )
338 if ( stream_.state != STREAM_CLOSED ) {
339 errorText_ = "RtApi::openStream: a stream is already open!";
340 error( RtAudioError::INVALID_USE );
344 // Clear stream information potentially left from a previously open stream.
347 if ( oParams && oParams->nChannels < 1 ) {
348 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
349 error( RtAudioError::INVALID_USE );
353 if ( iParams && iParams->nChannels < 1 ) {
354 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
355 error( RtAudioError::INVALID_USE );
359 if ( oParams == NULL && iParams == NULL ) {
360 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
361 error( RtAudioError::INVALID_USE );
365 if ( formatBytes(format) == 0 ) {
366 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
367 error( RtAudioError::INVALID_USE );
371 unsigned int nDevices = getDeviceCount();
372 unsigned int oChannels = 0;
374 oChannels = oParams->nChannels;
375 if ( oParams->deviceId >= nDevices ) {
376 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
377 error( RtAudioError::INVALID_USE );
382 unsigned int iChannels = 0;
384 iChannels = iParams->nChannels;
385 if ( iParams->deviceId >= nDevices ) {
386 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
387 error( RtAudioError::INVALID_USE );
394 if ( oChannels > 0 ) {
396 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
397 sampleRate, format, bufferFrames, options );
398 if ( result == false ) {
399 error( RtAudioError::SYSTEM_ERROR );
404 if ( iChannels > 0 ) {
406 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
407 sampleRate, format, bufferFrames, options );
408 if ( result == false ) {
409 if ( oChannels > 0 ) closeStream();
410 error( RtAudioError::SYSTEM_ERROR );
415 stream_.callbackInfo.callback = (void *) callback;
416 stream_.callbackInfo.userData = userData;
417 stream_.callbackInfo.errorCallback = (void *) errorCallback;
419 if ( options ) options->numberOfBuffers = stream_.nBuffers;
420 stream_.state = STREAM_STOPPED;
423 unsigned int RtApi :: getDefaultInputDevice( void )
425 // Should be implemented in subclasses if possible.
429 unsigned int RtApi :: getDefaultOutputDevice( void )
431 // Should be implemented in subclasses if possible.
435 void RtApi :: closeStream( void )
437 // MUST be implemented in subclasses!
441 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
442 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
443 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
444 RtAudio::StreamOptions * /*options*/ )
446 // MUST be implemented in subclasses!
450 void RtApi :: tickStreamTime( void )
452 // Subclasses that do not provide their own implementation of
453 // getStreamTime should call this function once per buffer I/O to
454 // provide basic stream time support.
456 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
458 #if defined( HAVE_GETTIMEOFDAY )
459 gettimeofday( &stream_.lastTickTimestamp, NULL );
463 long RtApi :: getStreamLatency( void )
467 long totalLatency = 0;
468 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
469 totalLatency = stream_.latency[0];
470 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
471 totalLatency += stream_.latency[1];
476 double RtApi :: getStreamTime( void )
480 #if defined( HAVE_GETTIMEOFDAY )
481 // Return a very accurate estimate of the stream time by
482 // adding in the elapsed time since the last tick.
486 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
487 return stream_.streamTime;
489 gettimeofday( &now, NULL );
490 then = stream_.lastTickTimestamp;
491 return stream_.streamTime +
492 ((now.tv_sec + 0.000001 * now.tv_usec) -
493 (then.tv_sec + 0.000001 * then.tv_usec));
495 return stream_.streamTime;
499 void RtApi :: setStreamTime( double time )
504 stream_.streamTime = time;
505 #if defined( HAVE_GETTIMEOFDAY )
506 gettimeofday( &stream_.lastTickTimestamp, NULL );
510 unsigned int RtApi :: getStreamSampleRate( void )
514 return stream_.sampleRate;
518 // *************************************************** //
520 // OS/API-specific methods.
522 // *************************************************** //
524 #if defined(__MACOSX_CORE__)
526 // The OS X CoreAudio API is designed to use a separate callback
527 // procedure for each of its audio devices. A single RtAudio duplex
528 // stream using two different devices is supported here, though it
529 // cannot be guaranteed to always behave correctly because we cannot
530 // synchronize these two callbacks.
532 // A property listener is installed for over/underrun information.
533 // However, no functionality is currently provided to allow property
534 // listeners to trigger user handlers because it is unclear what could
535 // be done if a critical stream parameter (buffer size, sample rate,
536 // device disconnect) notification arrived. The listeners entail
537 // quite a bit of extra code and most likely, a user program wouldn't
538 // be prepared for the result anyway. However, we do provide a flag
539 // to the client callback function to inform of an over/underrun.
541 // A structure to hold various information related to the CoreAudio API
544 AudioDeviceID id[2]; // device ids
545 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
546 AudioDeviceIOProcID procId[2];
548 UInt32 iStream[2]; // device stream index (or first if using multiple)
549 UInt32 nStreams[2]; // number of streams to use
552 pthread_cond_t condition;
553 int drainCounter; // Tracks callback counts when draining
554 bool internalDrain; // Indicates if stop is initiated from callback or not.
557 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
560 RtApiCore:: RtApiCore()
562 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
563 // This is a largely undocumented but absolutely necessary
564 // requirement starting with OS-X 10.6. If not called, queries and
565 // updates to various audio device properties are not handled
567 CFRunLoopRef theRunLoop = NULL;
568 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
569 kAudioObjectPropertyScopeGlobal,
570 kAudioObjectPropertyElementMaster };
571 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
572 if ( result != noErr ) {
573 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
574 error( RtAudioError::WARNING );
579 RtApiCore :: ~RtApiCore()
581 // The subclass destructor gets called before the base class
582 // destructor, so close an existing stream before deallocating
583 // apiDeviceId memory.
584 if ( stream_.state != STREAM_CLOSED ) closeStream();
587 unsigned int RtApiCore :: getDeviceCount( void )
589 // Find out how many audio devices there are, if any.
591 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
592 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
593 if ( result != noErr ) {
594 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
595 error( RtAudioError::WARNING );
599 return dataSize / sizeof( AudioDeviceID );
602 unsigned int RtApiCore :: getDefaultInputDevice( void )
604 unsigned int nDevices = getDeviceCount();
605 if ( nDevices <= 1 ) return 0;
608 UInt32 dataSize = sizeof( AudioDeviceID );
609 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
610 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
611 if ( result != noErr ) {
612 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
613 error( RtAudioError::WARNING );
617 dataSize *= nDevices;
618 AudioDeviceID deviceList[ nDevices ];
619 property.mSelector = kAudioHardwarePropertyDevices;
620 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
621 if ( result != noErr ) {
622 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
623 error( RtAudioError::WARNING );
627 for ( unsigned int i=0; i<nDevices; i++ )
628 if ( id == deviceList[i] ) return i;
630 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
631 error( RtAudioError::WARNING );
635 unsigned int RtApiCore :: getDefaultOutputDevice( void )
637 unsigned int nDevices = getDeviceCount();
638 if ( nDevices <= 1 ) return 0;
641 UInt32 dataSize = sizeof( AudioDeviceID );
642 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
643 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
644 if ( result != noErr ) {
645 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
646 error( RtAudioError::WARNING );
650 dataSize = sizeof( AudioDeviceID ) * nDevices;
651 AudioDeviceID deviceList[ nDevices ];
652 property.mSelector = kAudioHardwarePropertyDevices;
653 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
654 if ( result != noErr ) {
655 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
656 error( RtAudioError::WARNING );
660 for ( unsigned int i=0; i<nDevices; i++ )
661 if ( id == deviceList[i] ) return i;
663 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
664 error( RtAudioError::WARNING );
668 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
670 RtAudio::DeviceInfo info;
674 unsigned int nDevices = getDeviceCount();
675 if ( nDevices == 0 ) {
676 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
677 error( RtAudioError::INVALID_USE );
681 if ( device >= nDevices ) {
682 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
683 error( RtAudioError::INVALID_USE );
687 AudioDeviceID deviceList[ nDevices ];
688 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
689 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
690 kAudioObjectPropertyScopeGlobal,
691 kAudioObjectPropertyElementMaster };
692 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
693 0, NULL, &dataSize, (void *) &deviceList );
694 if ( result != noErr ) {
695 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
696 error( RtAudioError::WARNING );
700 AudioDeviceID id = deviceList[ device ];
702 // Get the device name.
705 dataSize = sizeof( CFStringRef );
706 property.mSelector = kAudioObjectPropertyManufacturer;
707 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
708 if ( result != noErr ) {
709 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
710 errorText_ = errorStream_.str();
711 error( RtAudioError::WARNING );
715 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
716 int length = CFStringGetLength(cfname);
717 char *mname = (char *)malloc(length * 3 + 1);
718 #if defined( UNICODE ) || defined( _UNICODE )
719 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
721 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
723 info.name.append( (const char *)mname, strlen(mname) );
724 info.name.append( ": " );
728 property.mSelector = kAudioObjectPropertyName;
729 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
730 if ( result != noErr ) {
731 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
732 errorText_ = errorStream_.str();
733 error( RtAudioError::WARNING );
737 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
738 length = CFStringGetLength(cfname);
739 char *name = (char *)malloc(length * 3 + 1);
740 #if defined( UNICODE ) || defined( _UNICODE )
741 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
743 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
745 info.name.append( (const char *)name, strlen(name) );
749 // Get the output stream "configuration".
750 AudioBufferList *bufferList = nil;
751 property.mSelector = kAudioDevicePropertyStreamConfiguration;
752 property.mScope = kAudioDevicePropertyScopeOutput;
753 // property.mElement = kAudioObjectPropertyElementWildcard;
755 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
756 if ( result != noErr || dataSize == 0 ) {
757 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
758 errorText_ = errorStream_.str();
759 error( RtAudioError::WARNING );
763 // Allocate the AudioBufferList.
764 bufferList = (AudioBufferList *) malloc( dataSize );
765 if ( bufferList == NULL ) {
766 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
767 error( RtAudioError::WARNING );
771 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
772 if ( result != noErr || dataSize == 0 ) {
774 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
775 errorText_ = errorStream_.str();
776 error( RtAudioError::WARNING );
780 // Get output channel information.
781 unsigned int i, nStreams = bufferList->mNumberBuffers;
782 for ( i=0; i<nStreams; i++ )
783 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
786 // Get the input stream "configuration".
787 property.mScope = kAudioDevicePropertyScopeInput;
788 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
789 if ( result != noErr || dataSize == 0 ) {
790 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
791 errorText_ = errorStream_.str();
792 error( RtAudioError::WARNING );
796 // Allocate the AudioBufferList.
797 bufferList = (AudioBufferList *) malloc( dataSize );
798 if ( bufferList == NULL ) {
799 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
800 error( RtAudioError::WARNING );
804 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
805 if (result != noErr || dataSize == 0) {
807 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
808 errorText_ = errorStream_.str();
809 error( RtAudioError::WARNING );
813 // Get input channel information.
814 nStreams = bufferList->mNumberBuffers;
815 for ( i=0; i<nStreams; i++ )
816 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
819 // If device opens for both playback and capture, we determine the channels.
820 if ( info.outputChannels > 0 && info.inputChannels > 0 )
821 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
823 // Probe the device sample rates.
824 bool isInput = false;
825 if ( info.outputChannels == 0 ) isInput = true;
827 // Determine the supported sample rates.
828 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
829 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
830 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
831 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
832 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
833 errorText_ = errorStream_.str();
834 error( RtAudioError::WARNING );
838 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
839 AudioValueRange rangeList[ nRanges ];
840 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
841 if ( result != kAudioHardwareNoError ) {
842 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
843 errorText_ = errorStream_.str();
844 error( RtAudioError::WARNING );
848 // The sample rate reporting mechanism is a bit of a mystery. It
849 // seems that it can either return individual rates or a range of
850 // rates. I assume that if the min / max range values are the same,
851 // then that represents a single supported rate and if the min / max
852 // range values are different, the device supports an arbitrary
853 // range of values (though there might be multiple ranges, so we'll
854 // use the most conservative range).
855 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
856 bool haveValueRange = false;
857 info.sampleRates.clear();
858 for ( UInt32 i=0; i<nRanges; i++ ) {
859 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
860 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
861 info.sampleRates.push_back( tmpSr );
863 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
864 info.preferredSampleRate = tmpSr;
867 haveValueRange = true;
868 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
869 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
873 if ( haveValueRange ) {
874 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
875 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
876 info.sampleRates.push_back( SAMPLE_RATES[k] );
878 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
879 info.preferredSampleRate = SAMPLE_RATES[k];
884 // Sort and remove any redundant values
885 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
886 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
888 if ( info.sampleRates.size() == 0 ) {
889 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
890 errorText_ = errorStream_.str();
891 error( RtAudioError::WARNING );
895 // CoreAudio always uses 32-bit floating point data for PCM streams.
896 // Thus, any other "physical" formats supported by the device are of
897 // no interest to the client.
898 info.nativeFormats = RTAUDIO_FLOAT32;
900 if ( info.outputChannels > 0 )
901 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
902 if ( info.inputChannels > 0 )
903 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
909 static OSStatus callbackHandler( AudioDeviceID inDevice,
910 const AudioTimeStamp* /*inNow*/,
911 const AudioBufferList* inInputData,
912 const AudioTimeStamp* /*inInputTime*/,
913 AudioBufferList* outOutputData,
914 const AudioTimeStamp* /*inOutputTime*/,
917 CallbackInfo *info = (CallbackInfo *) infoPointer;
919 RtApiCore *object = (RtApiCore *) info->object;
920 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
921 return kAudioHardwareUnspecifiedError;
923 return kAudioHardwareNoError;
926 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
928 const AudioObjectPropertyAddress properties[],
929 void* handlePointer )
931 CoreHandle *handle = (CoreHandle *) handlePointer;
932 for ( UInt32 i=0; i<nAddresses; i++ ) {
933 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
934 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
935 handle->xrun[1] = true;
937 handle->xrun[0] = true;
941 return kAudioHardwareNoError;
944 static OSStatus rateListener( AudioObjectID inDevice,
945 UInt32 /*nAddresses*/,
946 const AudioObjectPropertyAddress /*properties*/[],
949 Float64 *rate = (Float64 *) ratePointer;
950 UInt32 dataSize = sizeof( Float64 );
951 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
952 kAudioObjectPropertyScopeGlobal,
953 kAudioObjectPropertyElementMaster };
954 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
955 return kAudioHardwareNoError;
// Open and configure a CoreAudio device for one stream direction.
//
// Validates the device index, maps the requested channel range onto one or
// more CoreAudio streams, negotiates buffer size, sample rate, virtual and
// physical formats, allocates the CoreHandle and user/device buffers, and
// installs the IOProc callback plus an overload (xrun) listener.
//
//   device       index into RtAudio's device list (not an AudioDeviceID)
//   mode         OUTPUT or INPUT — direction being configured
//   channels     number of user channels requested
//   firstChannel first device channel to use (channel offset)
//   sampleRate   desired nominal sample rate in Hz
//   format       user sample format (device side is always FLOAT32)
//   bufferSize   in/out: desired frames per buffer, clamped to device range
//   options      optional stream flags (hog mode, minimize latency, ...)
// Returns SUCCESS/FAILURE (error paths are partly elided in this chunk).
958 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
959 unsigned int firstChannel, unsigned int sampleRate,
960 RtAudioFormat format, unsigned int *bufferSize,
961 RtAudio::StreamOptions *options )
964 unsigned int nDevices = getDeviceCount();
965 if ( nDevices == 0 ) {
966 // This should not happen because a check is made before this function is called.
967 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
971 if ( device >= nDevices ) {
972 // This should not happen because a check is made before this function is called.
973 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// NOTE(review): variable-length array — a compiler extension, not standard C++.
977 AudioDeviceID deviceList[ nDevices ];
978 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
979 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
980 kAudioObjectPropertyScopeGlobal,
981 kAudioObjectPropertyElementMaster };
982 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
983 0, NULL, &dataSize, (void *) &deviceList );
984 if ( result != noErr ) {
985 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
// Translate RtAudio's device index into the CoreAudio AudioDeviceID.
989 AudioDeviceID id = deviceList[ device ];
991 // Setup for stream mode.
992 bool isInput = false;
993 if ( mode == INPUT ) {
995 property.mScope = kAudioDevicePropertyScopeInput;
998 property.mScope = kAudioDevicePropertyScopeOutput;
1000 // Get the stream "configuration".
1001 AudioBufferList *bufferList = nil;
1003 property.mSelector = kAudioDevicePropertyStreamConfiguration;
1004 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
1005 if ( result != noErr || dataSize == 0 ) {
1006 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
1007 errorText_ = errorStream_.str();
1011 // Allocate the AudioBufferList.
1012 bufferList = (AudioBufferList *) malloc( dataSize );
1013 if ( bufferList == NULL ) {
1014 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1018 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1019 if (result != noErr || dataSize == 0) {
1021 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1022 errorText_ = errorStream_.str();
1026 // Search for one or more streams that contain the desired number of
1027 // channels. CoreAudio devices can have an arbitrary number of
1028 // streams and each stream can have an arbitrary number of channels.
1029 // For each stream, a single buffer of interleaved samples is
1030 // provided. RtAudio prefers the use of one stream of interleaved
1031 // data or multiple consecutive single-channel streams. However, we
1032 // now support multiple consecutive multi-channel streams of
1033 // interleaved data as well.
1034 UInt32 iStream, offsetCounter = firstChannel;
1035 UInt32 nStreams = bufferList->mNumberBuffers;
1036 bool monoMode = false;
1037 bool foundStream = false;
1039 // First check that the device supports the requested number of
1041 UInt32 deviceChannels = 0;
1042 for ( iStream=0; iStream<nStreams; iStream++ )
1043 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1045 if ( deviceChannels < ( channels + firstChannel ) ) {
1047 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1048 errorText_ = errorStream_.str();
1052 // Look for a single stream meeting our needs.
1053 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1054 for ( iStream=0; iStream<nStreams; iStream++ ) {
1055 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1056 if ( streamChannels >= channels + offsetCounter ) {
1057 firstStream = iStream;
1058 channelOffset = offsetCounter;
// If the requested range starts inside this stream but doesn't fit,
// no later single stream can hold it either — stop scanning.
1062 if ( streamChannels > offsetCounter ) break;
1063 offsetCounter -= streamChannels;
1066 // If we didn't find a single stream above, then we should be able
1067 // to meet the channel specification with multiple streams.
1068 if ( foundStream == false ) {
1070 offsetCounter = firstChannel;
// Skip whole streams until the one containing firstChannel.
1071 for ( iStream=0; iStream<nStreams; iStream++ ) {
1072 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1073 if ( streamChannels > offsetCounter ) break;
1074 offsetCounter -= streamChannels;
1077 firstStream = iStream;
1078 channelOffset = offsetCounter;
1079 Int32 channelCounter = channels + offsetCounter - streamChannels;
// monoMode tracks whether ALL participating streams are single-channel
// (true mono layout); one multi-channel stream clears it.
1081 if ( streamChannels > 1 ) monoMode = false;
1082 while ( channelCounter > 0 ) {
1083 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1084 if ( streamChannels > 1 ) monoMode = false;
1085 channelCounter -= streamChannels;
1092 // Determine the buffer size.
1093 AudioValueRange bufferRange;
1094 dataSize = sizeof( AudioValueRange );
1095 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1096 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1098 if ( result != noErr ) {
1099 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1100 errorText_ = errorStream_.str();
// Clamp the requested buffer size into the device's supported range.
1104 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1105 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1106 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1108 // Set the buffer size. For multiple streams, I'm assuming we only
1109 // need to make this setting for the master channel.
1110 UInt32 theSize = (UInt32) *bufferSize;
1111 dataSize = sizeof( UInt32 );
1112 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1113 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1115 if ( result != noErr ) {
1116 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1117 errorText_ = errorStream_.str();
1121 // If attempting to setup a duplex stream, the bufferSize parameter
1122 // MUST be the same in both directions!
1123 *bufferSize = theSize;
1124 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1125 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1126 errorText_ = errorStream_.str();
1130 stream_.bufferSize = *bufferSize;
1131 stream_.nBuffers = 1;
1133 // Try to set "hog" mode ... it's not clear to me this is working.
1134 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1136 dataSize = sizeof( hog_pid );
1137 property.mSelector = kAudioDevicePropertyHogMode;
1138 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1139 if ( result != noErr ) {
1140 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1141 errorText_ = errorStream_.str();
// Only take exclusive ownership if some other process (or none) holds it.
1145 if ( hog_pid != getpid() ) {
1147 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1148 if ( result != noErr ) {
1149 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1150 errorText_ = errorStream_.str();
1156 // Check and if necessary, change the sample rate for the device.
1157 Float64 nominalRate;
1158 dataSize = sizeof( Float64 );
1159 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1160 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1161 if ( result != noErr ) {
1162 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1163 errorText_ = errorStream_.str();
1167 // Only change the sample rate if off by more than 1 Hz.
1168 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1170 // Set a property listener for the sample rate change
1171 Float64 reportedRate = 0.0;
1172 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
// rateListener writes the device's current rate into reportedRate
// each time the nominal-rate property changes.
1173 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1174 if ( result != noErr ) {
1175 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1176 errorText_ = errorStream_.str();
1180 nominalRate = (Float64) sampleRate;
1181 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1182 if ( result != noErr ) {
1183 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1184 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1185 errorText_ = errorStream_.str();
1189 // Now wait until the reported nominal rate is what we just set.
// Polls up to ~5 seconds (counted in 5 ms steps; the sleep between
// iterations is elided from this chunk — presumably usleep(5000)).
1190 UInt32 microCounter = 0;
1191 while ( reportedRate != nominalRate ) {
1192 microCounter += 5000;
1193 if ( microCounter > 5000000 ) break;
1197 // Remove the property listener.
1198 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1200 if ( microCounter > 5000000 ) {
1201 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1202 errorText_ = errorStream_.str();
1207 // Now set the stream format for all streams. Also, check the
1208 // physical format of the device and change that if necessary.
1209 AudioStreamBasicDescription description;
1210 dataSize = sizeof( AudioStreamBasicDescription );
1211 property.mSelector = kAudioStreamPropertyVirtualFormat;
1212 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1213 if ( result != noErr ) {
1214 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1215 errorText_ = errorStream_.str();
1219 // Set the sample rate and data format id. However, only make the
1220 // change if the sample rate is not within 1.0 of the desired
1221 // rate and the format is not linear pcm.
1222 bool updateFormat = false;
1223 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1224 description.mSampleRate = (Float64) sampleRate;
1225 updateFormat = true;
1228 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1229 description.mFormatID = kAudioFormatLinearPCM;
1230 updateFormat = true;
1233 if ( updateFormat ) {
1234 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1235 if ( result != noErr ) {
1236 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1237 errorText_ = errorStream_.str();
1242 // Now check the physical format.
1243 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1244 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1245 if ( result != noErr ) {
1246 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1247 errorText_ = errorStream_.str();
1251 //std::cout << "Current physical stream format:" << std::endl;
1252 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1253 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1254 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1255 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;
// If the physical format isn't at least 16-bit linear PCM, try a ranked
// list of candidate physical formats until one is accepted.
1257 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1258 description.mFormatID = kAudioFormatLinearPCM;
1259 //description.mSampleRate = (Float64) sampleRate;
1260 AudioStreamBasicDescription testDescription = description;
1263 // We'll try higher bit rates first and then work our way down.
// NOTE(review): the element type here is pair<UInt32, UInt32>, but the
// push_back calls below construct pair<Float32, UInt32> with fractional
// keys (24.2, 24.4) that distinguish the three 24-bit layouts. Upstream
// RtAudio declares this as pair<Float32, UInt32>; with UInt32 the
// fractions truncate and the distinction is lost — confirm against the
// canonical source.
1264 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1265 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1266 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1267 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1268 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1269 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
1270 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1271 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1272 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1273 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1274 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1275 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1276 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1278 bool setPhysicalFormat = false;
1279 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1280 testDescription = description;
1281 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1282 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): '~' here is bitwise NOT, so this sub-expression is
// non-zero (truthy) for any flag value — the intent is almost certainly
// logical '!', i.e. "24-bit AND not packed uses 4 bytes per sample".
// As written, every 24-bit candidate takes the 4-byte frame branch.
1283 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1284 testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
1286 testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1287 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1288 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1289 if ( result == noErr ) {
1290 setPhysicalFormat = true;
1291 //std::cout << "Updated physical stream format:" << std::endl;
1292 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1293 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1294 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1295 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
1300 if ( !setPhysicalFormat ) {
1301 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1302 errorText_ = errorStream_.str();
1305 } // done setting virtual/physical formats.
1307 // Get the stream / device latency.
1309 dataSize = sizeof( UInt32 );
1310 property.mSelector = kAudioDevicePropertyLatency;
1311 if ( AudioObjectHasProperty( id, &property ) == true ) {
1312 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1313 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
// Latency is informational only — failure is a warning, not fatal.
1315 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1316 errorText_ = errorStream_.str();
1317 error( RtAudioError::WARNING );
1321 // Byte-swapping: According to AudioHardware.h, the stream data will
1322 // always be presented in native-endian format, so we should never
1323 // need to byte swap.
1324 stream_.doByteSwap[mode] = false;
1326 // From the CoreAudio documentation, PCM data must be supplied as
1328 stream_.userFormat = format;
// CoreAudio's virtual format is always 32-bit float on the device side.
1329 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1331 if ( streamCount == 1 )
1332 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1333 else // multiple streams
1334 stream_.nDeviceChannels[mode] = channels;
1335 stream_.nUserChannels[mode] = channels;
1336 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
1337 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1338 else stream_.userInterleaved = true;
1339 stream_.deviceInterleaved[mode] = true;
1340 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1342 // Set flags for buffer conversion.
1343 stream_.doConvertBuffer[mode] = false;
1344 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1345 stream_.doConvertBuffer[mode] = true;
1346 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1347 stream_.doConvertBuffer[mode] = true;
1348 if ( streamCount == 1 ) {
1349 if ( stream_.nUserChannels[mode] > 1 &&
1350 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1351 stream_.doConvertBuffer[mode] = true;
1353 else if ( monoMode && stream_.userInterleaved )
1354 stream_.doConvertBuffer[mode] = true;
1356 // Allocate our CoreHandle structure for the stream.
// For duplex streams on the same device, the handle from the first
// (output) open is reused; otherwise allocate a fresh one.
1357 CoreHandle *handle = 0;
1358 if ( stream_.apiHandle == 0 ) {
1360 handle = new CoreHandle;
1362 catch ( std::bad_alloc& ) {
1363 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1367 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1368 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1371 stream_.apiHandle = (void *) handle;
1374 handle = (CoreHandle *) stream_.apiHandle;
1375 handle->iStream[mode] = firstStream;
1376 handle->nStreams[mode] = streamCount;
1377 handle->id[mode] = id;
1379 // Allocate necessary internal buffers.
1380 unsigned long bufferBytes;
1381 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1382 //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1383 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
// NOTE(review): memset runs BEFORE the NULL check below — a failed
// malloc would crash here rather than reach the error branch.
1384 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1385 if ( stream_.userBuffer[mode] == NULL ) {
1386 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1390 // If possible, we will make use of the CoreAudio stream buffers as
1391 // "device buffers".  However, we can't do this if using multiple
1393 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1395 bool makeBuffer = true;
1396 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1397 if ( mode == INPUT ) {
// Reuse the output-side device buffer for input if it is big enough.
1398 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1399 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1400 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1405 bufferBytes *= *bufferSize;
1406 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1407 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1408 if ( stream_.deviceBuffer == NULL ) {
1409 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1415 stream_.sampleRate = sampleRate;
1416 stream_.device[mode] = device;
1417 stream_.state = STREAM_STOPPED;
1418 stream_.callbackInfo.object = (void *) this;
1420 // Setup the buffer conversion information structure.
1421 if ( stream_.doConvertBuffer[mode] ) {
1422 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1423 else setConvertInfo( mode, channelOffset );
1426 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1427 // Only one callback procedure per device.
1428 stream_.mode = DUPLEX;
1430 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1431 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1433 // deprecated in favor of AudioDeviceCreateIOProcID()
1434 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1436 if ( result != noErr ) {
1437 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1438 errorText_ = errorStream_.str();
1441 if ( stream_.mode == OUTPUT && mode == INPUT )
1442 stream_.mode = DUPLEX;
1444 stream_.mode = mode;
1447 // Setup the device property listener for over/underload.
1448 property.mSelector = kAudioDeviceProcessorOverload;
1449 property.mScope = kAudioObjectPropertyScopeGlobal;
1450 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// Error-cleanup path: tear down the condition variable, handle, and all
// allocated buffers, then mark the stream closed.
1456 pthread_cond_destroy( &handle->condition );
1458 stream_.apiHandle = 0;
1461 for ( int i=0; i<2; i++ ) {
1462 if ( stream_.userBuffer[i] ) {
1463 free( stream_.userBuffer[i] );
1464 stream_.userBuffer[i] = 0;
1468 if ( stream_.deviceBuffer ) {
1469 free( stream_.deviceBuffer );
1470 stream_.deviceBuffer = 0;
1473 stream_.state = STREAM_CLOSED;
// Close an open stream: remove the xrun property listeners, stop the
// device(s) if running, destroy the IOProc(s), free user/device buffers and
// the CoreHandle, and reset the stream to UNINITIALIZED/CLOSED.
1477 void RtApiCore :: closeStream( void )
1479 if ( stream_.state == STREAM_CLOSED ) {
1480 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1481 error( RtAudioError::WARNING );
1485 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output-side teardown (handle->id[0]).
1486 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1488 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1489 kAudioObjectPropertyScopeGlobal,
1490 kAudioObjectPropertyElementMaster };
1492 property.mSelector = kAudioDeviceProcessorOverload;
1493 property.mScope = kAudioObjectPropertyScopeGlobal;
1494 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1495 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1496 error( RtAudioError::WARNING );
1499 if ( stream_.state == STREAM_RUNNING )
1500 AudioDeviceStop( handle->id[0], callbackHandler );
1501 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1502 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1504 // deprecated in favor of AudioDeviceDestroyIOProcID()
1505 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input-side teardown (handle->id[1]) — skipped for duplex on a single
// device, since that shares the output device's IOProc.
1509 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1511 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1512 kAudioObjectPropertyScopeGlobal,
1513 kAudioObjectPropertyElementMaster };
1515 property.mSelector = kAudioDeviceProcessorOverload;
1516 property.mScope = kAudioObjectPropertyScopeGlobal;
1517 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1518 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1519 error( RtAudioError::WARNING );
1522 if ( stream_.state == STREAM_RUNNING )
1523 AudioDeviceStop( handle->id[1], callbackHandler );
1524 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1525 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1527 // deprecated in favor of AudioDeviceDestroyIOProcID()
1528 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release both user buffers (index 0 = output, 1 = input).
1532 for ( int i=0; i<2; i++ ) {
1533 if ( stream_.userBuffer[i] ) {
1534 free( stream_.userBuffer[i] );
1535 stream_.userBuffer[i] = 0;
1539 if ( stream_.deviceBuffer ) {
1540 free( stream_.deviceBuffer );
1541 stream_.deviceBuffer = 0;
1544 // Destroy pthread condition variable.
1545 pthread_cond_destroy( &handle->condition );
1547 stream_.apiHandle = 0;
1549 stream_.mode = UNINITIALIZED;
1550 stream_.state = STREAM_CLOSED;
// Start the stream's IOProc(s) via AudioDeviceStart. For duplex on two
// distinct devices, both the output (id[0]) and input (id[1]) devices are
// started; a single duplex device shares one IOProc. Resets the drain
// state and moves the stream to STREAM_RUNNING. Raises SYSTEM_ERROR on any
// CoreAudio failure, WARNING if already running.
1553 void RtApiCore :: startStream( void )
1556 if ( stream_.state == STREAM_RUNNING ) {
1557 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1558 error( RtAudioError::WARNING );
1562 OSStatus result = noErr;
1563 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1564 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1566 result = AudioDeviceStart( handle->id[0], callbackHandler );
1567 if ( result != noErr ) {
1568 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1569 errorText_ = errorStream_.str();
1574 if ( stream_.mode == INPUT ||
1575 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1577 result = AudioDeviceStart( handle->id[1], callbackHandler );
1578 if ( result != noErr ) {
1579 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1580 errorText_ = errorStream_.str();
// drainCounter/internalDrain control the stop-after-drain handshake in
// callbackEvent(); reset them for the new run.
1585 handle->drainCounter = 0;
1586 handle->internalDrain = false;
1587 stream_.state = STREAM_RUNNING;
1590 if ( result == noErr ) return;
1591 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: for output/duplex, first let the callback
// drain pending output (blocks on the handle's condition variable until
// callbackEvent() signals completion), then AudioDeviceStop each device.
// Raises SYSTEM_ERROR on CoreAudio failure, WARNING if already stopped.
1594 void RtApiCore :: stopStream( void )
1597 if ( stream_.state == STREAM_STOPPED ) {
1598 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1599 error( RtAudioError::WARNING );
1603 OSStatus result = noErr;
1604 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1605 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain in progress: request one (value 2 =
// externally-initiated) and wait for the callback to signal it is done.
1607 if ( handle->drainCounter == 0 ) {
1608 handle->drainCounter = 2;
1609 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1612 result = AudioDeviceStop( handle->id[0], callbackHandler );
1613 if ( result != noErr ) {
1614 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1615 errorText_ = errorStream_.str();
// Stop the input device separately only when it differs from the output.
1620 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1622 result = AudioDeviceStop( handle->id[1], callbackHandler );
1623 if ( result != noErr ) {
1624 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1625 errorText_ = errorStream_.str();
1630 stream_.state = STREAM_STOPPED;
1633 if ( result == noErr ) return;
1634 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream: mark the drain as already satisfied (drainCounter = 2)
// so the draining wait is skipped, then stop immediately (the trailing
// stopStream() call is elided from this chunk). Warns if already stopped.
1637 void RtApiCore :: abortStream( void )
1640 if ( stream_.state == STREAM_STOPPED ) {
1641 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1642 error( RtAudioError::WARNING );
1646 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Setting drainCounter to 2 makes callbackEvent() output zeros and lets
// stopStream() proceed without waiting for an output drain.
1647 handle->drainCounter = 2;
1652 // This function will be called by a spawned thread when the user
1653 // callback function signals that the stream should be stopped or
1654 // aborted.  It is better to handle it this way because the
1655 // callbackEvent() function probably should return before the AudioDeviceStop()
1656 // function is called.
// Thread entry point: unpacks the CallbackInfo to reach the RtApiCore
// instance and calls its stopStream(), then exits the thread.
1657 static void *coreStopStream( void *ptr )
1659 CallbackInfo *info = (CallbackInfo *) ptr;
1660 RtApiCore *object = (RtApiCore *) info->object;
1662 object->stopStream();
1663 pthread_exit( NULL );
// Per-buffer IOProc work: invoked (via callbackHandler) for each CoreAudio
// render/capture cycle on deviceId. Runs the user callback to produce
// output data, copies/converts between the user buffers and the CoreAudio
// stream buffers (handling single-stream, mono multi-stream, and
// multi-channel multi-stream layouts), and manages the drain/stop
// handshake with stopStream(). Returns SUCCESS/FAILURE.
// NOTE(review): this function continues past the end of this chunk; the
// input-conversion tail and final return are not visible here.
1666 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1667 const AudioBufferList *inBufferList,
1668 const AudioBufferList *outBufferList )
1670 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1671 if ( stream_.state == STREAM_CLOSED ) {
1672 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1673 error( RtAudioError::WARNING );
1677 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1678 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1680 // Check if we were draining the stream and signal is finished.
1681 if ( handle->drainCounter > 3 ) {
1682 ThreadHandle threadId;
1684 stream_.state = STREAM_STOPPING;
// Internal drain (callback returned 1): spawn a thread to call
// stopStream() so this IOProc can return first. External drain:
// stopStream() is already blocked on the condition — just signal it.
1685 if ( handle->internalDrain == true )
1686 pthread_create( &threadId, NULL, coreStopStream, info );
1687 else // external call to stopStream()
1688 pthread_cond_signal( &handle->condition );
1692 AudioDeviceID outputDevice = handle->id[0];
1694 // Invoke user callback to get fresh output data UNLESS we are
1695 // draining stream or duplex mode AND the input/output devices are
1696 // different AND this function is called for the input device.
1697 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1698 RtAudioCallback callback = (RtAudioCallback) info->callback;
1699 double streamTime = getStreamTime();
1700 RtAudioStreamStatus status = 0;
// Report and clear any xruns recorded by xrunListener since last cycle.
1701 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1702 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1703 handle->xrun[0] = false;
1705 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1706 status |= RTAUDIO_INPUT_OVERFLOW;
1707 handle->xrun[1] = false;
// Callback return contract: 0 = continue, 1 = drain then stop
// (internal), 2 = abort as soon as possible.
1710 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1711 stream_.bufferSize, streamTime, status, info->userData );
1712 if ( cbReturnValue == 2 ) {
1713 stream_.state = STREAM_STOPPING;
1714 handle->drainCounter = 2;
1718 else if ( cbReturnValue == 1 ) {
1719 handle->drainCounter = 1;
1720 handle->internalDrain = true;
// ---- Output side: fill the CoreAudio output stream buffer(s). ----
1724 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1726 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1728 if ( handle->nStreams[0] == 1 ) {
1729 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1731 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1733 else { // fill multiple streams with zeros
1734 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1735 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1737 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1741 else if ( handle->nStreams[0] == 1 ) {
1742 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1743 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1744 stream_.userBuffer[0], stream_.convertInfo[0] );
1746 else { // copy from user buffer
1747 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1748 stream_.userBuffer[0],
1749 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1752 else { // fill multiple streams
1753 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1754 if ( stream_.doConvertBuffer[0] ) {
1755 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1756 inBuffer = (Float32 *) stream_.deviceBuffer;
// Mono mode: one single-channel stream per user channel, each holding
// one de-interleaved channel's worth of frames.
1759 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1760 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1761 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1762 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1763 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1766 else { // fill multiple multi-channel streams with interleaved data
1767 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1770 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1771 UInt32 inChannels = stream_.nUserChannels[0];
1772 if ( stream_.doConvertBuffer[0] ) {
1773 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1774 inChannels = stream_.nDeviceChannels[0];
// inOffset is the distance between successive channel samples in the
// source: 1 if interleaved, a full frame-buffer stride if planar.
1777 if ( inInterleaved ) inOffset = 1;
1778 else inOffset = stream_.bufferSize;
1780 channelsLeft = inChannels;
1781 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1783 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1784 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1787 // Account for possible channel offset in first stream
1788 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1789 streamChannels -= stream_.channelOffset[0];
1790 outJump = stream_.channelOffset[0];
1794 // Account for possible unfilled channels at end of the last stream
1795 if ( streamChannels > channelsLeft ) {
1796 outJump = streamChannels - channelsLeft;
1797 streamChannels = channelsLeft;
1800 // Determine input buffer offsets and skips
1801 if ( inInterleaved ) {
1802 inJump = inChannels;
1803 in += inChannels - channelsLeft;
1807 in += (inChannels - channelsLeft) * inOffset;
// Copy frame-by-frame: inner loop walks this stream's channels.
1810 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1811 for ( unsigned int j=0; j<streamChannels; j++ ) {
1812 *out++ = in[j*inOffset];
1817 channelsLeft -= streamChannels;
// ---- Input side: capture into the user/device buffer(s). ----
1823 // Don't bother draining input
1824 if ( handle->drainCounter ) {
1825 handle->drainCounter++;
1829 AudioDeviceID inputDevice;
1830 inputDevice = handle->id[1];
1831 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1833 if ( handle->nStreams[1] == 1 ) {
1834 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1835 convertBuffer( stream_.userBuffer[1],
1836 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1837 stream_.convertInfo[1] );
1839 else { // copy to user buffer
1840 memcpy( stream_.userBuffer[1],
1841 inBufferList->mBuffers[handle->iStream[1]].mData,
1842 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1845 else { // read from multiple streams
1846 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1847 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1849 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1850 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1851 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1852 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1853 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1856 else { // read from multiple multi-channel streams
// Mirror of the output multi-stream case, with roles reversed:
// gather interleaved device streams into the (user or device) buffer.
1857 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1860 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1861 UInt32 outChannels = stream_.nUserChannels[1];
1862 if ( stream_.doConvertBuffer[1] ) {
1863 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1864 outChannels = stream_.nDeviceChannels[1];
1867 if ( outInterleaved ) outOffset = 1;
1868 else outOffset = stream_.bufferSize;
1870 channelsLeft = outChannels;
1871 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1873 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1874 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1877 // Account for possible channel offset in first stream
1878 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1879 streamChannels -= stream_.channelOffset[1];
1880 inJump = stream_.channelOffset[1];
1884 // Account for possible unread channels at end of the last stream
1885 if ( streamChannels > channelsLeft ) {
1886 inJump = streamChannels - channelsLeft;
1887 streamChannels = channelsLeft;
1890 // Determine output buffer offsets and skips
1891 if ( outInterleaved ) {
1892 outJump = outChannels;
1893 out += outChannels - channelsLeft;
1897 out += (outChannels - channelsLeft) * outOffset;
1900 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1901 for ( unsigned int j=0; j<streamChannels; j++ ) {
1902 out[j*outOffset] = *in++;
1907 channelsLeft -= streamChannels;
1911 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1912 convertBuffer( stream_.userBuffer[1],
1913 stream_.deviceBuffer,
1914 stream_.convertInfo[1] );
1920 //MUTEX_UNLOCK( &stream_.mutex );
1922 RtApi::tickStreamTime();
1926 const char* RtApiCore :: getErrorCode( OSStatus code )
1930 case kAudioHardwareNotRunningError:
1931 return "kAudioHardwareNotRunningError";
1933 case kAudioHardwareUnspecifiedError:
1934 return "kAudioHardwareUnspecifiedError";
1936 case kAudioHardwareUnknownPropertyError:
1937 return "kAudioHardwareUnknownPropertyError";
1939 case kAudioHardwareBadPropertySizeError:
1940 return "kAudioHardwareBadPropertySizeError";
1942 case kAudioHardwareIllegalOperationError:
1943 return "kAudioHardwareIllegalOperationError";
1945 case kAudioHardwareBadObjectError:
1946 return "kAudioHardwareBadObjectError";
1948 case kAudioHardwareBadDeviceError:
1949 return "kAudioHardwareBadDeviceError";
1951 case kAudioHardwareBadStreamError:
1952 return "kAudioHardwareBadStreamError";
1954 case kAudioHardwareUnsupportedOperationError:
1955 return "kAudioHardwareUnsupportedOperationError";
1957 case kAudioDeviceUnsupportedFormatError:
1958 return "kAudioDeviceUnsupportedFormatError";
1960 case kAudioDevicePermissionsError:
1961 return "kAudioDevicePermissionsError";
1964 return "CoreAudio unknown error";
1968 //******************** End of __MACOSX_CORE__ *********************//
1971 #if defined(__UNIX_JACK__)
1973 // JACK is a low-latency audio server, originally written for the
1974 // GNU/Linux operating system and now also ported to OS-X. It can
1975 // connect a number of different applications to an audio device, as
1976 // well as allowing them to share audio between themselves.
1978 // When using JACK with RtAudio, "devices" refer to JACK clients that
1979 // have ports connected to the server. The JACK server is typically
1980 // started in a terminal as follows:
1982 // .jackd -d alsa -d hw:0
1984 // or through an interface program such as qjackctl. Many of the
1985 // parameters normally set for a stream are fixed by the JACK server
1986 // and can be specified when the JACK server is started. In
1989 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1991 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1992 // frames, and number of buffers = 4. Once the server is running, it
1993 // is not possible to override these values. If the values are not
1994 // specified in the command-line, the JACK server uses default values.
1996 // The JACK server does not have to be running when an instance of
1997 // RtApiJack is created, though the function getDeviceCount() will
1998 // report 0 devices found until JACK has been started. When no
1999 // devices are available (i.e., the JACK server is not running), a
2000 // stream cannot be opened.
2002 #include <jack/jack.h>
2006 // A structure to hold various information related to the Jack API
2009 jack_client_t *client;
2010 jack_port_t **ports[2];
2011 std::string deviceName[2];
2013 pthread_cond_t condition;
2014 int drainCounter; // Tracks callback counts when draining
2015 bool internalDrain; // Indicates if stop is initiated from callback or not.
2018 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
#if !defined(__RTAUDIO_DEBUG__)
// No-op error handler installed into JACK (via jack_set_error_function)
// to suppress JACK's internal error reporting in non-debug builds.
static void jackSilentError( const char * ) {};
#endif
2025 RtApiJack :: RtApiJack()
2026 :shouldAutoconnect_(true) {
2027 // Nothing to do here.
2028 #if !defined(__RTAUDIO_DEBUG__)
2029 // Turn off Jack's internal error reporting.
2030 jack_set_error_function( &jackSilentError );
2034 RtApiJack :: ~RtApiJack()
2036 if ( stream_.state != STREAM_CLOSED ) closeStream();
2039 unsigned int RtApiJack :: getDeviceCount( void )
2041 // See if we can become a jack client.
2042 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2043 jack_status_t *status = NULL;
2044 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2045 if ( client == 0 ) return 0;
2048 std::string port, previousPort;
2049 unsigned int nChannels = 0, nDevices = 0;
2050 ports = jack_get_ports( client, NULL, NULL, 0 );
2052 // Parse the port names up to the first colon (:).
2055 port = (char *) ports[ nChannels ];
2056 iColon = port.find(":");
2057 if ( iColon != std::string::npos ) {
2058 port = port.substr( 0, iColon + 1 );
2059 if ( port != previousPort ) {
2061 previousPort = port;
2064 } while ( ports[++nChannels] );
2068 jack_client_close( client );
2072 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2074 RtAudio::DeviceInfo info;
2075 info.probed = false;
2077 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2078 jack_status_t *status = NULL;
2079 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2080 if ( client == 0 ) {
2081 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2082 error( RtAudioError::WARNING );
2087 std::string port, previousPort;
2088 unsigned int nPorts = 0, nDevices = 0;
2089 ports = jack_get_ports( client, NULL, NULL, 0 );
2091 // Parse the port names up to the first colon (:).
2094 port = (char *) ports[ nPorts ];
2095 iColon = port.find(":");
2096 if ( iColon != std::string::npos ) {
2097 port = port.substr( 0, iColon );
2098 if ( port != previousPort ) {
2099 if ( nDevices == device ) info.name = port;
2101 previousPort = port;
2104 } while ( ports[++nPorts] );
2108 if ( device >= nDevices ) {
2109 jack_client_close( client );
2110 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2111 error( RtAudioError::INVALID_USE );
2115 // Get the current jack server sample rate.
2116 info.sampleRates.clear();
2118 info.preferredSampleRate = jack_get_sample_rate( client );
2119 info.sampleRates.push_back( info.preferredSampleRate );
2121 // Count the available ports containing the client name as device
2122 // channels. Jack "input ports" equal RtAudio output channels.
2123 unsigned int nChannels = 0;
2124 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
2126 while ( ports[ nChannels ] ) nChannels++;
2128 info.outputChannels = nChannels;
2131 // Jack "output ports" equal RtAudio input channels.
2133 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
2135 while ( ports[ nChannels ] ) nChannels++;
2137 info.inputChannels = nChannels;
2140 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2141 jack_client_close(client);
2142 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2143 error( RtAudioError::WARNING );
2147 // If device opens for both playback and capture, we determine the channels.
2148 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2149 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2151 // Jack always uses 32-bit floats.
2152 info.nativeFormats = RTAUDIO_FLOAT32;
2154 // Jack doesn't provide default devices so we'll use the first available one.
2155 if ( device == 0 && info.outputChannels > 0 )
2156 info.isDefaultOutput = true;
2157 if ( device == 0 && info.inputChannels > 0 )
2158 info.isDefaultInput = true;
2160 jack_client_close(client);
2165 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2167 CallbackInfo *info = (CallbackInfo *) infoPointer;
2169 RtApiJack *object = (RtApiJack *) info->object;
2170 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2175 // This function will be called by a spawned thread when the Jack
2176 // server signals that it is shutting down. It is necessary to handle
2177 // it this way because the jackShutdown() function must return before
2178 // the jack_deactivate() function (in closeStream()) will return.
2179 static void *jackCloseStream( void *ptr )
2181 CallbackInfo *info = (CallbackInfo *) ptr;
2182 RtApiJack *object = (RtApiJack *) info->object;
2184 object->closeStream();
2186 pthread_exit( NULL );
2188 static void jackShutdown( void *infoPointer )
2190 CallbackInfo *info = (CallbackInfo *) infoPointer;
2191 RtApiJack *object = (RtApiJack *) info->object;
2193 // Check current stream state. If stopped, then we'll assume this
2194 // was called as a result of a call to RtApiJack::stopStream (the
2195 // deactivation of a client handle causes this function to be called).
2196 // If not, we'll assume the Jack server is shutting down or some
2197 // other problem occurred and we should close the stream.
2198 if ( object->isStreamRunning() == false ) return;
2200 ThreadHandle threadId;
2201 pthread_create( &threadId, NULL, jackCloseStream, info );
2202 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2205 static int jackXrun( void *infoPointer )
2207 JackHandle *handle = *((JackHandle **) infoPointer);
2209 if ( handle->ports[0] ) handle->xrun[0] = true;
2210 if ( handle->ports[1] ) handle->xrun[1] = true;
2215 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2216 unsigned int firstChannel, unsigned int sampleRate,
2217 RtAudioFormat format, unsigned int *bufferSize,
2218 RtAudio::StreamOptions *options )
2220 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2222 // Look for jack server and try to become a client (only do once per stream).
2223 jack_client_t *client = 0;
2224 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2225 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2226 jack_status_t *status = NULL;
2227 if ( options && !options->streamName.empty() )
2228 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2230 client = jack_client_open( "RtApiJack", jackoptions, status );
2231 if ( client == 0 ) {
2232 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2233 error( RtAudioError::WARNING );
2238 // The handle must have been created on an earlier pass.
2239 client = handle->client;
2243 std::string port, previousPort, deviceName;
2244 unsigned int nPorts = 0, nDevices = 0;
2245 ports = jack_get_ports( client, NULL, NULL, 0 );
2247 // Parse the port names up to the first colon (:).
2250 port = (char *) ports[ nPorts ];
2251 iColon = port.find(":");
2252 if ( iColon != std::string::npos ) {
2253 port = port.substr( 0, iColon );
2254 if ( port != previousPort ) {
2255 if ( nDevices == device ) deviceName = port;
2257 previousPort = port;
2260 } while ( ports[++nPorts] );
2264 if ( device >= nDevices ) {
2265 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2269 // Count the available ports containing the client name as device
2270 // channels. Jack "input ports" equal RtAudio output channels.
2271 unsigned int nChannels = 0;
2272 unsigned long flag = JackPortIsInput;
2273 if ( mode == INPUT ) flag = JackPortIsOutput;
2274 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2276 while ( ports[ nChannels ] ) nChannels++;
2280 // Compare the jack ports for specified client to the requested number of channels.
2281 if ( nChannels < (channels + firstChannel) ) {
2282 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2283 errorText_ = errorStream_.str();
2287 // Check the jack server sample rate.
2288 unsigned int jackRate = jack_get_sample_rate( client );
2289 if ( sampleRate != jackRate ) {
2290 jack_client_close( client );
2291 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2292 errorText_ = errorStream_.str();
2295 stream_.sampleRate = jackRate;
2297 // Get the latency of the JACK port.
2298 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2299 if ( ports[ firstChannel ] ) {
2301 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2302 // the range (usually the min and max are equal)
2303 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2304 // get the latency range
2305 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2306 // be optimistic, use the min!
2307 stream_.latency[mode] = latrange.min;
2308 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2312 // The jack server always uses 32-bit floating-point data.
2313 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2314 stream_.userFormat = format;
2316 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2317 else stream_.userInterleaved = true;
2319 // Jack always uses non-interleaved buffers.
2320 stream_.deviceInterleaved[mode] = false;
2322 // Jack always provides host byte-ordered data.
2323 stream_.doByteSwap[mode] = false;
2325 // Get the buffer size. The buffer size and number of buffers
2326 // (periods) is set when the jack server is started.
2327 stream_.bufferSize = (int) jack_get_buffer_size( client );
2328 *bufferSize = stream_.bufferSize;
2330 stream_.nDeviceChannels[mode] = channels;
2331 stream_.nUserChannels[mode] = channels;
2333 // Set flags for buffer conversion.
2334 stream_.doConvertBuffer[mode] = false;
2335 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2336 stream_.doConvertBuffer[mode] = true;
2337 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2338 stream_.nUserChannels[mode] > 1 )
2339 stream_.doConvertBuffer[mode] = true;
2341 // Allocate our JackHandle structure for the stream.
2342 if ( handle == 0 ) {
2344 handle = new JackHandle;
2346 catch ( std::bad_alloc& ) {
2347 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2351 if ( pthread_cond_init(&handle->condition, NULL) ) {
2352 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2355 stream_.apiHandle = (void *) handle;
2356 handle->client = client;
2358 handle->deviceName[mode] = deviceName;
2360 // Allocate necessary internal buffers.
2361 unsigned long bufferBytes;
2362 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2363 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2364 if ( stream_.userBuffer[mode] == NULL ) {
2365 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2369 if ( stream_.doConvertBuffer[mode] ) {
2371 bool makeBuffer = true;
2372 if ( mode == OUTPUT )
2373 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2374 else { // mode == INPUT
2375 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2376 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2377 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2378 if ( bufferBytes < bytesOut ) makeBuffer = false;
2383 bufferBytes *= *bufferSize;
2384 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2385 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2386 if ( stream_.deviceBuffer == NULL ) {
2387 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2393 // Allocate memory for the Jack ports (channels) identifiers.
2394 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2395 if ( handle->ports[mode] == NULL ) {
2396 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2400 stream_.device[mode] = device;
2401 stream_.channelOffset[mode] = firstChannel;
2402 stream_.state = STREAM_STOPPED;
2403 stream_.callbackInfo.object = (void *) this;
2405 if ( stream_.mode == OUTPUT && mode == INPUT )
2406 // We had already set up the stream for output.
2407 stream_.mode = DUPLEX;
2409 stream_.mode = mode;
2410 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2411 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2412 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2415 // Register our ports.
2417 if ( mode == OUTPUT ) {
2418 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2419 snprintf( label, 64, "outport %d", i );
2420 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2421 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2425 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2426 snprintf( label, 64, "inport %d", i );
2427 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2428 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2432 // Setup the buffer conversion information structure. We don't use
2433 // buffers to do channel offsets, so we override that parameter
2435 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2437 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
2443 pthread_cond_destroy( &handle->condition );
2444 jack_client_close( handle->client );
2446 if ( handle->ports[0] ) free( handle->ports[0] );
2447 if ( handle->ports[1] ) free( handle->ports[1] );
2450 stream_.apiHandle = 0;
2453 for ( int i=0; i<2; i++ ) {
2454 if ( stream_.userBuffer[i] ) {
2455 free( stream_.userBuffer[i] );
2456 stream_.userBuffer[i] = 0;
2460 if ( stream_.deviceBuffer ) {
2461 free( stream_.deviceBuffer );
2462 stream_.deviceBuffer = 0;
2468 void RtApiJack :: closeStream( void )
2470 if ( stream_.state == STREAM_CLOSED ) {
2471 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2472 error( RtAudioError::WARNING );
2476 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2479 if ( stream_.state == STREAM_RUNNING )
2480 jack_deactivate( handle->client );
2482 jack_client_close( handle->client );
2486 if ( handle->ports[0] ) free( handle->ports[0] );
2487 if ( handle->ports[1] ) free( handle->ports[1] );
2488 pthread_cond_destroy( &handle->condition );
2490 stream_.apiHandle = 0;
2493 for ( int i=0; i<2; i++ ) {
2494 if ( stream_.userBuffer[i] ) {
2495 free( stream_.userBuffer[i] );
2496 stream_.userBuffer[i] = 0;
2500 if ( stream_.deviceBuffer ) {
2501 free( stream_.deviceBuffer );
2502 stream_.deviceBuffer = 0;
2505 stream_.mode = UNINITIALIZED;
2506 stream_.state = STREAM_CLOSED;
2509 void RtApiJack :: startStream( void )
2512 if ( stream_.state == STREAM_RUNNING ) {
2513 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2514 error( RtAudioError::WARNING );
2518 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2519 int result = jack_activate( handle->client );
2521 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2527 // Get the list of available ports.
2528 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2530 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
2531 if ( ports == NULL) {
2532 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2536 // Now make the port connections. Since RtAudio wasn't designed to
2537 // allow the user to select particular channels of a device, we'll
2538 // just open the first "nChannels" ports with offset.
2539 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2541 if ( ports[ stream_.channelOffset[0] + i ] )
2542 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2545 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2552 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2554 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
2555 if ( ports == NULL) {
2556 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2560 // Now make the port connections. See note above.
2561 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2563 if ( ports[ stream_.channelOffset[1] + i ] )
2564 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2567 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
2574 handle->drainCounter = 0;
2575 handle->internalDrain = false;
2576 stream_.state = STREAM_RUNNING;
2579 if ( result == 0 ) return;
2580 error( RtAudioError::SYSTEM_ERROR );
2583 void RtApiJack :: stopStream( void )
2586 if ( stream_.state == STREAM_STOPPED ) {
2587 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2588 error( RtAudioError::WARNING );
2592 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2593 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2595 if ( handle->drainCounter == 0 ) {
2596 handle->drainCounter = 2;
2597 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2601 jack_deactivate( handle->client );
2602 stream_.state = STREAM_STOPPED;
2605 void RtApiJack :: abortStream( void )
2608 if ( stream_.state == STREAM_STOPPED ) {
2609 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2610 error( RtAudioError::WARNING );
2614 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2615 handle->drainCounter = 2;
2620 // This function will be called by a spawned thread when the user
2621 // callback function signals that the stream should be stopped or
2622 // aborted. It is necessary to handle it this way because the
2623 // callbackEvent() function must return before the jack_deactivate()
2624 // function will return.
2625 static void *jackStopStream( void *ptr )
2627 CallbackInfo *info = (CallbackInfo *) ptr;
2628 RtApiJack *object = (RtApiJack *) info->object;
2630 object->stopStream();
2631 pthread_exit( NULL );
2634 bool RtApiJack :: callbackEvent( unsigned long nframes )
2636 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2637 if ( stream_.state == STREAM_CLOSED ) {
2638 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2639 error( RtAudioError::WARNING );
2642 if ( stream_.bufferSize != nframes ) {
2643 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2644 error( RtAudioError::WARNING );
2648 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2649 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2651 // Check if we were draining the stream and signal is finished.
2652 if ( handle->drainCounter > 3 ) {
2653 ThreadHandle threadId;
2655 stream_.state = STREAM_STOPPING;
2656 if ( handle->internalDrain == true )
2657 pthread_create( &threadId, NULL, jackStopStream, info );
2659 pthread_cond_signal( &handle->condition );
2663 // Invoke user callback first, to get fresh output data.
2664 if ( handle->drainCounter == 0 ) {
2665 RtAudioCallback callback = (RtAudioCallback) info->callback;
2666 double streamTime = getStreamTime();
2667 RtAudioStreamStatus status = 0;
2668 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2669 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2670 handle->xrun[0] = false;
2672 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2673 status |= RTAUDIO_INPUT_OVERFLOW;
2674 handle->xrun[1] = false;
2676 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2677 stream_.bufferSize, streamTime, status, info->userData );
2678 if ( cbReturnValue == 2 ) {
2679 stream_.state = STREAM_STOPPING;
2680 handle->drainCounter = 2;
2682 pthread_create( &id, NULL, jackStopStream, info );
2685 else if ( cbReturnValue == 1 ) {
2686 handle->drainCounter = 1;
2687 handle->internalDrain = true;
2691 jack_default_audio_sample_t *jackbuffer;
2692 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2693 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2695 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2697 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2698 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2699 memset( jackbuffer, 0, bufferBytes );
2703 else if ( stream_.doConvertBuffer[0] ) {
2705 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2707 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2708 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2709 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2712 else { // no buffer conversion
2713 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2714 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2715 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2720 // Don't bother draining input
2721 if ( handle->drainCounter ) {
2722 handle->drainCounter++;
2726 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2728 if ( stream_.doConvertBuffer[1] ) {
2729 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2730 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2731 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2733 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2735 else { // no buffer conversion
2736 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2737 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2738 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2744 RtApi::tickStreamTime();
2747 //******************** End of __UNIX_JACK__ *********************//
2750 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2752 // The ASIO API is designed around a callback scheme, so this
2753 // implementation is similar to that used for OS-X CoreAudio and Linux
2754 // Jack. The primary constraint with ASIO is that it only allows
2755 // access to a single driver at a time. Thus, it is not possible to
2756 // have more than one simultaneous RtAudio stream.
2758 // This implementation also requires a number of external ASIO files
2759 // and a few global variables. The ASIO callback scheme does not
2760 // allow for the passing of user data, so we must create a global
2761 // pointer to our callbackInfo structure.
2763 // On unix systems, we make use of a pthread condition variable.
2764 // Since there is no equivalent in Windows, I hacked something based
2765 // on information found in
2766 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2768 #include "asiosys.h"
2770 #include "iasiothiscallresolver.h"
2771 #include "asiodrivers.h"
2774 static AsioDrivers drivers;
2775 static ASIOCallbacks asioCallbacks;
2776 static ASIODriverInfo driverInfo;
2777 static CallbackInfo *asioCallbackInfo;
2778 static bool asioXRun;
2781 int drainCounter; // Tracks callback counts when draining
2782 bool internalDrain; // Indicates if stop is initiated from callback or not.
2783 ASIOBufferInfo *bufferInfos;
2787 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2790 // Function declarations (definitions at end of section)
2791 static const char* getAsioErrorString( ASIOError result );
2792 static void sampleRateChanged( ASIOSampleRate sRate );
2793 static long asioMessages( long selector, long value, void* message, double* opt );
2795 RtApiAsio :: RtApiAsio()
2797 // ASIO cannot run on a multi-threaded appartment. You can call
2798 // CoInitialize beforehand, but it must be for appartment threading
2799 // (in which case, CoInitilialize will return S_FALSE here).
2800 coInitialized_ = false;
2801 HRESULT hr = CoInitialize( NULL );
2803 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2804 error( RtAudioError::WARNING );
2806 coInitialized_ = true;
2808 drivers.removeCurrentDriver();
2809 driverInfo.asioVersion = 2;
2811 // See note in DirectSound implementation about GetDesktopWindow().
2812 driverInfo.sysRef = GetForegroundWindow();
2815 RtApiAsio :: ~RtApiAsio()
2817 if ( stream_.state != STREAM_CLOSED ) closeStream();
2818 if ( coInitialized_ ) CoUninitialize();
2821 unsigned int RtApiAsio :: getDeviceCount( void )
2823 return (unsigned int) drivers.asioGetNumDev();
// Probe capability information (channel counts, supported sample rates,
// native data formats, default-device flags) for ASIO driver `device`.
// NOTE(review): this excerpt is garbled — each line carries a fused
// line-number token from an earlier extraction, and several brace /
// `return info;` lines are missing.  Code is kept byte-identical here;
// only comments are added.  Restore from the canonical RtAudio source.
2826 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2828 RtAudio::DeviceInfo info;
2829 info.probed = false;
// Validate the device index before touching any driver.
2832 unsigned int nDevices = getDeviceCount();
2833 if ( nDevices == 0 ) {
2834 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2835 error( RtAudioError::INVALID_USE );
2839 if ( device >= nDevices ) {
2840 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2841 error( RtAudioError::INVALID_USE );
2845 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2846 if ( stream_.state != STREAM_CLOSED ) {
2847 if ( device >= devices_.size() ) {
2848 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2849 error( RtAudioError::WARNING );
// devices_ was filled by saveDeviceInfo() before the stream opened.
2852 return devices_[ device ];
// No stream open: load and initialize the driver so it can be queried.
2855 char driverName[32];
2856 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2857 if ( result != ASE_OK ) {
2858 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2859 errorText_ = errorStream_.str();
2860 error( RtAudioError::WARNING );
2864 info.name = driverName;
2866 if ( !drivers.loadDriver( driverName ) ) {
2867 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2868 errorText_ = errorStream_.str();
2869 error( RtAudioError::WARNING );
2873 result = ASIOInit( &driverInfo );
2874 if ( result != ASE_OK ) {
2875 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2876 errorText_ = errorStream_.str();
2877 error( RtAudioError::WARNING );
2881 // Determine the device channel information.
2882 long inputChannels, outputChannels;
2883 result = ASIOGetChannels( &inputChannels, &outputChannels );
2884 if ( result != ASE_OK ) {
2885 drivers.removeCurrentDriver();
2886 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2887 errorText_ = errorStream_.str();
2888 error( RtAudioError::WARNING );
2892 info.outputChannels = outputChannels;
2893 info.inputChannels = inputChannels;
// Duplex capacity is the smaller of the two directions.
2894 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2895 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2897 // Determine the supported sample rates.
2898 info.sampleRates.clear();
2899 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2900 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2901 if ( result == ASE_OK ) {
2902 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz.
2904 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2905 info.preferredSampleRate = SAMPLE_RATES[i];
2909 // Determine supported data types ... just check first channel and assume rest are the same.
2910 ASIOChannelInfo channelInfo;
2911 channelInfo.channel = 0;
2912 channelInfo.isInput = true;
2913 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2914 result = ASIOGetChannelInfo( &channelInfo );
2915 if ( result != ASE_OK ) {
2916 drivers.removeCurrentDriver();
2917 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2918 errorText_ = errorStream_.str();
2919 error( RtAudioError::WARNING );
// Map the ASIO sample type to the corresponding RtAudio format flag.
2923 info.nativeFormats = 0;
2924 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2925 info.nativeFormats |= RTAUDIO_SINT16;
2926 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2927 info.nativeFormats |= RTAUDIO_SINT32;
2928 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2929 info.nativeFormats |= RTAUDIO_FLOAT32;
2930 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2931 info.nativeFormats |= RTAUDIO_FLOAT64;
2932 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2933 info.nativeFormats |= RTAUDIO_SINT24;
2935 if ( info.outputChannels > 0 )
2936 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2937 if ( info.inputChannels > 0 )
2938 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Unload the driver before returning — only one driver may be loaded.
2941 drivers.removeCurrentDriver();
2945 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2947 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2948 object->callbackEvent( index );
2951 void RtApiAsio :: saveDeviceInfo( void )
2955 unsigned int nDevices = getDeviceCount();
2956 devices_.resize( nDevices );
2957 for ( unsigned int i=0; i<nDevices; i++ )
2958 devices_[i] = getDeviceInfo( i );
// Open / configure one direction (OUTPUT or INPUT) of an ASIO stream:
// loads and initializes the driver (unless this is the input half of a
// duplex stream, which must reuse the output's driver), validates channel
// count / sample rate / data format, negotiates the buffer size, creates
// the ASIO buffers and registers the static callbacks.  Returns FAILURE on
// any error; the tail of the function is the shared error-cleanup path.
// NOTE(review): this excerpt is garbled — each line carries a fused
// line-number token and the `goto error` / `return SUCCESS` / brace lines
// are missing.  Code kept byte-identical; comments only.
2961 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2962 unsigned int firstChannel, unsigned int sampleRate,
2963 RtAudioFormat format, unsigned int *bufferSize,
2964 RtAudio::StreamOptions *options )
2965 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// True when the output half is already open and we are adding the input half.
2967 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2969 // For ASIO, a duplex stream MUST use the same driver.
2970 if ( isDuplexInput && stream_.device[0] != device ) {
2971 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2975 char driverName[32];
2976 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2977 if ( result != ASE_OK ) {
2978 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2979 errorText_ = errorStream_.str();
2983 // Only load the driver once for duplex stream.
2984 if ( !isDuplexInput ) {
2985 // The getDeviceInfo() function will not work when a stream is open
2986 // because ASIO does not allow multiple devices to run at the same
2987 // time. Thus, we'll probe the system before opening a stream and
2988 // save the results for use by getDeviceInfo().
2989 this->saveDeviceInfo();
2991 if ( !drivers.loadDriver( driverName ) ) {
2992 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2993 errorText_ = errorStream_.str();
2997 result = ASIOInit( &driverInfo );
2998 if ( result != ASE_OK ) {
2999 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3000 errorText_ = errorStream_.str();
3005 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3006 bool buffersAllocated = false;
3007 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3008 unsigned int nChannels;
3011 // Check the device channel count.
3012 long inputChannels, outputChannels;
3013 result = ASIOGetChannels( &inputChannels, &outputChannels );
3014 if ( result != ASE_OK ) {
3015 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3016 errorText_ = errorStream_.str();
3020 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3021 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3022 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3023 errorText_ = errorStream_.str();
3026 stream_.nDeviceChannels[mode] = channels;
3027 stream_.nUserChannels[mode] = channels;
3028 stream_.channelOffset[mode] = firstChannel;
3030 // Verify the sample rate is supported.
3031 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3032 if ( result != ASE_OK ) {
3033 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3034 errorText_ = errorStream_.str();
3038 // Get the current sample rate
3039 ASIOSampleRate currentRate;
// NOTE(review): "¤tRate" below is mojibake — the HTML entity &curren;
// swallowed the original "&currentRate" argument.  Fix when restoring.
3040 result = ASIOGetSampleRate( ¤tRate );
3041 if ( result != ASE_OK ) {
3042 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3043 errorText_ = errorStream_.str();
3047 // Set the sample rate only if necessary
3048 if ( currentRate != sampleRate ) {
3049 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3050 if ( result != ASE_OK ) {
3051 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3052 errorText_ = errorStream_.str();
3057 // Determine the driver data type.
3058 ASIOChannelInfo channelInfo;
3059 channelInfo.channel = 0;
3060 if ( mode == OUTPUT ) channelInfo.isInput = false;
3061 else channelInfo.isInput = true;
3062 result = ASIOGetChannelInfo( &channelInfo );
3063 if ( result != ASE_OK ) {
3064 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3065 errorText_ = errorStream_.str();
3069 // Assuming WINDOWS host is always little-endian.
3070 stream_.doByteSwap[mode] = false;
3071 stream_.userFormat = format;
3072 stream_.deviceFormat[mode] = 0;
// MSB (big-endian) ASIO types require a byte swap on this host.
3073 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3074 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3075 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3077 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3078 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3079 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3081 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3082 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3083 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3085 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3086 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3087 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3089 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3090 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3091 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3094 if ( stream_.deviceFormat[mode] == 0 ) {
3095 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3096 errorText_ = errorStream_.str();
3100 // Set the buffer size. For a duplex stream, this will end up
3101 // setting the buffer size based on the input constraints, which
3103 long minSize, maxSize, preferSize, granularity;
3104 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3105 if ( result != ASE_OK ) {
3106 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3107 errorText_ = errorStream_.str();
3111 if ( isDuplexInput ) {
3112 // When this is the duplex input (output was opened before), then we have to use the same
3113 // buffersize as the output, because it might use the preferred buffer size, which most
3114 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3115 // So instead of throwing an error, make them equal. The caller uses the reference
3116 // to the "bufferSize" param as usual to set up processing buffers.
3118 *bufferSize = stream_.bufferSize;
// Clamp the requested size into the driver's [minSize, maxSize] range,
// then honor the driver's granularity constraint.
3121 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3122 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3123 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3124 else if ( granularity == -1 ) {
3125 // Make sure bufferSize is a power of two.
3126 int log2_of_min_size = 0;
3127 int log2_of_max_size = 0;
3129 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3130 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3131 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
// Pick the power of two in [minSize, maxSize] closest to the request.
3134 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3135 int min_delta_num = log2_of_min_size;
3137 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3138 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3139 if (current_delta < min_delta) {
3140 min_delta = current_delta;
3145 *bufferSize = ( (unsigned int)1 << min_delta_num );
3146 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3147 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3149 else if ( granularity != 0 ) {
3150 // Set to an even multiple of granularity, rounding up.
3151 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3156 // we don't use it anymore, see above!
3157 // Just left it here for the case...
3158 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3159 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3164 stream_.bufferSize = *bufferSize;
3165 stream_.nBuffers = 2;
3167 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3168 else stream_.userInterleaved = true;
3170 // ASIO always uses non-interleaved buffers.
3171 stream_.deviceInterleaved[mode] = false;
3173 // Allocate, if necessary, our AsioHandle structure for the stream.
3174 if ( handle == 0 ) {
3176 handle = new AsioHandle;
3178 catch ( std::bad_alloc& ) {
3179 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3182 handle->bufferInfos = 0;
3184 // Create a manual-reset event.
3185 handle->condition = CreateEvent( NULL, // no security
3186 TRUE, // manual-reset
3187 FALSE, // non-signaled initially
3189 stream_.apiHandle = (void *) handle;
3192 // Create the ASIO internal buffers. Since RtAudio sets up input
3193 // and output separately, we'll have to dispose of previously
3194 // created output buffers for a duplex stream.
3195 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3196 ASIODisposeBuffers();
3197 if ( handle->bufferInfos ) free( handle->bufferInfos );
3200 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3202 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3203 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3204 if ( handle->bufferInfos == NULL ) {
3205 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3206 errorText_ = errorStream_.str();
// Output channels first (isInput = ASIOFalse), then input channels.
3210 ASIOBufferInfo *infos;
3211 infos = handle->bufferInfos;
3212 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3213 infos->isInput = ASIOFalse;
3214 infos->channelNum = i + stream_.channelOffset[0];
3215 infos->buffers[0] = infos->buffers[1] = 0;
3217 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3218 infos->isInput = ASIOTrue;
3219 infos->channelNum = i + stream_.channelOffset[1];
3220 infos->buffers[0] = infos->buffers[1] = 0;
3223 // prepare for callbacks
3224 stream_.sampleRate = sampleRate;
3225 stream_.device[mode] = device;
3226 stream_.mode = isDuplexInput ? DUPLEX : mode;
3228 // store this class instance before registering callbacks, that are going to use it
3229 asioCallbackInfo = &stream_.callbackInfo;
3230 stream_.callbackInfo.object = (void *) this;
3232 // Set up the ASIO callback structure and create the ASIO data buffers.
3233 asioCallbacks.bufferSwitch = &bufferSwitch;
3234 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3235 asioCallbacks.asioMessage = &asioMessages;
3236 asioCallbacks.bufferSwitchTimeInfo = NULL;
3237 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3238 if ( result != ASE_OK ) {
3239 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3240 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3241 // in that case, let's be naïve and try that instead
3242 *bufferSize = preferSize;
3243 stream_.bufferSize = *bufferSize;
3244 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3247 if ( result != ASE_OK ) {
3248 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3249 errorText_ = errorStream_.str();
3252 buffersAllocated = true;
3253 stream_.state = STREAM_STOPPED;
3255 // Set flags for buffer conversion.
3256 stream_.doConvertBuffer[mode] = false;
3257 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3258 stream_.doConvertBuffer[mode] = true;
3259 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3260 stream_.nUserChannels[mode] > 1 )
3261 stream_.doConvertBuffer[mode] = true;
3263 // Allocate necessary internal buffers
3264 unsigned long bufferBytes;
3265 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3266 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3267 if ( stream_.userBuffer[mode] == NULL ) {
3268 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3272 if ( stream_.doConvertBuffer[mode] ) {
// Reuse the existing device buffer for duplex input if it is large enough.
3274 bool makeBuffer = true;
3275 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3276 if ( isDuplexInput && stream_.deviceBuffer ) {
3277 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3278 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3282 bufferBytes *= *bufferSize;
3283 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3284 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3285 if ( stream_.deviceBuffer == NULL ) {
3286 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3292 // Determine device latencies
3293 long inputLatency, outputLatency;
3294 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3295 if ( result != ASE_OK ) {
3296 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3297 errorText_ = errorStream_.str();
3298 error( RtAudioError::WARNING); // warn but don't fail
3301 stream_.latency[0] = outputLatency;
3302 stream_.latency[1] = inputLatency;
3305 // Setup the buffer conversion information structure. We don't use
3306 // buffers to do channel offsets, so we override that parameter
3308 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// ---- error cleanup path (target of the missing `goto error` lines) ----
3313 if ( !isDuplexInput ) {
3314 // the cleanup for error in the duplex input, is done by RtApi::openStream
3315 // So we clean up for single channel only
3317 if ( buffersAllocated )
3318 ASIODisposeBuffers();
3320 drivers.removeCurrentDriver();
3323 CloseHandle( handle->condition );
3324 if ( handle->bufferInfos )
3325 free( handle->bufferInfos );
3328 stream_.apiHandle = 0;
3332 if ( stream_.userBuffer[mode] ) {
3333 free( stream_.userBuffer[mode] );
3334 stream_.userBuffer[mode] = 0;
3337 if ( stream_.deviceBuffer ) {
3338 free( stream_.deviceBuffer );
3339 stream_.deviceBuffer = 0;
3344 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Stop the stream if running, dispose the ASIO buffers, unload the driver,
// free the AsioHandle and conversion buffers, and mark the stream CLOSED.
// NOTE(review): excerpt is garbled (fused line-number tokens, missing
// brace/ASIOStop lines); code kept byte-identical, comments only.
3346 void RtApiAsio :: closeStream()
3348 if ( stream_.state == STREAM_CLOSED ) {
3349 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3350 error( RtAudioError::WARNING );
3354 if ( stream_.state == STREAM_RUNNING ) {
3355 stream_.state = STREAM_STOPPED;
3358 ASIODisposeBuffers();
3359 drivers.removeCurrentDriver();
3361 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3363 CloseHandle( handle->condition );
3364 if ( handle->bufferInfos )
3365 free( handle->bufferInfos );
3367 stream_.apiHandle = 0;
// Free both user conversion buffers (0 = output, 1 = input).
3370 for ( int i=0; i<2; i++ ) {
3371 if ( stream_.userBuffer[i] ) {
3372 free( stream_.userBuffer[i] );
3373 stream_.userBuffer[i] = 0;
3377 if ( stream_.deviceBuffer ) {
3378 free( stream_.deviceBuffer );
3379 stream_.deviceBuffer = 0;
3382 stream_.mode = UNINITIALIZED;
3383 stream_.state = STREAM_CLOSED;
// File-scope flag: set when a stop has been initiated from the callback
// path so startStream() can reset it (see startStream / stopStream below).
3386 bool stopThreadCalled = false;
// Start the ASIO device via ASIOStart(), reset the drain bookkeeping and
// the stop-signal event, and mark the stream RUNNING.  Raises
// SYSTEM_ERROR (after setting errorText_) if ASIOStart() fails.
// NOTE(review): garbled excerpt (fused line numbers, missing braces);
// code kept byte-identical, comments only.
3388 void RtApiAsio :: startStream()
3391 if ( stream_.state == STREAM_RUNNING ) {
3392 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3393 error( RtAudioError::WARNING );
3397 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3398 ASIOError result = ASIOStart();
3399 if ( result != ASE_OK ) {
3400 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3401 errorText_ = errorStream_.str();
// Fresh stream: no drain in progress; condition event non-signaled.
3405 handle->drainCounter = 0;
3406 handle->internalDrain = false;
3407 ResetEvent( handle->condition );
3408 stream_.state = STREAM_RUNNING;
3412 stopThreadCalled = false;
3414 if ( result == ASE_OK ) return;
3415 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream: for output/duplex, request a two-buffer drain and block
// on the condition event until the callback signals completion, then call
// ASIOStop().  Raises SYSTEM_ERROR if ASIOStop() fails.
// NOTE(review): garbled excerpt (fused line numbers, missing braces);
// code kept byte-identical, comments only.
3418 void RtApiAsio :: stopStream()
3421 if ( stream_.state == STREAM_STOPPED ) {
3422 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3423 error( RtAudioError::WARNING );
3427 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3428 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain was started by the callback yet.
3429 if ( handle->drainCounter == 0 ) {
3430 handle->drainCounter = 2;
3431 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3435 stream_.state = STREAM_STOPPED;
3437 ASIOError result = ASIOStop();
3438 if ( result != ASE_OK ) {
3439 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3440 errorText_ = errorStream_.str();
3443 if ( result == ASE_OK ) return;
3444 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream.  Behaviorally identical to stopStream() — see the
// retained comment below for why the immediate-drain path was disabled.
// NOTE(review): garbled excerpt — the trailing stopStream() call line is
// missing from this view; code kept byte-identical, comments only.
3447 void RtApiAsio :: abortStream()
3450 if ( stream_.state == STREAM_STOPPED ) {
3451 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3452 error( RtAudioError::WARNING );
3456 // The following lines were commented-out because some behavior was
3457 // noted where the device buffers need to be zeroed to avoid
3458 // continuing sound, even when the device buffers are completely
3459 // disposed. So now, calling abort is the same as calling stop.
3460 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3461 // handle->drainCounter = 2;
3465 // This function will be called by a spawned thread when the user
3466 // callback function signals that the stream should be stopped or
3467 // aborted. It is necessary to handle it this way because the
3468 // callbackEvent() function must return before the ASIOStop()
3469 // function will return.
3470 static unsigned __stdcall asioStopStream( void *ptr )
3472 CallbackInfo *info = (CallbackInfo *) ptr;
3473 RtApiAsio *object = (RtApiAsio *) info->object;
3475 object->stopStream();
// Per-buffer-switch work: run the user callback, then copy/convert user
// data into the driver's non-interleaved output buffers and driver input
// buffers into the user buffer.  `bufferIndex` selects which half of each
// double buffer the driver wants serviced.  Runs on the ASIO driver thread.
// NOTE(review): garbled excerpt (fused line-number tokens; brace, return
// and ASIOOutputReady lines missing); code kept byte-identical, comments
// only.
3480 bool RtApiAsio :: callbackEvent( long bufferIndex )
3482 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3483 if ( stream_.state == STREAM_CLOSED ) {
3484 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3485 error( RtAudioError::WARNING );
3489 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3490 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3492 // Check if we were draining the stream and signal if finished.
3493 if ( handle->drainCounter > 3 ) {
3495 stream_.state = STREAM_STOPPING;
3496 if ( handle->internalDrain == false )
3497 SetEvent( handle->condition );
3498 else { // spawn a thread to stop the stream
3500 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3501 &stream_.callbackInfo, 0, &threadId );
3506 // Invoke user callback to get fresh output data UNLESS we are
3508 if ( handle->drainCounter == 0 ) {
3509 RtAudioCallback callback = (RtAudioCallback) info->callback;
3510 double streamTime = getStreamTime();
3511 RtAudioStreamStatus status = 0;
// Report any driver-flagged xrun to the user callback, per direction.
3512 if ( stream_.mode != INPUT && asioXRun == true ) {
3513 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3516 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3517 status |= RTAUDIO_INPUT_OVERFLOW;
3520 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3521 stream_.bufferSize, streamTime, status, info->userData );
// Return value 2 = abort immediately; 1 = drain then stop.
3522 if ( cbReturnValue == 2 ) {
3523 stream_.state = STREAM_STOPPING;
3524 handle->drainCounter = 2;
3526 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3527 &stream_.callbackInfo, 0, &threadId );
3530 else if ( cbReturnValue == 1 ) {
3531 handle->drainCounter = 1;
3532 handle->internalDrain = true;
3536 unsigned int nChannels, bufferBytes, i, j;
3537 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3538 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3540 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3542 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3544 for ( i=0, j=0; i<nChannels; i++ ) {
3545 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3546 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3550 else if ( stream_.doConvertBuffer[0] ) {
// Convert user format/interleaving into the device buffer, then fan out
// channel-by-channel into ASIO's non-interleaved buffers.
3552 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3553 if ( stream_.doByteSwap[0] )
3554 byteSwapBuffer( stream_.deviceBuffer,
3555 stream_.bufferSize * stream_.nDeviceChannels[0],
3556 stream_.deviceFormat[0] );
3558 for ( i=0, j=0; i<nChannels; i++ ) {
3559 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3560 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3561 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// No conversion needed: copy straight from the user buffer.
3567 if ( stream_.doByteSwap[0] )
3568 byteSwapBuffer( stream_.userBuffer[0],
3569 stream_.bufferSize * stream_.nUserChannels[0],
3570 stream_.userFormat );
3572 for ( i=0, j=0; i<nChannels; i++ ) {
3573 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3574 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3575 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3581 // Don't bother draining input
3582 if ( handle->drainCounter ) {
3583 handle->drainCounter++;
3587 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3589 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3591 if (stream_.doConvertBuffer[1]) {
3593 // Always interleave ASIO input data.
3594 for ( i=0, j=0; i<nChannels; i++ ) {
3595 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3596 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3597 handle->bufferInfos[i].buffers[bufferIndex],
3601 if ( stream_.doByteSwap[1] )
3602 byteSwapBuffer( stream_.deviceBuffer,
3603 stream_.bufferSize * stream_.nDeviceChannels[1],
3604 stream_.deviceFormat[1] );
3605 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// No conversion needed: copy device input channels directly to the user buffer.
3609 for ( i=0, j=0; i<nChannels; i++ ) {
3610 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3611 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3612 handle->bufferInfos[i].buffers[bufferIndex],
3617 if ( stream_.doByteSwap[1] )
3618 byteSwapBuffer( stream_.userBuffer[1],
3619 stream_.bufferSize * stream_.nUserChannels[1],
3620 stream_.userFormat );
3625 // The following call was suggested by Malte Clasen. While the API
3626 // documentation indicates it should not be required, some device
3627 // drivers apparently do not function correctly without it.
3630 RtApi::tickStreamTime();
3634 static void sampleRateChanged( ASIOSampleRate sRate )
3636 // The ASIO documentation says that this usually only happens during
3637 // external sync. Audio processing is not stopped by the driver,
3638 // actual sample rate might not have even changed, maybe only the
3639 // sample rate status of an AES/EBU or S/PDIF digital input at the
3642 RtApi *object = (RtApi *) asioCallbackInfo->object;
3644 object->stopStream();
3646 catch ( RtAudioError &exception ) {
3647 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3651 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO host-message callback: answers driver queries (supported selectors,
// engine version, timeInfo/timeCode support) and logs reset/resync/latency
// notifications.  NOTE(review): garbled excerpt — the `long ret` variable,
// per-case `ret = ...` assignments, `break`s and the final return are
// missing from this view; code kept byte-identical, comments only.
3654 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3658 switch( selector ) {
3659 case kAsioSelectorSupported:
// Advertise which of the selectors below this host understands.
3660 if ( value == kAsioResetRequest
3661 || value == kAsioEngineVersion
3662 || value == kAsioResyncRequest
3663 || value == kAsioLatenciesChanged
3664 // The following three were added for ASIO 2.0, you don't
3665 // necessarily have to support them.
3666 || value == kAsioSupportsTimeInfo
3667 || value == kAsioSupportsTimeCode
3668 || value == kAsioSupportsInputMonitor)
3671 case kAsioResetRequest:
3672 // Defer the task and perform the reset of the driver during the
3673 // next "safe" situation. You cannot reset the driver right now,
3674 // as this code is called from the driver. Reset the driver is
3675 // done by completely destruct is. I.e. ASIOStop(),
3676 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3678 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3681 case kAsioResyncRequest:
3682 // This informs the application that the driver encountered some
3683 // non-fatal data loss. It is used for synchronization purposes
3684 // of different media. Added mainly to work around the Win16Mutex
3685 // problems in Windows 95/98 with the Windows Multimedia system,
3686 // which could lose data because the Mutex was held too long by
3687 // another thread. However a driver can issue it in other
3689 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3693 case kAsioLatenciesChanged:
3694 // This will inform the host application that the drivers were
3695 // latencies changed. Beware, it this does not mean that the
3696 // buffer sizes have changed! You might need to update internal
3698 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3701 case kAsioEngineVersion:
3702 // Return the supported ASIO version of the host application. If
3703 // a host application does not implement this selector, ASIO 1.0
3704 // is assumed by the driver.
3707 case kAsioSupportsTimeInfo:
3708 // Informs the driver whether the
3709 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3710 // For compatibility with ASIO 1.0 drivers the host application
3711 // should always support the "old" bufferSwitch method, too.
3714 case kAsioSupportsTimeCode:
3715 // Informs the driver whether application is interested in time
3716 // code info. If an application does not need to know about time
3717 // code, the driver has less work to do.
3724 static const char* getAsioErrorString( ASIOError result )
3732 static const Messages m[] =
3734 { ASE_NotPresent, "Hardware input or output is not present or available." },
3735 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3736 { ASE_InvalidParameter, "Invalid input parameter." },
3737 { ASE_InvalidMode, "Invalid mode." },
3738 { ASE_SPNotAdvancing, "Sample position not advancing." },
3739 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3740 { ASE_NoMemory, "Not enough memory to complete the request." }
3743 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3744 if ( m[i].value == result ) return m[i].message;
3746 return "Unknown error.";
3749 //******************** End of __WINDOWS_ASIO__ *********************//
3753 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3755 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3756 // - Introduces support for the Windows WASAPI API
3757 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3758 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3759 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3764 #include <audioclient.h>
3766 #include <mmdeviceapi.h>
3767 #include <functiondiscoverykeys_devpkey.h>
3770 //=============================================================================
3772 #define SAFE_RELEASE( objectPtr )\
3775 objectPtr->Release();\
3779 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3781 //-----------------------------------------------------------------------------
3783 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3784 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3785 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3786 // provide intermediate storage for read / write synchronization.
// (Re)allocate the ring buffer to hold `bufferSize` frames of
// `formatBytes`-sized samples, zero-initialized via calloc.
// NOTE(review): garbled excerpt — the free() of the previous buffer_ and
// the in/out index resets appear to be missing from this view; confirm
// against the canonical source.  Code kept byte-identical, comments only.
3800 // sets the length of the internal ring buffer
3801 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3804 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3806 bufferSize_ = bufferSize;
3811 // attempt to push a buffer into the ring buffer at the current "in" index
// Copies bufferSize elements of the given sample format from the caller's
// buffer into the ring storage, splitting the copy in two when the write
// wraps past the end of the ring. Returns false (without writing) when the
// input is NULL/empty/oversized or when there is not enough free space
// between the "in" and "out" indices; returns true on success (on a line
// not visible in this view).
3812 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3814 if ( !buffer || // incoming buffer is NULL
3815 bufferSize == 0 || // incoming buffer has no data
3816 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "out" index so the overlap test below can be done with
// simple linear comparisons even when the write region wraps around.
3821 unsigned int relOutIndex = outIndex_;
3822 unsigned int inIndexEnd = inIndex_ + bufferSize;
3823 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3824 relOutIndex += bufferSize_;
3827 // "in" index can end on the "out" index but cannot begin at it
3828 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3829 return false; // not enough space between "in" index and "out" index
3832 // copy buffer from external to internal
// fromZeroSize = element count that wraps to the start of the ring;
// fromInSize   = element count written at the current "in" position.
3833 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3834 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3835 int fromInSize = bufferSize - fromZeroSize;
// Per-format copies: the typed casts make inIndex_/fromInSize element
// offsets (not byte offsets) for each sample width.
3840 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3841 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3843 case RTAUDIO_SINT16:
3844 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3845 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3847 case RTAUDIO_SINT24:
3848 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3849 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3851 case RTAUDIO_SINT32:
3852 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3853 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3855 case RTAUDIO_FLOAT32:
3856 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3857 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3859 case RTAUDIO_FLOAT64:
3860 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3861 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3865 // update "in" index
// Advance and wrap the write cursor by the number of elements pushed.
3866 inIndex_ += bufferSize;
3867 inIndex_ %= bufferSize_;
3872 // attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror image of pushBuffer(): copies bufferSize elements of the given
// sample format out of the ring into the caller's buffer, splitting the
// copy when the read wraps past the end of the ring. Returns false when
// the request is NULL/empty/oversized or when fewer than bufferSize
// elements are available between "out" and "in".
3873 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3875 if ( !buffer || // incoming buffer is NULL
3876 bufferSize == 0 || // incoming buffer has no data
3877 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "in" index so the availability test can use linear
// comparisons even when the read region wraps around.
3882 unsigned int relInIndex = inIndex_;
3883 unsigned int outIndexEnd = outIndex_ + bufferSize;
3884 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3885 relInIndex += bufferSize_;
3888 // "out" index can begin at and end on the "in" index
3889 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3890 return false; // not enough space between "out" index and "in" index
3893 // copy buffer from internal to external
// fromZeroSize = element count read from the start of the ring after a
// wrap; fromOutSize = element count read at the current "out" position.
3894 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3895 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3896 int fromOutSize = bufferSize - fromZeroSize;
3901 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3902 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3904 case RTAUDIO_SINT16:
3905 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3906 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3908 case RTAUDIO_SINT24:
3909 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3910 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3912 case RTAUDIO_SINT32:
3913 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3914 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3916 case RTAUDIO_FLOAT32:
3917 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3918 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3920 case RTAUDIO_FLOAT64:
3921 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3922 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3926 // update "out" index
// Advance and wrap the read cursor by the number of elements pulled.
3927 outIndex_ += bufferSize;
3928 outIndex_ %= bufferSize_;
// Ring-buffer state: capacity in elements, plus write ("in") and
// read ("out") cursors, both element indices in [0, bufferSize_).
3935 unsigned int bufferSize_;
3936 unsigned int inIndex_;
3937 unsigned int outIndex_;
3940 //-----------------------------------------------------------------------------
3942 // A structure to hold various information related to the WASAPI implementation.
// COM interface pointers for the capture and render sides of a stream,
// plus the event handles used for event-driven buffer notification.
// Everything starts NULL; the stream thread fills these in lazily.
3945 IAudioClient* captureAudioClient;
3946 IAudioClient* renderAudioClient;
3947 IAudioCaptureClient* captureClient;
3948 IAudioRenderClient* renderClient;
3949 HANDLE captureEvent;
// Default-construct with every pointer/handle NULL (renderEvent is
// declared on a line not visible in this view).
3953 : captureAudioClient( NULL ),
3954 renderAudioClient( NULL ),
3955 captureClient( NULL ),
3956 renderClient( NULL ),
3957 captureEvent( NULL ),
3958 renderEvent( NULL ) {}
3961 //=============================================================================
// Constructor: initialize COM for this thread (remembering whether we did,
// so the destructor only balances our own CoInitialize) and create the
// MMDevice enumerator used by all device queries.
3963 RtApiWasapi::RtApiWasapi()
3964 : coInitialized_( false ), deviceEnumerator_( NULL )
3966 // WASAPI can run either apartment or multi-threaded
3967 HRESULT hr = CoInitialize( NULL );
3968 if ( !FAILED( hr ) )
3969 coInitialized_ = true;
3971 // Instantiate device enumerator
3972 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
3973 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
3974 ( void** ) &deviceEnumerator_ );
3976 if ( FAILED( hr ) ) {
3977 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
3978 error( RtAudioError::DRIVER_ERROR );
3982 //-----------------------------------------------------------------------------
// Destructor: close any open stream, release the device enumerator, and
// balance the constructor's CoInitialize only if it succeeded there.
3984 RtApiWasapi::~RtApiWasapi()
3986 if ( stream_.state != STREAM_CLOSED )
3989 SAFE_RELEASE( deviceEnumerator_ );
3991 // If this object previously called CoInitialize()
3992 if ( coInitialized_ )
3996 //=============================================================================
// Returns the total number of active WASAPI endpoints: capture devices
// plus render devices. On any enumeration failure, errorText_ is set and
// error( DRIVER_ERROR ) is raised after the collections are released.
3998 unsigned int RtApiWasapi::getDeviceCount( void )
4000 unsigned int captureDeviceCount = 0;
4001 unsigned int renderDeviceCount = 0;
4003 IMMDeviceCollection* captureDevices = NULL;
4004 IMMDeviceCollection* renderDevices = NULL;
4006 // Count capture devices
4008 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4009 if ( FAILED( hr ) ) {
4010 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4014 hr = captureDevices->GetCount( &captureDeviceCount );
4015 if ( FAILED( hr ) ) {
4016 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4020 // Count render devices
4021 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4022 if ( FAILED( hr ) ) {
4023 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4027 hr = renderDevices->GetCount( &renderDeviceCount );
4028 if ( FAILED( hr ) ) {
4029 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4034 // release all references
4035 SAFE_RELEASE( captureDevices );
4036 SAFE_RELEASE( renderDevices );
4038 if ( errorText_.empty() )
4039 return captureDeviceCount + renderDeviceCount;
4041 error( RtAudioError::DRIVER_ERROR );
4045 //-----------------------------------------------------------------------------
// Probes a single device and fills an RtAudio::DeviceInfo. Device indices
// are global across both endpoint directions: render devices come first
// [0, renderDeviceCount), then capture devices. Reads the friendly name
// from the property store, compares it against the default endpoint's name
// to set the isDefault* flags, and derives channel counts, the single
// supported (mix-format) sample rate, and the native sample format from
// the device's shared-mode mix format. All COM references, PROPVARIANTs,
// and CoTaskMem allocations are released at the end regardless of outcome.
4047 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4049 RtAudio::DeviceInfo info;
4050 unsigned int captureDeviceCount = 0;
4051 unsigned int renderDeviceCount = 0;
4052 std::string defaultDeviceName;
4053 bool isCaptureDevice = false;
4055 PROPVARIANT deviceNameProp;
4056 PROPVARIANT defaultDeviceNameProp;
4058 IMMDeviceCollection* captureDevices = NULL;
4059 IMMDeviceCollection* renderDevices = NULL;
4060 IMMDevice* devicePtr = NULL;
4061 IMMDevice* defaultDevicePtr = NULL;
4062 IAudioClient* audioClient = NULL;
4063 IPropertyStore* devicePropStore = NULL;
4064 IPropertyStore* defaultDevicePropStore = NULL;
4066 WAVEFORMATEX* deviceFormat = NULL;
4067 WAVEFORMATEX* closestMatchFormat = NULL;
// Pessimistically mark the probe failed; flipped to true only on success
// (on a line not visible in this view).
4070 info.probed = false;
4072 // Count capture devices
4074 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4075 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4076 if ( FAILED( hr ) ) {
4077 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4081 hr = captureDevices->GetCount( &captureDeviceCount );
4082 if ( FAILED( hr ) ) {
4083 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4087 // Count render devices
4088 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4089 if ( FAILED( hr ) ) {
4090 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4094 hr = renderDevices->GetCount( &renderDeviceCount );
4095 if ( FAILED( hr ) ) {
4096 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4100 // validate device index
4101 if ( device >= captureDeviceCount + renderDeviceCount ) {
4102 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4103 errorType = RtAudioError::INVALID_USE;
4107 // determine whether index falls within capture or render devices
4108 if ( device >= renderDeviceCount ) {
4109 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4110 if ( FAILED( hr ) ) {
4111 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4114 isCaptureDevice = true;
4117 hr = renderDevices->Item( device, &devicePtr );
4118 if ( FAILED( hr ) ) {
4119 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4122 isCaptureDevice = false;
4125 // get default device name
4126 if ( isCaptureDevice ) {
4127 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4128 if ( FAILED( hr ) ) {
4129 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4134 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4135 if ( FAILED( hr ) ) {
4136 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4141 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4142 if ( FAILED( hr ) ) {
4143 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4146 PropVariantInit( &defaultDeviceNameProp );
4148 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4149 if ( FAILED( hr ) ) {
4150 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4154 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
// get the probed device's friendly name the same way
4157 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4158 if ( FAILED( hr ) ) {
4159 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4163 PropVariantInit( &deviceNameProp );
4165 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4166 if ( FAILED( hr ) ) {
4167 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4171 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// default-device flags: determined by friendly-name equality with the
// default endpoint of the same direction (names, not endpoint IDs).
4174 if ( isCaptureDevice ) {
4175 info.isDefaultInput = info.name == defaultDeviceName;
4176 info.isDefaultOutput = false;
4179 info.isDefaultInput = false;
4180 info.isDefaultOutput = info.name == defaultDeviceName;
// channel count is taken from the shared-mode mix format
4184 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4185 if ( FAILED( hr ) ) {
4186 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4190 hr = audioClient->GetMixFormat( &deviceFormat );
4191 if ( FAILED( hr ) ) {
4192 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4196 if ( isCaptureDevice ) {
4197 info.inputChannels = deviceFormat->nChannels;
4198 info.outputChannels = 0;
4199 info.duplexChannels = 0;
4202 info.inputChannels = 0;
4203 info.outputChannels = deviceFormat->nChannels;
4204 info.duplexChannels = 0;
4207 // sample rates (WASAPI only supports the one native sample rate)
4208 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4210 info.sampleRates.clear();
4211 info.sampleRates.push_back( deviceFormat->nSamplesPerSec );
// native format: decode the mix format's tag (or WAVEFORMATEXTENSIBLE
// SubFormat) plus bit depth into an RtAudioFormat bit.
4214 info.nativeFormats = 0;
4216 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4217 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4218 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4220 if ( deviceFormat->wBitsPerSample == 32 ) {
4221 info.nativeFormats |= RTAUDIO_FLOAT32;
4223 else if ( deviceFormat->wBitsPerSample == 64 ) {
4224 info.nativeFormats |= RTAUDIO_FLOAT64;
4227 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4228 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4229 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4231 if ( deviceFormat->wBitsPerSample == 8 ) {
4232 info.nativeFormats |= RTAUDIO_SINT8;
4234 else if ( deviceFormat->wBitsPerSample == 16 ) {
4235 info.nativeFormats |= RTAUDIO_SINT16;
4237 else if ( deviceFormat->wBitsPerSample == 24 ) {
4238 info.nativeFormats |= RTAUDIO_SINT24;
4240 else if ( deviceFormat->wBitsPerSample == 32 ) {
4241 info.nativeFormats |= RTAUDIO_SINT32;
4249 // release all references
4250 PropVariantClear( &deviceNameProp );
4251 PropVariantClear( &defaultDeviceNameProp );
4253 SAFE_RELEASE( captureDevices );
4254 SAFE_RELEASE( renderDevices );
4255 SAFE_RELEASE( devicePtr );
4256 SAFE_RELEASE( defaultDevicePtr );
4257 SAFE_RELEASE( audioClient );
4258 SAFE_RELEASE( devicePropStore );
4259 SAFE_RELEASE( defaultDevicePropStore );
4261 CoTaskMemFree( deviceFormat );
4262 CoTaskMemFree( closestMatchFormat );
4264 if ( !errorText_.empty() )
4269 //-----------------------------------------------------------------------------
// Linear scan for the device whose probed info reports isDefaultOutput.
// Note: each iteration re-enumerates devices via getDeviceCount() and
// fully probes the device via getDeviceInfo(), so this is O(n) probes.
4271 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4273 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4274 if ( getDeviceInfo( i ).isDefaultOutput ) {
4282 //-----------------------------------------------------------------------------
// Linear scan for the device whose probed info reports isDefaultInput.
// Same cost caveat as getDefaultOutputDevice(): one full probe per index.
4284 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4286 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4287 if ( getDeviceInfo( i ).isDefaultInput ) {
4295 //-----------------------------------------------------------------------------
// Tears down an open stream: stops it if still running, releases every COM
// object held in the WasapiHandle, closes the event handles, frees the
// handle itself plus the user/device buffers, and marks the stream CLOSED.
// Calling with no open stream is a warning, not an error.
4297 void RtApiWasapi::closeStream( void )
4299 if ( stream_.state == STREAM_CLOSED ) {
4300 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4301 error( RtAudioError::WARNING );
4305 if ( stream_.state != STREAM_STOPPED )
4308 // clean up stream memory
4309 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4310 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4312 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4313 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4315 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4316 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4318 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4319 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4321 delete ( WasapiHandle* ) stream_.apiHandle;
4322 stream_.apiHandle = NULL;
// free the per-direction user buffers (index 0/1 = OUTPUT/INPUT)
4324 for ( int i = 0; i < 2; i++ ) {
4325 if ( stream_.userBuffer[i] ) {
4326 free( stream_.userBuffer[i] );
4327 stream_.userBuffer[i] = 0;
4331 if ( stream_.deviceBuffer ) {
4332 free( stream_.deviceBuffer );
4333 stream_.deviceBuffer = 0;
4336 // update stream state
4337 stream_.state = STREAM_CLOSED;
4340 //-----------------------------------------------------------------------------
// Starts the stream: flips state to RUNNING, then spawns the WASAPI worker
// thread suspended so its priority can be set (from callbackInfo.priority,
// chosen in probeDeviceOpen) before it is resumed. Starting an already
// running stream is a warning, not an error.
4342 void RtApiWasapi::startStream( void )
4346 if ( stream_.state == STREAM_RUNNING ) {
4347 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4348 error( RtAudioError::WARNING );
4352 // update stream state
4353 stream_.state = STREAM_RUNNING;
4355 // create WASAPI stream thread
4356 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4358 if ( !stream_.callbackInfo.thread ) {
4359 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4360 error( RtAudioError::THREAD_ERROR );
4363 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4364 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4368 //-----------------------------------------------------------------------------
// Gracefully stops the stream: requests shutdown by setting STREAM_STOPPING,
// spin-waits for the worker thread to acknowledge with STREAM_STOPPED,
// sleeps one buffer period so the final buffer can drain, then calls Stop()
// on both audio clients and closes the worker thread handle.
4370 void RtApiWasapi::stopStream( void )
4374 if ( stream_.state == STREAM_STOPPED ) {
4375 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4376 error( RtAudioError::WARNING );
4380 // inform stream thread by setting stream state to STREAM_STOPPING
4381 stream_.state = STREAM_STOPPING;
4383 // wait until stream thread is stopped
// Busy-wait on the state flag written by the worker thread.
4384 while( stream_.state != STREAM_STOPPED ) {
4388 // Wait for the last buffer to play before stopping.
// bufferSize frames / sampleRate frames-per-second, scaled to ms.
4389 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4391 // stop capture client if applicable
4392 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4393 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4394 if ( FAILED( hr ) ) {
4395 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4396 error( RtAudioError::DRIVER_ERROR );
4401 // stop render client if applicable
4402 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4403 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4404 if ( FAILED( hr ) ) {
4405 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4406 error( RtAudioError::DRIVER_ERROR );
4411 // close thread handle
4412 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4413 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4414 error( RtAudioError::THREAD_ERROR );
4418 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4421 //-----------------------------------------------------------------------------
// Immediately stops the stream: identical to stopStream() except it does
// NOT sleep to let the final buffer drain before stopping the clients.
4423 void RtApiWasapi::abortStream( void )
4427 if ( stream_.state == STREAM_STOPPED ) {
4428 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4429 error( RtAudioError::WARNING );
4433 // inform stream thread by setting stream state to STREAM_STOPPING
4434 stream_.state = STREAM_STOPPING;
4436 // wait until stream thread is stopped
// Busy-wait on the state flag written by the worker thread.
4437 while ( stream_.state != STREAM_STOPPED ) {
4441 // stop capture client if applicable
4442 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4443 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4444 if ( FAILED( hr ) ) {
4445 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4446 error( RtAudioError::DRIVER_ERROR );
4451 // stop render client if applicable
4452 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4453 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4454 if ( FAILED( hr ) ) {
4455 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4456 error( RtAudioError::DRIVER_ERROR );
4461 // close thread handle
4462 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4463 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4464 error( RtAudioError::THREAD_ERROR );
4468 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4471 //-----------------------------------------------------------------------------
// Opens one direction (INPUT or OUTPUT) of a stream on the given device.
// Validates the device index, direction, and sample rate (WASAPI shared
// mode only accepts the device's mix-format rate here), activates an
// IAudioClient stored in the WasapiHandle, then fills in the stream_
// bookkeeping (channels, format, conversion flags, user buffer). Returns
// SUCCESS/FAILURE; on failure the partially opened stream is closed and
// errorText_/errorType describe the cause.
4473 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4474 unsigned int firstChannel, unsigned int sampleRate,
4475 RtAudioFormat format, unsigned int* bufferSize,
4476 RtAudio::StreamOptions* options )
4478 bool methodResult = FAILURE;
4479 unsigned int captureDeviceCount = 0;
4480 unsigned int renderDeviceCount = 0;
4482 IMMDeviceCollection* captureDevices = NULL;
4483 IMMDeviceCollection* renderDevices = NULL;
4484 IMMDevice* devicePtr = NULL;
4485 WAVEFORMATEX* deviceFormat = NULL;
4486 unsigned int bufferBytes;
4487 stream_.state = STREAM_STOPPED;
4488 RtAudio::DeviceInfo deviceInfo;
4490 // create API Handle if not already created
// (shared between the INPUT and OUTPUT calls of a duplex open)
4491 if ( !stream_.apiHandle )
4492 stream_.apiHandle = ( void* ) new WasapiHandle();
4494 // Count capture devices
4496 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4497 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4498 if ( FAILED( hr ) ) {
4499 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4503 hr = captureDevices->GetCount( &captureDeviceCount );
4504 if ( FAILED( hr ) ) {
4505 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4509 // Count render devices
4510 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4511 if ( FAILED( hr ) ) {
4512 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4516 hr = renderDevices->GetCount( &renderDeviceCount );
4517 if ( FAILED( hr ) ) {
4518 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4522 // validate device index
4523 if ( device >= captureDeviceCount + renderDeviceCount ) {
4524 errorType = RtAudioError::INVALID_USE;
4525 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4529 deviceInfo = getDeviceInfo( device );
4531 // validate sample rate
// No resampling at open time: the requested rate must equal the
// device's mix-format rate reported by getDeviceInfo().
4532 if ( sampleRate != deviceInfo.preferredSampleRate )
4534 errorType = RtAudioError::INVALID_USE;
4535 std::stringstream ss;
4536 ss << "RtApiWasapi::probeDeviceOpen: " << sampleRate
4537 << "Hz sample rate not supported. This device only supports "
4538 << deviceInfo.preferredSampleRate << "Hz.";
4539 errorText_ = ss.str();
4543 // determine whether index falls within capture or render devices
// (render devices occupy indices [0, renderDeviceCount), capture after)
4544 if ( device >= renderDeviceCount ) {
4545 if ( mode != INPUT ) {
4546 errorType = RtAudioError::INVALID_USE;
4547 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4551 // retrieve captureAudioClient from devicePtr
4552 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4554 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4555 if ( FAILED( hr ) ) {
4556 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4560 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4561 NULL, ( void** ) &captureAudioClient );
4562 if ( FAILED( hr ) ) {
4563 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4567 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4568 if ( FAILED( hr ) ) {
4569 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4573 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4574 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4577 if ( mode != OUTPUT ) {
4578 errorType = RtAudioError::INVALID_USE;
4579 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4583 // retrieve renderAudioClient from devicePtr
4584 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4586 hr = renderDevices->Item( device, &devicePtr );
4587 if ( FAILED( hr ) ) {
4588 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4592 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4593 NULL, ( void** ) &renderAudioClient );
4594 if ( FAILED( hr ) ) {
4595 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4599 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4600 if ( FAILED( hr ) ) {
4601 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4605 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4606 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// second call of a duplex open upgrades the stream mode to DUPLEX
4610 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4611 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4612 stream_.mode = DUPLEX;
4615 stream_.mode = mode;
4618 stream_.device[mode] = device;
4619 stream_.doByteSwap[mode] = false;
4620 stream_.sampleRate = sampleRate;
4621 stream_.bufferSize = *bufferSize;
4622 stream_.nBuffers = 1;
4623 stream_.nUserChannels[mode] = channels;
4624 stream_.channelOffset[mode] = firstChannel;
4625 stream_.userFormat = format;
4626 stream_.deviceFormat[mode] = deviceInfo.nativeFormats;
4628 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4629 stream_.userInterleaved = false;
4631 stream_.userInterleaved = true;
4632 stream_.deviceInterleaved[mode] = true;
4634 // Set flags for buffer conversion.
4635 stream_.doConvertBuffer[mode] = false;
// NOTE(review): the next comparison is array-to-array, which in C++
// decays to a pointer comparison of two distinct arrays and is
// therefore always true — doConvertBuffer is effectively always set.
// The per-element form nUserChannels[mode] != nDeviceChannels[mode]
// looks intended; confirm against the canonical RtAudio source.
4636 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4637 stream_.nUserChannels != stream_.nDeviceChannels )
4638 stream_.doConvertBuffer[mode] = true;
4639 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4640 stream_.nUserChannels[mode] > 1 )
4641 stream_.doConvertBuffer[mode] = true;
4643 if ( stream_.doConvertBuffer[mode] )
4644 setConvertInfo( mode, 0 );
4646 // Allocate necessary internal buffers
4647 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4649 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4650 if ( !stream_.userBuffer[mode] ) {
4651 errorType = RtAudioError::MEMORY_ERROR;
4652 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
// thread priority used by startStream() when it spawns the worker
4656 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4657 stream_.callbackInfo.priority = 15;
4659 stream_.callbackInfo.priority = 0;
4661 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4662 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4664 methodResult = SUCCESS;
4668 SAFE_RELEASE( captureDevices );
4669 SAFE_RELEASE( renderDevices );
4670 SAFE_RELEASE( devicePtr );
4671 CoTaskMemFree( deviceFormat );
4673 // if method failed, close the stream
4674 if ( methodResult == FAILURE )
4677 if ( !errorText_.empty() )
4679 return methodResult;
4682 //=============================================================================
// Static thread entry point passed to CreateThread by startStream():
// forwards to the instance's wasapiThread() worker loop.
4684 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4687 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
// Static thread entry point: forwards to the instance's stopStream()
// (graceful stop, drains the final buffer).
4692 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4695 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
// Static thread entry point: forwards to the instance's abortStream()
// (immediate stop, no drain).
4700 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4703 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4708 //-----------------------------------------------------------------------------
4710 void RtApiWasapi::wasapiThread()
4712 // as this is a new thread, we must CoInitialize it
4713 CoInitialize( NULL );
4717 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4718 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4719 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4720 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4721 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4722 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4724 WAVEFORMATEX* captureFormat = NULL;
4725 WAVEFORMATEX* renderFormat = NULL;
4726 WasapiBuffer captureBuffer;
4727 WasapiBuffer renderBuffer;
4729 // declare local stream variables
4730 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4731 BYTE* streamBuffer = NULL;
4732 unsigned long captureFlags = 0;
4733 unsigned int bufferFrameCount = 0;
4734 unsigned int numFramesPadding = 0;
4735 bool callbackPushed = false;
4736 bool callbackPulled = false;
4737 bool callbackStopped = false;
4738 int callbackResult = 0;
4740 unsigned int deviceBuffSize = 0;
4743 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4745 // Attempt to assign "Pro Audio" characteristic to thread
4746 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4748 DWORD taskIndex = 0;
4749 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4750 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4751 FreeLibrary( AvrtDll );
4754 // start capture stream if applicable
4755 if ( captureAudioClient ) {
4756 hr = captureAudioClient->GetMixFormat( &captureFormat );
4757 if ( FAILED( hr ) ) {
4758 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4762 // initialize capture stream according to desired buffer size
4763 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / captureFormat->nSamplesPerSec );
4765 if ( !captureClient ) {
4766 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4767 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4768 desiredBufferPeriod,
4769 desiredBufferPeriod,
4772 if ( FAILED( hr ) ) {
4773 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4777 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4778 ( void** ) &captureClient );
4779 if ( FAILED( hr ) ) {
4780 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4784 // configure captureEvent to trigger on every available capture buffer
4785 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4786 if ( !captureEvent ) {
4787 errorType = RtAudioError::SYSTEM_ERROR;
4788 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4792 hr = captureAudioClient->SetEventHandle( captureEvent );
4793 if ( FAILED( hr ) ) {
4794 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
4798 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4799 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4802 unsigned int inBufferSize = 0;
4803 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4804 if ( FAILED( hr ) ) {
4805 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4809 // scale outBufferSize according to stream->user sample rate ratio
4810 unsigned int outBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT];
4811 inBufferSize *= stream_.nDeviceChannels[INPUT];
4813 // set captureBuffer size
4814 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4816 // reset the capture stream
4817 hr = captureAudioClient->Reset();
4818 if ( FAILED( hr ) ) {
4819 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4823 // start the capture stream
4824 hr = captureAudioClient->Start();
4825 if ( FAILED( hr ) ) {
4826 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4831 // start render stream if applicable
4832 if ( renderAudioClient ) {
4833 hr = renderAudioClient->GetMixFormat( &renderFormat );
4834 if ( FAILED( hr ) ) {
4835 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4839 // initialize render stream according to desired buffer size
4840 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / renderFormat->nSamplesPerSec );
4842 if ( !renderClient ) {
4843 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4844 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4845 desiredBufferPeriod,
4846 desiredBufferPeriod,
4849 if ( FAILED( hr ) ) {
4850 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4854 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4855 ( void** ) &renderClient );
4856 if ( FAILED( hr ) ) {
4857 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4861 // configure renderEvent to trigger on every available render buffer
4862 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4863 if ( !renderEvent ) {
4864 errorType = RtAudioError::SYSTEM_ERROR;
4865 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4869 hr = renderAudioClient->SetEventHandle( renderEvent );
4870 if ( FAILED( hr ) ) {
4871 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4875 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4876 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4879 unsigned int outBufferSize = 0;
4880 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4881 if ( FAILED( hr ) ) {
4882 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4886 // scale inBufferSize according to user->stream sample rate ratio
4887 unsigned int inBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[OUTPUT];
4888 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4890 // set renderBuffer size
4891 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4893 // reset the render stream
4894 hr = renderAudioClient->Reset();
4895 if ( FAILED( hr ) ) {
4896 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4900 // start the render stream
4901 hr = renderAudioClient->Start();
4902 if ( FAILED( hr ) ) {
4903 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
4908 if ( stream_.mode == INPUT ) {
4909 using namespace std; // for roundf
4910 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4912 else if ( stream_.mode == OUTPUT ) {
4913 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4915 else if ( stream_.mode == DUPLEX ) {
4916 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4917 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4920 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4921 if ( !stream_.deviceBuffer ) {
4922 errorType = RtAudioError::MEMORY_ERROR;
4923 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
4927 // stream process loop
4928 while ( stream_.state != STREAM_STOPPING ) {
4929 if ( !callbackPulled ) {
4932 // 1. Pull callback buffer from inputBuffer
4933 // 2. If 1. was successful: Convert callback buffer to user format
4935 if ( captureAudioClient ) {
4936 // Pull callback buffer from inputBuffer
4937 callbackPulled = captureBuffer.pullBuffer( stream_.deviceBuffer,
4938 ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT],
4939 stream_.deviceFormat[INPUT] );
4941 if ( callbackPulled ) {
4942 if ( stream_.doConvertBuffer[INPUT] ) {
4943 // Convert callback buffer to user format
4944 convertBuffer( stream_.userBuffer[INPUT],
4945 stream_.deviceBuffer,
4946 stream_.convertInfo[INPUT] );
4949 // no further conversion, simple copy deviceBuffer to userBuffer
4950 memcpy( stream_.userBuffer[INPUT],
4951 stream_.deviceBuffer,
4952 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
4957 // if there is no capture stream, set callbackPulled flag
4958 callbackPulled = true;
4963 // 1. Execute user callback method
4964 // 2. Handle return value from callback
4966 // if callback has not requested the stream to stop
4967 if ( callbackPulled && !callbackStopped ) {
4968 // Execute user callback method
4969 callbackResult = callback( stream_.userBuffer[OUTPUT],
4970 stream_.userBuffer[INPUT],
4973 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
4974 stream_.callbackInfo.userData );
4976 // Handle return value from callback
4977 if ( callbackResult == 1 ) {
4978 // instantiate a thread to stop this thread
4979 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
4980 if ( !threadHandle ) {
4981 errorType = RtAudioError::THREAD_ERROR;
4982 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
4985 else if ( !CloseHandle( threadHandle ) ) {
4986 errorType = RtAudioError::THREAD_ERROR;
4987 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
4991 callbackStopped = true;
4993 else if ( callbackResult == 2 ) {
4994 // instantiate a thread to stop this thread
4995 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
4996 if ( !threadHandle ) {
4997 errorType = RtAudioError::THREAD_ERROR;
4998 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5001 else if ( !CloseHandle( threadHandle ) ) {
5002 errorType = RtAudioError::THREAD_ERROR;
5003 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5007 callbackStopped = true;
5014 // 1. Convert callback buffer to stream format
5015 // 2. Push callback buffer into outputBuffer
5017 if ( renderAudioClient && callbackPulled ) {
5018 if ( stream_.doConvertBuffer[OUTPUT] ) {
5019 // Convert callback buffer to stream format
5020 convertBuffer( stream_.deviceBuffer,
5021 stream_.userBuffer[OUTPUT],
5022 stream_.convertInfo[OUTPUT] );
5026 // Push callback buffer into outputBuffer
5027 callbackPushed = renderBuffer.pushBuffer( stream_.deviceBuffer,
5028 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT],
5029 stream_.deviceFormat[OUTPUT] );
5032 // if there is no render stream, set callbackPushed flag
5033 callbackPushed = true;
5038 // 1. Get capture buffer from stream
5039 // 2. Push capture buffer into inputBuffer
5040 // 3. If 2. was successful: Release capture buffer
5042 if ( captureAudioClient ) {
5043 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5044 if ( !callbackPulled ) {
5045 WaitForSingleObject( captureEvent, INFINITE );
5048 // Get capture buffer from stream
5049 hr = captureClient->GetBuffer( &streamBuffer,
5051 &captureFlags, NULL, NULL );
5052 if ( FAILED( hr ) ) {
5053 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5057 if ( bufferFrameCount != 0 ) {
5058 // Push capture buffer into inputBuffer
5059 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5060 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5061 stream_.deviceFormat[INPUT] ) )
5063 // Release capture buffer
5064 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5065 if ( FAILED( hr ) ) {
5066 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5072 // Inform WASAPI that capture was unsuccessful
5073 hr = captureClient->ReleaseBuffer( 0 );
5074 if ( FAILED( hr ) ) {
5075 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5082 // Inform WASAPI that capture was unsuccessful
5083 hr = captureClient->ReleaseBuffer( 0 );
5084 if ( FAILED( hr ) ) {
5085 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5093 // 1. Get render buffer from stream
5094 // 2. Pull next buffer from outputBuffer
5095 // 3. If 2. was successful: Fill render buffer with next buffer
5096 // Release render buffer
5098 if ( renderAudioClient ) {
5099 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5100 if ( callbackPulled && !callbackPushed ) {
5101 WaitForSingleObject( renderEvent, INFINITE );
5104 // Get render buffer from stream
5105 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5106 if ( FAILED( hr ) ) {
5107 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5111 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5112 if ( FAILED( hr ) ) {
5113 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5117 bufferFrameCount -= numFramesPadding;
5119 if ( bufferFrameCount != 0 ) {
5120 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5121 if ( FAILED( hr ) ) {
5122 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5126 // Pull next buffer from outputBuffer
5127 // Fill render buffer with next buffer
5128 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5129 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5130 stream_.deviceFormat[OUTPUT] ) )
5132 // Release render buffer
5133 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5134 if ( FAILED( hr ) ) {
5135 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5141 // Inform WASAPI that render was unsuccessful
5142 hr = renderClient->ReleaseBuffer( 0, 0 );
5143 if ( FAILED( hr ) ) {
5144 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5151 // Inform WASAPI that render was unsuccessful
5152 hr = renderClient->ReleaseBuffer( 0, 0 );
5153 if ( FAILED( hr ) ) {
5154 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5160 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5161 if ( callbackPushed ) {
5162 callbackPulled = false;
5164 RtApi::tickStreamTime();
5171 CoTaskMemFree( captureFormat );
5172 CoTaskMemFree( renderFormat );
5176 // update stream state
5177 stream_.state = STREAM_STOPPED;
5179 if ( errorText_.empty() )
5185 //******************** End of __WINDOWS_WASAPI__ *********************//
5189 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5191 // Modified by Robin Davies, October 2005
5192 // - Improvements to DirectX pointer chasing.
5193 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5194 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5195 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5196 // Changed device query structure for RtAudio 4.0.7, January 2010
5198 #include <windows.h>
5199 #include <process.h>
5200 #include <mmsystem.h>
5204 #include <algorithm>
5206 #if defined(__MINGW32__)
5207 // missing from latest mingw winapi
5208 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5209 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5210 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5211 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5214 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5216 #ifdef _MSC_VER // if Microsoft Visual C++
5217 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5220 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5222 if ( pointer > bufferSize ) pointer -= bufferSize;
5223 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5224 if ( pointer < earlierPointer ) pointer += bufferSize;
5225 return pointer >= earlierPointer && pointer < laterPointer;
// A structure to hold various information related to the DirectSound
// API implementation.
// NOTE(review): the struct's opening declaration and several members
// referenced by the constructor below (id[], buffer[], xrun[]) are not
// visible in this view — confirm against the complete declaration.
unsigned int drainCounter; // Tracks callback counts when draining
bool internalDrain;        // Indicates if stop is initiated from callback or not.
UINT bufferPointer[2];     // current byte offsets into the two DS buffers (presumably [0]=playback, [1]=capture — confirm)
DWORD dsBufferSize[2];     // size in bytes of each DirectSound buffer
DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.

// Constructor: zero the counters/flags and the ids, buffers, xrun
// flags and buffer pointers for both stream directions.
:drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
// Declarations for utility functions, callbacks, and structures
// specific to the DirectSound implementation.
// NOTE(review): the trailing parameters of deviceQueryCallback are not
// visible in this view — confirm against the full declaration.
static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
                                          LPCTSTR description,
// Map a DirectSound HRESULT-style error code to a printable string.
static const char* getErrorString( int code );

// Thread entry point for the DirectSound callback/buffer-chasing loop.
static unsigned __stdcall callbackHandler( void *ptr );

// DsDevice constructor fragment: device not yet (re)found; both the
// playback (index 0) and capture (index 1) GUIDs start out invalid.
// NOTE(review): the enclosing DsDevice struct declaration is outside this view.
  : found(false) { validId[0] = false; validId[1] = false; }

// Probe context handed to deviceQueryCallback during enumeration.
struct DsProbeData {
  std::vector<struct DsDevice>* dsDevices; // device list being filled in / refreshed
5271 RtApiDs :: RtApiDs()
5273 // Dsound will run both-threaded. If CoInitialize fails, then just
5274 // accept whatever the mainline chose for a threading model.
5275 coInitialized_ = false;
5276 HRESULT hr = CoInitialize( NULL );
5277 if ( !FAILED( hr ) ) coInitialized_ = true;
5280 RtApiDs :: ~RtApiDs()
5282 if ( stream_.state != STREAM_CLOSED ) closeStream();
5283 if ( coInitialized_ ) CoUninitialize(); // balanced call.
// The DirectSound default output is always the first device.
// NOTE(review): the function body is not visible in this view —
// presumably "return 0;" given the comment above; confirm against the
// full source.
unsigned int RtApiDs :: getDefaultOutputDevice( void )

// The DirectSound default input is always the first input device,
// which is the first capture device enumerated.
// NOTE(review): body not visible in this view — presumably "return 0;".
unsigned int RtApiDs :: getDefaultInputDevice( void )
5299 unsigned int RtApiDs :: getDeviceCount( void )
5301 // Set query flag for previously found devices to false, so that we
5302 // can check for any devices that have disappeared.
5303 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5304 dsDevices[i].found = false;
5306 // Query DirectSound devices.
5307 struct DsProbeData probeInfo;
5308 probeInfo.isInput = false;
5309 probeInfo.dsDevices = &dsDevices;
5310 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5311 if ( FAILED( result ) ) {
5312 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5313 errorText_ = errorStream_.str();
5314 error( RtAudioError::WARNING );
5317 // Query DirectSoundCapture devices.
5318 probeInfo.isInput = true;
5319 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5320 if ( FAILED( result ) ) {
5321 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5322 errorText_ = errorStream_.str();
5323 error( RtAudioError::WARNING );
5326 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5327 for ( unsigned int i=0; i<dsDevices.size(); ) {
5328 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5332 return static_cast<unsigned int>(dsDevices.size());
5335 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5337 RtAudio::DeviceInfo info;
5338 info.probed = false;
5340 if ( dsDevices.size() == 0 ) {
5341 // Force a query of all devices
5343 if ( dsDevices.size() == 0 ) {
5344 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5345 error( RtAudioError::INVALID_USE );
5350 if ( device >= dsDevices.size() ) {
5351 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5352 error( RtAudioError::INVALID_USE );
5357 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5359 LPDIRECTSOUND output;
5361 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5362 if ( FAILED( result ) ) {
5363 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5364 errorText_ = errorStream_.str();
5365 error( RtAudioError::WARNING );
5369 outCaps.dwSize = sizeof( outCaps );
5370 result = output->GetCaps( &outCaps );
5371 if ( FAILED( result ) ) {
5373 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5374 errorText_ = errorStream_.str();
5375 error( RtAudioError::WARNING );
5379 // Get output channel information.
5380 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5382 // Get sample rate information.
5383 info.sampleRates.clear();
5384 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5385 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5386 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5387 info.sampleRates.push_back( SAMPLE_RATES[k] );
5389 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5390 info.preferredSampleRate = SAMPLE_RATES[k];
5394 // Get format information.
5395 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5396 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5400 if ( getDefaultOutputDevice() == device )
5401 info.isDefaultOutput = true;
5403 if ( dsDevices[ device ].validId[1] == false ) {
5404 info.name = dsDevices[ device ].name;
5411 LPDIRECTSOUNDCAPTURE input;
5412 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5413 if ( FAILED( result ) ) {
5414 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5415 errorText_ = errorStream_.str();
5416 error( RtAudioError::WARNING );
5421 inCaps.dwSize = sizeof( inCaps );
5422 result = input->GetCaps( &inCaps );
5423 if ( FAILED( result ) ) {
5425 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5426 errorText_ = errorStream_.str();
5427 error( RtAudioError::WARNING );
5431 // Get input channel information.
5432 info.inputChannels = inCaps.dwChannels;
5434 // Get sample rate and format information.
5435 std::vector<unsigned int> rates;
5436 if ( inCaps.dwChannels >= 2 ) {
5437 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5438 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5439 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5440 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5441 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5442 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5443 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5444 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5446 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5447 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5448 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5449 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5450 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5452 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5453 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5454 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5455 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5456 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5459 else if ( inCaps.dwChannels == 1 ) {
5460 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5461 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5462 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5463 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5464 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5465 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5466 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5467 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5469 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5470 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5471 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5472 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5473 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5475 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5476 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5477 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5478 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5479 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5482 else info.inputChannels = 0; // technically, this would be an error
5486 if ( info.inputChannels == 0 ) return info;
5488 // Copy the supported rates to the info structure but avoid duplication.
5490 for ( unsigned int i=0; i<rates.size(); i++ ) {
5492 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5493 if ( rates[i] == info.sampleRates[j] ) {
5498 if ( found == false ) info.sampleRates.push_back( rates[i] );
5500 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5502 // If device opens for both playback and capture, we determine the channels.
5503 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5504 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5506 if ( device == 0 ) info.isDefaultInput = true;
5508 // Copy name and return.
5509 info.name = dsDevices[ device ].name;
5514 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5515 unsigned int firstChannel, unsigned int sampleRate,
5516 RtAudioFormat format, unsigned int *bufferSize,
5517 RtAudio::StreamOptions *options )
5519 if ( channels + firstChannel > 2 ) {
5520 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5524 size_t nDevices = dsDevices.size();
5525 if ( nDevices == 0 ) {
5526 // This should not happen because a check is made before this function is called.
5527 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5531 if ( device >= nDevices ) {
5532 // This should not happen because a check is made before this function is called.
5533 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5537 if ( mode == OUTPUT ) {
5538 if ( dsDevices[ device ].validId[0] == false ) {
5539 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5540 errorText_ = errorStream_.str();
5544 else { // mode == INPUT
5545 if ( dsDevices[ device ].validId[1] == false ) {
5546 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5547 errorText_ = errorStream_.str();
5552 // According to a note in PortAudio, using GetDesktopWindow()
5553 // instead of GetForegroundWindow() is supposed to avoid problems
5554 // that occur when the application's window is not the foreground
5555 // window. Also, if the application window closes before the
5556 // DirectSound buffer, DirectSound can crash. In the past, I had
5557 // problems when using GetDesktopWindow() but it seems fine now
5558 // (January 2010). I'll leave it commented here.
5559 // HWND hWnd = GetForegroundWindow();
5560 HWND hWnd = GetDesktopWindow();
5562 // Check the numberOfBuffers parameter and limit the lowest value to
5563 // two. This is a judgement call and a value of two is probably too
5564 // low for capture, but it should work for playback.
5566 if ( options ) nBuffers = options->numberOfBuffers;
5567 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5568 if ( nBuffers < 2 ) nBuffers = 3;
5570 // Check the lower range of the user-specified buffer size and set
5571 // (arbitrarily) to a lower bound of 32.
5572 if ( *bufferSize < 32 ) *bufferSize = 32;
5574 // Create the wave format structure. The data format setting will
5575 // be determined later.
5576 WAVEFORMATEX waveFormat;
5577 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5578 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5579 waveFormat.nChannels = channels + firstChannel;
5580 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5582 // Determine the device buffer size. By default, we'll use the value
5583 // defined above (32K), but we will grow it to make allowances for
5584 // very large software buffer sizes.
5585 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5586 DWORD dsPointerLeadTime = 0;
5588 void *ohandle = 0, *bhandle = 0;
5590 if ( mode == OUTPUT ) {
5592 LPDIRECTSOUND output;
5593 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5594 if ( FAILED( result ) ) {
5595 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5596 errorText_ = errorStream_.str();
5601 outCaps.dwSize = sizeof( outCaps );
5602 result = output->GetCaps( &outCaps );
5603 if ( FAILED( result ) ) {
5605 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5606 errorText_ = errorStream_.str();
5610 // Check channel information.
5611 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5612 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5613 errorText_ = errorStream_.str();
5617 // Check format information. Use 16-bit format unless not
5618 // supported or user requests 8-bit.
5619 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5620 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5621 waveFormat.wBitsPerSample = 16;
5622 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5625 waveFormat.wBitsPerSample = 8;
5626 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5628 stream_.userFormat = format;
5630 // Update wave format structure and buffer information.
5631 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5632 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5633 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5635 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5636 while ( dsPointerLeadTime * 2U > dsBufferSize )
5639 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5640 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5641 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5642 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5643 if ( FAILED( result ) ) {
5645 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5646 errorText_ = errorStream_.str();
5650 // Even though we will write to the secondary buffer, we need to
5651 // access the primary buffer to set the correct output format
5652 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5653 // buffer description.
5654 DSBUFFERDESC bufferDescription;
5655 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5656 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5657 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5659 // Obtain the primary buffer
5660 LPDIRECTSOUNDBUFFER buffer;
5661 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5662 if ( FAILED( result ) ) {
5664 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5665 errorText_ = errorStream_.str();
5669 // Set the primary DS buffer sound format.
5670 result = buffer->SetFormat( &waveFormat );
5671 if ( FAILED( result ) ) {
5673 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5674 errorText_ = errorStream_.str();
5678 // Setup the secondary DS buffer description.
5679 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5680 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5681 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5682 DSBCAPS_GLOBALFOCUS |
5683 DSBCAPS_GETCURRENTPOSITION2 |
5684 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5685 bufferDescription.dwBufferBytes = dsBufferSize;
5686 bufferDescription.lpwfxFormat = &waveFormat;
5688 // Try to create the secondary DS buffer. If that doesn't work,
5689 // try to use software mixing. Otherwise, there's a problem.
5690 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5691 if ( FAILED( result ) ) {
5692 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5693 DSBCAPS_GLOBALFOCUS |
5694 DSBCAPS_GETCURRENTPOSITION2 |
5695 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5696 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5697 if ( FAILED( result ) ) {
5699 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5700 errorText_ = errorStream_.str();
5705 // Get the buffer size ... might be different from what we specified.
5707 dsbcaps.dwSize = sizeof( DSBCAPS );
5708 result = buffer->GetCaps( &dsbcaps );
5709 if ( FAILED( result ) ) {
5712 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5713 errorText_ = errorStream_.str();
5717 dsBufferSize = dsbcaps.dwBufferBytes;
5719 // Lock the DS buffer
5722 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5723 if ( FAILED( result ) ) {
5726 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5727 errorText_ = errorStream_.str();
5731 // Zero the DS buffer
5732 ZeroMemory( audioPtr, dataLen );
5734 // Unlock the DS buffer
5735 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5736 if ( FAILED( result ) ) {
5739 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5740 errorText_ = errorStream_.str();
5744 ohandle = (void *) output;
5745 bhandle = (void *) buffer;
5748 if ( mode == INPUT ) {
5750 LPDIRECTSOUNDCAPTURE input;
5751 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5752 if ( FAILED( result ) ) {
5753 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5754 errorText_ = errorStream_.str();
5759 inCaps.dwSize = sizeof( inCaps );
5760 result = input->GetCaps( &inCaps );
5761 if ( FAILED( result ) ) {
5763 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5764 errorText_ = errorStream_.str();
5768 // Check channel information.
5769 if ( inCaps.dwChannels < channels + firstChannel ) {
5770 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5774 // Check format information. Use 16-bit format unless user
5776 DWORD deviceFormats;
5777 if ( channels + firstChannel == 2 ) {
5778 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5779 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5780 waveFormat.wBitsPerSample = 8;
5781 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5783 else { // assume 16-bit is supported
5784 waveFormat.wBitsPerSample = 16;
5785 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5788 else { // channel == 1
5789 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5790 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5791 waveFormat.wBitsPerSample = 8;
5792 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5794 else { // assume 16-bit is supported
5795 waveFormat.wBitsPerSample = 16;
5796 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5799 stream_.userFormat = format;
5801 // Update wave format structure and buffer information.
5802 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5803 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5804 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5806 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5807 while ( dsPointerLeadTime * 2U > dsBufferSize )
5810 // Setup the secondary DS buffer description.
5811 DSCBUFFERDESC bufferDescription;
5812 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5813 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5814 bufferDescription.dwFlags = 0;
5815 bufferDescription.dwReserved = 0;
5816 bufferDescription.dwBufferBytes = dsBufferSize;
5817 bufferDescription.lpwfxFormat = &waveFormat;
5819 // Create the capture buffer.
5820 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5821 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5822 if ( FAILED( result ) ) {
5824 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5825 errorText_ = errorStream_.str();
5829 // Get the buffer size ... might be different from what we specified.
5831 dscbcaps.dwSize = sizeof( DSCBCAPS );
5832 result = buffer->GetCaps( &dscbcaps );
5833 if ( FAILED( result ) ) {
5836 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5837 errorText_ = errorStream_.str();
5841 dsBufferSize = dscbcaps.dwBufferBytes;
5843 // NOTE: We could have a problem here if this is a duplex stream
5844 // and the play and capture hardware buffer sizes are different
5845 // (I'm actually not sure if that is a problem or not).
5846 // Currently, we are not verifying that.
5848 // Lock the capture buffer
5851 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5852 if ( FAILED( result ) ) {
5855 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5856 errorText_ = errorStream_.str();
5861 ZeroMemory( audioPtr, dataLen );
5863 // Unlock the buffer
5864 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5865 if ( FAILED( result ) ) {
5868 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5869 errorText_ = errorStream_.str();
5873 ohandle = (void *) input;
5874 bhandle = (void *) buffer;
5877 // Set various stream parameters
5878 DsHandle *handle = 0;
5879 stream_.nDeviceChannels[mode] = channels + firstChannel;
5880 stream_.nUserChannels[mode] = channels;
5881 stream_.bufferSize = *bufferSize;
5882 stream_.channelOffset[mode] = firstChannel;
5883 stream_.deviceInterleaved[mode] = true;
5884 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5885 else stream_.userInterleaved = true;
5887 // Set flag for buffer conversion
5888 stream_.doConvertBuffer[mode] = false;
5889 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5890 stream_.doConvertBuffer[mode] = true;
5891 if (stream_.userFormat != stream_.deviceFormat[mode])
5892 stream_.doConvertBuffer[mode] = true;
5893 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5894 stream_.nUserChannels[mode] > 1 )
5895 stream_.doConvertBuffer[mode] = true;
5897 // Allocate necessary internal buffers
5898 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5899 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5900 if ( stream_.userBuffer[mode] == NULL ) {
5901 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
5905 if ( stream_.doConvertBuffer[mode] ) {
5907 bool makeBuffer = true;
5908 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
5909 if ( mode == INPUT ) {
5910 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
5911 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
5912 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
5917 bufferBytes *= *bufferSize;
5918 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
5919 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
5920 if ( stream_.deviceBuffer == NULL ) {
5921 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
5927 // Allocate our DsHandle structures for the stream.
5928 if ( stream_.apiHandle == 0 ) {
5930 handle = new DsHandle;
5932 catch ( std::bad_alloc& ) {
5933 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
5937 // Create a manual-reset event.
5938 handle->condition = CreateEvent( NULL, // no security
5939 TRUE, // manual-reset
5940 FALSE, // non-signaled initially
5942 stream_.apiHandle = (void *) handle;
5945 handle = (DsHandle *) stream_.apiHandle;
5946 handle->id[mode] = ohandle;
5947 handle->buffer[mode] = bhandle;
5948 handle->dsBufferSize[mode] = dsBufferSize;
5949 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
5951 stream_.device[mode] = device;
5952 stream_.state = STREAM_STOPPED;
5953 if ( stream_.mode == OUTPUT && mode == INPUT )
5954 // We had already set up an output stream.
5955 stream_.mode = DUPLEX;
5957 stream_.mode = mode;
5958 stream_.nBuffers = nBuffers;
5959 stream_.sampleRate = sampleRate;
5961 // Setup the buffer conversion information structure.
5962 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
5964 // Setup the callback thread.
5965 if ( stream_.callbackInfo.isRunning == false ) {
5967 stream_.callbackInfo.isRunning = true;
5968 stream_.callbackInfo.object = (void *) this;
5969 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
5970 &stream_.callbackInfo, 0, &threadId );
5971 if ( stream_.callbackInfo.thread == 0 ) {
5972 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
5976 // Boost DS thread priority
5977 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
5983 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5984 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5985 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5986 if ( buffer ) buffer->Release();
5989 if ( handle->buffer[1] ) {
5990 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5991 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
5992 if ( buffer ) buffer->Release();
5995 CloseHandle( handle->condition );
5997 stream_.apiHandle = 0;
6000 for ( int i=0; i<2; i++ ) {
6001 if ( stream_.userBuffer[i] ) {
6002 free( stream_.userBuffer[i] );
6003 stream_.userBuffer[i] = 0;
6007 if ( stream_.deviceBuffer ) {
6008 free( stream_.deviceBuffer );
6009 stream_.deviceBuffer = 0;
6012 stream_.state = STREAM_CLOSED;
// Tear down an open DirectSound stream: stop and join the callback thread,
// release the per-mode DS handles, destroy the signaling event, free the
// user/device conversion buffers, and reset stream bookkeeping to CLOSED.
// Calling with no open stream raises a WARNING, not a hard error.
6016 void RtApiDs :: closeStream()
6018   if ( stream_.state == STREAM_CLOSED ) {
6019     errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6020     error( RtAudioError::WARNING );
6024   // Stop the callback thread.
// Clearing isRunning makes callbackHandler's loop exit; we then block until
// the thread actually terminates before releasing resources it may touch.
6025   stream_.callbackInfo.isRunning = false;
6026   WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6027   CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6029   DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Index 0 = playback (IDirectSound + buffer), index 1 = capture.
6031     if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6032       LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6033       LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6040     if ( handle->buffer[1] ) {
6041       LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6042       LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Destroy the manual-reset event created in probeDeviceOpen.
6049     CloseHandle( handle->condition );
6051     stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
6054   for ( int i=0; i<2; i++ ) {
6055     if ( stream_.userBuffer[i] ) {
6056       free( stream_.userBuffer[i] );
6057       stream_.userBuffer[i] = 0;
6061   if ( stream_.deviceBuffer ) {
6062     free( stream_.deviceBuffer );
6063     stream_.deviceBuffer = 0;
6066   stream_.mode = UNINITIALIZED;
6067   stream_.state = STREAM_CLOSED;
// Start playback and/or capture on an open, stopped stream. Raises the OS
// timer resolution, arms the duplex pre-roll counter, kicks the DS buffers
// into looping mode, resets drain bookkeeping and marks the stream RUNNING.
// Starting an already-running stream is only a WARNING.
6070 void RtApiDs :: startStream()
6073   if ( stream_.state == STREAM_RUNNING ) {
6074     errorText_ = "RtApiDs::startStream(): the stream is already running!";
6075     error( RtAudioError::WARNING );
6079   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6081   // Increase scheduler frequency on lesser windows (a side-effect of
6082   // increasing timer accuracy). On greater windows (Win2K or later),
6083   // this is already in effect.
// Paired with timeEndPeriod( 1 ) in stopStream.
6084   timeBeginPeriod( 1 );
// callbackEvent re-synchronizes the device pointers on the first pass
// after start, so reset the rolling flag here.
6086   buffersRolling = false;
6087   duplexPrerollBytes = 0;
6089   if ( stream_.mode == DUPLEX ) {
6090     // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6091     duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start the output buffer looping so the play cursor begins advancing.
6095   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6097     LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6098     result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6099     if ( FAILED( result ) ) {
6100       errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6101       errorText_ = errorStream_.str();
// Start the capture buffer looping as well (input or duplex).
6106   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6108     LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6109     result = buffer->Start( DSCBSTART_LOOPING );
6110     if ( FAILED( result ) ) {
6111       errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6112       errorText_ = errorStream_.str();
// Fresh run: no drain pending, condition event unsignaled.
6117   handle->drainCounter = 0;
6118   handle->internalDrain = false;
6119   ResetEvent( handle->condition );
6120   stream_.state = STREAM_RUNNING;
6123   if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream. For output/duplex, first requests a drain
// (drainCounter = 2) and blocks on the condition event until the callback
// thread signals it has finished; then stops each DS buffer, zeros its
// memory so a later restart does not replay stale audio, and rewinds the
// software buffer pointers to 0. Finally restores the normal timer period.
6126 void RtApiDs :: stopStream()
6129   if ( stream_.state == STREAM_STOPPED ) {
6130     errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6131     error( RtAudioError::WARNING );
6138   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6139   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain is in progress yet; request one and wait
// for callbackEvent to SetEvent( handle->condition ) when done.
6140     if ( handle->drainCounter == 0 ) {
6141       handle->drainCounter = 2;
6142       WaitForSingleObject( handle->condition, INFINITE );  // block until signaled
6145     stream_.state = STREAM_STOPPED;
6147     MUTEX_LOCK( &stream_.mutex );
6149     // Stop the buffer and clear memory
6150     LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6151     result = buffer->Stop();
6152     if ( FAILED( result ) ) {
6153       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6154       errorText_ = errorStream_.str();
6158     // Lock the buffer and clear it so that if we start to play again,
6159     // we won't have old data playing.
6160     result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6161     if ( FAILED( result ) ) {
6162       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6163       errorText_ = errorStream_.str();
6167     // Zero the DS buffer
6168     ZeroMemory( audioPtr, dataLen );
6170     // Unlock the DS buffer
6171     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6172     if ( FAILED( result ) ) {
6173       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6174       errorText_ = errorStream_.str();
6178     // If we start playing again, we must begin at beginning of buffer.
6179     handle->bufferPointer[0] = 0;
6182   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6183     LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6187     stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above;
// only lock it here for pure INPUT streams.
6189     if ( stream_.mode != DUPLEX )
6190       MUTEX_LOCK( &stream_.mutex );
6192     result = buffer->Stop();
6193     if ( FAILED( result ) ) {
6194       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6195       errorText_ = errorStream_.str();
6199     // Lock the buffer and clear it so that if we start to play again,
6200     // we won't have old data playing.
6201     result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6202     if ( FAILED( result ) ) {
6203       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6204       errorText_ = errorStream_.str();
6208     // Zero the DS buffer
6209     ZeroMemory( audioPtr, dataLen );
6211     // Unlock the DS buffer
6212     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6213     if ( FAILED( result ) ) {
6214       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6215       errorText_ = errorStream_.str();
6219     // If we start recording again, we must begin at beginning of buffer.
6220     handle->bufferPointer[1] = 0;
6224   timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6225   MUTEX_UNLOCK( &stream_.mutex );
6227   if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop the stream without draining pending user audio: setting
// drainCounter = 2 makes callbackEvent write zeros to the output instead
// of fresh callback data (see the drainCounter > 1 branch there).
// Aborting an already-stopped stream is only a WARNING.
6230 void RtApiDs :: abortStream()
6233   if ( stream_.state == STREAM_STOPPED ) {
6234     errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6235     error( RtAudioError::WARNING );
6239   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6240   handle->drainCounter = 2;
// Per-iteration service routine run on the DS callback thread: invokes the
// user callback for fresh audio (unless draining), re-synchronizes the DS
// play/capture cursors on the first pass after startStream, then copies
// output into / input out of the circular DirectSound buffers, sleeping as
// needed to stay out of the hardware's "forbidden zone".
//
// Fixes applied in review:
//  - "&currentWritePointer" / "&currentReadPointer" arguments had been
//    corrupted by HTML-entity mis-decoding ("&curren" -> '\u00A4'); restored.
//  - The HRESULT of the two Unlock() calls was not captured although the
//    very next line tests FAILED(result); now assigned so the check is
//    meaningful instead of re-testing the earlier Lock() result.
6245 void RtApiDs :: callbackEvent()
6247   if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6248     Sleep( 50 ); // sleep 50 milliseconds
6252   if ( stream_.state == STREAM_CLOSED ) {
6253     errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6254     error( RtAudioError::WARNING );
6258   CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6259   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6261   // Check if we were draining the stream and signal is finished.
6262   if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6264     stream_.state = STREAM_STOPPING;
// Only an externally-requested drain (stopStream) is blocked on the event.
6265     if ( handle->internalDrain == false )
6266       SetEvent( handle->condition );
6272   // Invoke user callback to get fresh output data UNLESS we are
6274   if ( handle->drainCounter == 0 ) {
6275     RtAudioCallback callback = (RtAudioCallback) info->callback;
6276     double streamTime = getStreamTime();
6277     RtAudioStreamStatus status = 0;
// Report (and clear) any under/overflow detected on the previous pass.
6278     if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6279       status |= RTAUDIO_OUTPUT_UNDERFLOW;
6280       handle->xrun[0] = false;
6282     if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6283       status |= RTAUDIO_INPUT_OVERFLOW;
6284       handle->xrun[1] = false;
6286     int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6287                                   stream_.bufferSize, streamTime, status, info->userData );
// Callback return of 2 = abort immediately; 1 = stop after draining output.
6288     if ( cbReturnValue == 2 ) {
6289       stream_.state = STREAM_STOPPING;
6290       handle->drainCounter = 2;
6294     else if ( cbReturnValue == 1 ) {
6295       handle->drainCounter = 1;
6296       handle->internalDrain = true;
6301   DWORD currentWritePointer, safeWritePointer;
6302   DWORD currentReadPointer, safeReadPointer;
6303   UINT nextWritePointer;
6305   LPVOID buffer1 = NULL;
6306   LPVOID buffer2 = NULL;
6307   DWORD bufferSize1 = 0;
6308   DWORD bufferSize2 = 0;
6313   MUTEX_LOCK( &stream_.mutex );
// The stream may have been stopped between the check above and here.
6314   if ( stream_.state == STREAM_STOPPED ) {
6315     MUTEX_UNLOCK( &stream_.mutex );
6319   if ( buffersRolling == false ) {
6320     if ( stream_.mode == DUPLEX ) {
6321       //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6323       // It takes a while for the devices to get rolling. As a result,
6324       // there's no guarantee that the capture and write device pointers
6325       // will move in lockstep. Wait here for both devices to start
6326       // rolling, and then set our buffer pointers accordingly.
6327       // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6328       // bytes later than the write buffer.
6330       // Stub: a serious risk of having a pre-emptive scheduling round
6331       // take place between the two GetCurrentPosition calls... but I'm
6332       // really not sure how to solve the problem. Temporarily boost to
6333       // Realtime priority, maybe; but I'm not sure what priority the
6334       // DirectSound service threads run at. We *should* be roughly
6335       // within a ms or so of correct.
6337       LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6338       LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6340       DWORD startSafeWritePointer, startSafeReadPointer;
// Snapshot the initial cursor positions ...
6342       result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6343       if ( FAILED( result ) ) {
6344         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6345         errorText_ = errorStream_.str();
6346         MUTEX_UNLOCK( &stream_.mutex );
6347         error( RtAudioError::SYSTEM_ERROR );
6350       result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6351       if ( FAILED( result ) ) {
6352         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6353         errorText_ = errorStream_.str();
6354         MUTEX_UNLOCK( &stream_.mutex );
6355         error( RtAudioError::SYSTEM_ERROR );
// ... then poll until BOTH cursors have moved off their start positions.
6359       result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6360       if ( FAILED( result ) ) {
6361         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6362         errorText_ = errorStream_.str();
6363         MUTEX_UNLOCK( &stream_.mutex );
6364         error( RtAudioError::SYSTEM_ERROR );
6367       result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6368       if ( FAILED( result ) ) {
6369         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6370         errorText_ = errorStream_.str();
6371         MUTEX_UNLOCK( &stream_.mutex );
6372         error( RtAudioError::SYSTEM_ERROR );
6375       if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6379       //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
// Start writing dsPointerLeadTime bytes ahead of the safe write cursor.
6381       handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6382       if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6383       handle->bufferPointer[1] = safeReadPointer;
6385     else if ( stream_.mode == OUTPUT ) {
6387       // Set the proper nextWritePosition after initial startup.
6388       LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6389       result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6390       if ( FAILED( result ) ) {
6391         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6392         errorText_ = errorStream_.str();
6393         MUTEX_UNLOCK( &stream_.mutex );
6394         error( RtAudioError::SYSTEM_ERROR );
6397       handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6398       if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6401     buffersRolling = true;
6404   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6406     LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6408     if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6409       bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6410       bufferBytes *= formatBytes( stream_.userFormat );
6411       memset( stream_.userBuffer[0], 0, bufferBytes );
6414     // Setup parameters and do buffer conversion if necessary.
6415     if ( stream_.doConvertBuffer[0] ) {
6416       buffer = stream_.deviceBuffer;
6417       convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6418       bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6419       bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6422       buffer = stream_.userBuffer[0];
6423       bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6424       bufferBytes *= formatBytes( stream_.userFormat );
6427     // No byte swapping necessary in DirectSound implementation.
6429     // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6430     // unsigned. So, we need to convert our signed 8-bit data here to
6432     if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6433       for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6435     DWORD dsBufferSize = handle->dsBufferSize[0];
6436     nextWritePointer = handle->bufferPointer[0];
6438     DWORD endWrite, leadPointer;
6440     // Find out where the read and "safe write" pointers are.
6441     result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6442     if ( FAILED( result ) ) {
6443       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6444       errorText_ = errorStream_.str();
6445       MUTEX_UNLOCK( &stream_.mutex );
6446       error( RtAudioError::SYSTEM_ERROR );
6450     // We will copy our output buffer into the region between
6451     // safeWritePointer and leadPointer. If leadPointer is not
6452     // beyond the next endWrite position, wait until it is.
6453     leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6454     //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6455     if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6456     if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6457     endWrite = nextWritePointer + bufferBytes;
6459     // Check whether the entire write region is behind the play pointer.
6460     if ( leadPointer >= endWrite ) break;
6462     // If we are here, then we must wait until the leadPointer advances
6463     // beyond the end of our next write region. We use the
6464     // Sleep() function to suspend operation until that happens.
6465     double millis = ( endWrite - leadPointer ) * 1000.0;
6466     millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6467     if ( millis < 1.0 ) millis = 1.0;
6468     Sleep( (DWORD) millis );
6471     if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6472          || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6473       // We've strayed into the forbidden zone ... resync the read pointer.
6474       handle->xrun[0] = true;
6475       nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6476       if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6477       handle->bufferPointer[0] = nextWritePointer;
6478       endWrite = nextWritePointer + bufferBytes;
6481     // Lock free space in the buffer
6482     result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6483                              &bufferSize1, &buffer2, &bufferSize2, 0 );
6484     if ( FAILED( result ) ) {
6485       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6486       errorText_ = errorStream_.str();
6487       MUTEX_UNLOCK( &stream_.mutex );
6488       error( RtAudioError::SYSTEM_ERROR );
6492     // Copy our buffer into the DS buffer
// Lock may return the region in two pieces if it wraps the circular buffer.
6493     CopyMemory( buffer1, buffer, bufferSize1 );
6494     if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6496     // Update our buffer offset and unlock sound buffer
// Capture the Unlock HRESULT so the FAILED() test below checks THIS call.
6497     result = dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6498     if ( FAILED( result ) ) {
6499       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6500       errorText_ = errorStream_.str();
6501       MUTEX_UNLOCK( &stream_.mutex );
6502       error( RtAudioError::SYSTEM_ERROR );
6505     nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6506     handle->bufferPointer[0] = nextWritePointer;
6509   // Don't bother draining input
6510   if ( handle->drainCounter ) {
6511     handle->drainCounter++;
6515   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6517     // Setup parameters.
6518     if ( stream_.doConvertBuffer[1] ) {
6519       buffer = stream_.deviceBuffer;
6520       bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6521       bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6524       buffer = stream_.userBuffer[1];
6525       bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6526       bufferBytes *= formatBytes( stream_.userFormat );
6529     LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6530     long nextReadPointer = handle->bufferPointer[1];
6531     DWORD dsBufferSize = handle->dsBufferSize[1];
6533     // Find out where the write and "safe read" pointers are.
6534     result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6535     if ( FAILED( result ) ) {
6536       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6537       errorText_ = errorStream_.str();
6538       MUTEX_UNLOCK( &stream_.mutex );
6539       error( RtAudioError::SYSTEM_ERROR );
6543     if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6544     DWORD endRead = nextReadPointer + bufferBytes;
6546     // Handling depends on whether we are INPUT or DUPLEX.
6547     // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6548     // then a wait here will drag the write pointers into the forbidden zone.
6550     // In DUPLEX mode, rather than wait, we will back off the read pointer until
6551     // it's in a safe position. This causes dropouts, but it seems to be the only
6552     // practical way to sync up the read and write pointers reliably, given the
6553     // the very complex relationship between phase and increment of the read and write
6556     // In order to minimize audible dropouts in DUPLEX mode, we will
6557     // provide a pre-roll period of 0.5 seconds in which we return
6558     // zeros from the read buffer while the pointers sync up.
6560     if ( stream_.mode == DUPLEX ) {
6561       if ( safeReadPointer < endRead ) {
6562         if ( duplexPrerollBytes <= 0 ) {
6563           // Pre-roll time over. Be more aggressive.
6564           int adjustment = endRead-safeReadPointer;
6566           handle->xrun[1] = true;
6568           // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6569           //   and perform fine adjustments later.
6570           // - small adjustments: back off by twice as much.
6571           if ( adjustment >= 2*bufferBytes )
6572             nextReadPointer = safeReadPointer-2*bufferBytes;
6574             nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6576           if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6580           // In pre-roll time. Just do it.
6581           nextReadPointer = safeReadPointer - bufferBytes;
6582           while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6584         endRead = nextReadPointer + bufferBytes;
6587     else { // mode == INPUT
6588       while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6589         // See comments for playback.
6590         double millis = (endRead - safeReadPointer) * 1000.0;
6591         millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6592         if ( millis < 1.0 ) millis = 1.0;
6593         Sleep( (DWORD) millis );
6595         // Wake up and find out where we are now.
6596         result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6597         if ( FAILED( result ) ) {
6598           errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6599           errorText_ = errorStream_.str();
6600           MUTEX_UNLOCK( &stream_.mutex );
6601           error( RtAudioError::SYSTEM_ERROR );
6605         if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6609     // Lock free space in the buffer
6610     result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6611                              &bufferSize1, &buffer2, &bufferSize2, 0 );
6612     if ( FAILED( result ) ) {
6613       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6614       errorText_ = errorStream_.str();
6615       MUTEX_UNLOCK( &stream_.mutex );
6616       error( RtAudioError::SYSTEM_ERROR );
6620     if ( duplexPrerollBytes <= 0 ) {
6621       // Copy our buffer into the DS buffer
6622       CopyMemory( buffer, buffer1, bufferSize1 );
6623       if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
// Still pre-rolling: hand the user zeros and count down the preroll bytes.
6626       memset( buffer, 0, bufferSize1 );
6627       if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6628       duplexPrerollBytes -= bufferSize1 + bufferSize2;
6631     // Update our buffer offset and unlock sound buffer
6632     nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
// Capture the Unlock HRESULT so the FAILED() test below checks THIS call.
6633     result = dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6634     if ( FAILED( result ) ) {
6635       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6636       errorText_ = errorStream_.str();
6637       MUTEX_UNLOCK( &stream_.mutex );
6638       error( RtAudioError::SYSTEM_ERROR );
6641     handle->bufferPointer[1] = nextReadPointer;
6643     // No byte swapping necessary in DirectSound implementation.
6645     // If necessary, convert 8-bit data from unsigned to signed.
6646     if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6647       for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6649     // Do buffer conversion if necessary.
6650     if ( stream_.doConvertBuffer[1] )
6651       convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6655   MUTEX_UNLOCK( &stream_.mutex );
6656   RtApi::tickStreamTime();
6659 // Definitions for utility functions and callbacks
6660 // specific to the DirectSound implementation.
// Entry point for the DS callback thread (created with _beginthreadex in
// probeDeviceOpen). Services the stream via callbackEvent() until
// CallbackInfo::isRunning is cleared (done by closeStream).
6662 static unsigned __stdcall callbackHandler( void *ptr )
6664   CallbackInfo *info = (CallbackInfo *) ptr;
6665   RtApiDs *object = (RtApiDs *) info->object;
// Take the flag by address so the loop observes updates from other threads.
6666   bool* isRunning = &info->isRunning;
6668   while ( *isRunning == true ) {
6669     object->callbackEvent();
// DirectSound enumeration callback, invoked once per device. Validates the
// device by opening it and querying its capabilities, then records it in
// the shared dsDevices list: an existing entry with the same name gets the
// direction-specific GUID (id[1]/validId[1] for capture, id[0]/validId[0]
// for playback); otherwise a new DsDevice record is appended. Returning
// TRUE keeps enumeration going.
6676 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6677                                           LPCTSTR description,
6681   struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6682   std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6685   bool validDevice = false;
6686   if ( probeInfo.isInput == true ) {
// Capture path: the device is valid if it reports channels and formats.
6688     LPDIRECTSOUNDCAPTURE object;
6690     hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6691     if ( hr != DS_OK ) return TRUE;
6693     caps.dwSize = sizeof(caps);
6694     hr = object->GetCaps( &caps );
6695     if ( hr == DS_OK ) {
6696       if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
// Playback path: valid if the primary buffer supports mono or stereo.
6703     LPDIRECTSOUND object;
6704     hr = DirectSoundCreate( lpguid, &object, NULL );
6705     if ( hr != DS_OK ) return TRUE;
6707     caps.dwSize = sizeof(caps);
6708     hr = object->GetCaps( &caps );
6709     if ( hr == DS_OK ) {
6710       if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6716   // If good device, then save its name and guid.
6717   std::string name = convertCharPointerToStdString( description );
6718   //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
// A NULL GUID denotes the system default device.
6719   if ( lpguid == NULL )
6720     name = "Default Device";
6721   if ( validDevice ) {
// Merge with an already-enumerated entry of the same name, if any.
6722     for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6723       if ( dsDevices[i].name == name ) {
6724         dsDevices[i].found = true;
6725         if ( probeInfo.isInput ) {
6726           dsDevices[i].id[1] = lpguid;
6727           dsDevices[i].validId[1] = true;
6730           dsDevices[i].id[0] = lpguid;
6731           dsDevices[i].validId[0] = true;
// No existing entry matched: record this device as a new one.
6739     device.found = true;
6740     if ( probeInfo.isInput ) {
6741       device.id[1] = lpguid;
6742       device.validId[1] = true;
6745       device.id[0] = lpguid;
6746       device.validId[0] = true;
6748     dsDevices.push_back( device );
// Translate a DirectSound DSERR_* result code into a short human-readable
// description; any unrecognized code maps to "DirectSound unknown error".
6754 static const char* getErrorString( int code )
6758   case DSERR_ALLOCATED:
6759     return "Already allocated";
6761   case DSERR_CONTROLUNAVAIL:
6762     return "Control unavailable";
6764   case DSERR_INVALIDPARAM:
6765     return "Invalid parameter";
6767   case DSERR_INVALIDCALL:
6768     return "Invalid call";
6771     return "Generic error";
6773   case DSERR_PRIOLEVELNEEDED:
6774     return "Priority level needed";
6776   case DSERR_OUTOFMEMORY:
6777     return "Out of memory";
6779   case DSERR_BADFORMAT:
6780     return "The sample rate or the channel format is not supported";
6782   case DSERR_UNSUPPORTED:
6783     return "Not supported";
6785   case DSERR_NODRIVER:
6788   case DSERR_ALREADYINITIALIZED:
6789     return "Already initialized";
6791   case DSERR_NOAGGREGATION:
6792     return "No aggregation";
6794   case DSERR_BUFFERLOST:
6795     return "Buffer lost";
6797   case DSERR_OTHERAPPHASPRIO:
6798     return "Another application already has priority";
6800   case DSERR_UNINITIALIZED:
6801     return "Uninitialized";
6804     return "DirectSound unknown error";
6807 //******************** End of __WINDOWS_DS__ *********************//
6811 #if defined(__LINUX_ALSA__)
6813 #include <alsa/asoundlib.h>
6816 // A structure to hold various information related to the ALSA API
6819 snd_pcm_t *handles[2];
6822 pthread_cond_t runnable_cv;
6826 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
6829 static void *alsaCallbackHandler( void * ptr );
6831 RtApiAlsa :: RtApiAlsa()
6833 // Nothing to do here.
6836 RtApiAlsa :: ~RtApiAlsa()
6838 if ( stream_.state != STREAM_CLOSED ) closeStream();
6841 unsigned int RtApiAlsa :: getDeviceCount( void )
6843 unsigned nDevices = 0;
6844 int result, subdevice, card;
6848 // Count cards and devices
6850 snd_card_next( &card );
6851 while ( card >= 0 ) {
6852 sprintf( name, "hw:%d", card );
6853 result = snd_ctl_open( &handle, name, 0 );
6855 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6856 errorText_ = errorStream_.str();
6857 error( RtAudioError::WARNING );
6862 result = snd_ctl_pcm_next_device( handle, &subdevice );
6864 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6865 errorText_ = errorStream_.str();
6866 error( RtAudioError::WARNING );
6869 if ( subdevice < 0 )
6874 snd_ctl_close( handle );
6875 snd_card_next( &card );
6878 result = snd_ctl_open( &handle, "default", 0 );
6881 snd_ctl_close( handle );
6887 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6889 RtAudio::DeviceInfo info;
6890 info.probed = false;
6892 unsigned nDevices = 0;
6893 int result, subdevice, card;
6897 // Count cards and devices
6900 snd_card_next( &card );
6901 while ( card >= 0 ) {
6902 sprintf( name, "hw:%d", card );
6903 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
6905 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6906 errorText_ = errorStream_.str();
6907 error( RtAudioError::WARNING );
6912 result = snd_ctl_pcm_next_device( chandle, &subdevice );
6914 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6915 errorText_ = errorStream_.str();
6916 error( RtAudioError::WARNING );
6919 if ( subdevice < 0 ) break;
6920 if ( nDevices == device ) {
6921 sprintf( name, "hw:%d,%d", card, subdevice );
6927 snd_ctl_close( chandle );
6928 snd_card_next( &card );
6931 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
6932 if ( result == 0 ) {
6933 if ( nDevices == device ) {
6934 strcpy( name, "default" );
6940 if ( nDevices == 0 ) {
6941 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
6942 error( RtAudioError::INVALID_USE );
6946 if ( device >= nDevices ) {
6947 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
6948 error( RtAudioError::INVALID_USE );
6954 // If a stream is already open, we cannot probe the stream devices.
6955 // Thus, use the saved results.
6956 if ( stream_.state != STREAM_CLOSED &&
6957 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
6958 snd_ctl_close( chandle );
6959 if ( device >= devices_.size() ) {
6960 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
6961 error( RtAudioError::WARNING );
6964 return devices_[ device ];
6967 int openMode = SND_PCM_ASYNC;
6968 snd_pcm_stream_t stream;
6969 snd_pcm_info_t *pcminfo;
6970 snd_pcm_info_alloca( &pcminfo );
6972 snd_pcm_hw_params_t *params;
6973 snd_pcm_hw_params_alloca( ¶ms );
6975 // First try for playback unless default device (which has subdev -1)
6976 stream = SND_PCM_STREAM_PLAYBACK;
6977 snd_pcm_info_set_stream( pcminfo, stream );
6978 if ( subdevice != -1 ) {
6979 snd_pcm_info_set_device( pcminfo, subdevice );
6980 snd_pcm_info_set_subdevice( pcminfo, 0 );
6982 result = snd_ctl_pcm_info( chandle, pcminfo );
6984 // Device probably doesn't support playback.
6989 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
6991 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
6992 errorText_ = errorStream_.str();
6993 error( RtAudioError::WARNING );
6997 // The device is open ... fill the parameter structure.
6998 result = snd_pcm_hw_params_any( phandle, params );
7000 snd_pcm_close( phandle );
7001 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7002 errorText_ = errorStream_.str();
7003 error( RtAudioError::WARNING );
7007 // Get output channel information.
7009 result = snd_pcm_hw_params_get_channels_max( params, &value );
7011 snd_pcm_close( phandle );
7012 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7013 errorText_ = errorStream_.str();
7014 error( RtAudioError::WARNING );
7017 info.outputChannels = value;
7018 snd_pcm_close( phandle );
7021 stream = SND_PCM_STREAM_CAPTURE;
7022 snd_pcm_info_set_stream( pcminfo, stream );
7024 // Now try for capture unless default device (with subdev = -1)
7025 if ( subdevice != -1 ) {
7026 result = snd_ctl_pcm_info( chandle, pcminfo );
7027 snd_ctl_close( chandle );
7029 // Device probably doesn't support capture.
7030 if ( info.outputChannels == 0 ) return info;
7031 goto probeParameters;
7035 snd_ctl_close( chandle );
7037 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7039 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7040 errorText_ = errorStream_.str();
7041 error( RtAudioError::WARNING );
7042 if ( info.outputChannels == 0 ) return info;
7043 goto probeParameters;
7046 // The device is open ... fill the parameter structure.
7047 result = snd_pcm_hw_params_any( phandle, params );
7049 snd_pcm_close( phandle );
7050 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7051 errorText_ = errorStream_.str();
7052 error( RtAudioError::WARNING );
7053 if ( info.outputChannels == 0 ) return info;
7054 goto probeParameters;
7057 result = snd_pcm_hw_params_get_channels_max( params, &value );
7059 snd_pcm_close( phandle );
7060 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7061 errorText_ = errorStream_.str();
7062 error( RtAudioError::WARNING );
7063 if ( info.outputChannels == 0 ) return info;
7064 goto probeParameters;
7066 info.inputChannels = value;
7067 snd_pcm_close( phandle );
7069 // If device opens for both playback and capture, we determine the channels.
7070 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7071 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7073 // ALSA doesn't provide default devices so we'll use the first available one.
7074 if ( device == 0 && info.outputChannels > 0 )
7075 info.isDefaultOutput = true;
7076 if ( device == 0 && info.inputChannels > 0 )
7077 info.isDefaultInput = true;
7080 // At this point, we just need to figure out the supported data
7081 // formats and sample rates. We'll proceed by opening the device in
7082 // the direction with the maximum number of channels, or playback if
7083 // they are equal. This might limit our sample rate options, but so
7086 if ( info.outputChannels >= info.inputChannels )
7087 stream = SND_PCM_STREAM_PLAYBACK;
7089 stream = SND_PCM_STREAM_CAPTURE;
7090 snd_pcm_info_set_stream( pcminfo, stream );
7092 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7094 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7095 errorText_ = errorStream_.str();
7096 error( RtAudioError::WARNING );
7100 // The device is open ... fill the parameter structure.
7101 result = snd_pcm_hw_params_any( phandle, params );
7103 snd_pcm_close( phandle );
7104 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7105 errorText_ = errorStream_.str();
7106 error( RtAudioError::WARNING );
7110 // Test our discrete set of sample rate values.
7111 info.sampleRates.clear();
7112 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7113 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7114 info.sampleRates.push_back( SAMPLE_RATES[i] );
7116 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7117 info.preferredSampleRate = SAMPLE_RATES[i];
7120 if ( info.sampleRates.size() == 0 ) {
7121 snd_pcm_close( phandle );
7122 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7123 errorText_ = errorStream_.str();
7124 error( RtAudioError::WARNING );
7128 // Probe the supported data formats ... we don't care about endian-ness just yet
7129 snd_pcm_format_t format;
7130 info.nativeFormats = 0;
7131 format = SND_PCM_FORMAT_S8;
7132 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7133 info.nativeFormats |= RTAUDIO_SINT8;
7134 format = SND_PCM_FORMAT_S16;
7135 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7136 info.nativeFormats |= RTAUDIO_SINT16;
7137 format = SND_PCM_FORMAT_S24;
7138 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7139 info.nativeFormats |= RTAUDIO_SINT24;
7140 format = SND_PCM_FORMAT_S32;
7141 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7142 info.nativeFormats |= RTAUDIO_SINT32;
7143 format = SND_PCM_FORMAT_FLOAT;
7144 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7145 info.nativeFormats |= RTAUDIO_FLOAT32;
7146 format = SND_PCM_FORMAT_FLOAT64;
7147 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7148 info.nativeFormats |= RTAUDIO_FLOAT64;
7150 // Check that we have at least one supported format
7151 if ( info.nativeFormats == 0 ) {
7152 snd_pcm_close( phandle );
7153 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7154 errorText_ = errorStream_.str();
7155 error( RtAudioError::WARNING );
7159 // Get the device name
7161 result = snd_card_get_name( card, &cardname );
7162 if ( result >= 0 ) {
7163 sprintf( name, "hw:%s,%d", cardname, subdevice );
7168 // That's all ... close the device and return
7169 snd_pcm_close( phandle );
7174 void RtApiAlsa :: saveDeviceInfo( void )
7178 unsigned int nDevices = getDeviceCount();
7179 devices_.resize( nDevices );
7180 for ( unsigned int i=0; i<nDevices; i++ )
7181 devices_[i] = getDeviceInfo( i );
7184 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7185 unsigned int firstChannel, unsigned int sampleRate,
7186 RtAudioFormat format, unsigned int *bufferSize,
7187 RtAudio::StreamOptions *options )
7190 #if defined(__RTAUDIO_DEBUG__)
7192 snd_output_stdio_attach(&out, stderr, 0);
7195 // I'm not using the "plug" interface ... too much inconsistent behavior.
7197 unsigned nDevices = 0;
7198 int result, subdevice, card;
7202 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7203 snprintf(name, sizeof(name), "%s", "default");
7205 // Count cards and devices
7207 snd_card_next( &card );
7208 while ( card >= 0 ) {
7209 sprintf( name, "hw:%d", card );
7210 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7212 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7213 errorText_ = errorStream_.str();
7218 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7219 if ( result < 0 ) break;
7220 if ( subdevice < 0 ) break;
7221 if ( nDevices == device ) {
7222 sprintf( name, "hw:%d,%d", card, subdevice );
7223 snd_ctl_close( chandle );
7228 snd_ctl_close( chandle );
7229 snd_card_next( &card );
7232 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7233 if ( result == 0 ) {
7234 if ( nDevices == device ) {
7235 strcpy( name, "default" );
7241 if ( nDevices == 0 ) {
7242 // This should not happen because a check is made before this function is called.
7243 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7247 if ( device >= nDevices ) {
7248 // This should not happen because a check is made before this function is called.
7249 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7256 // The getDeviceInfo() function will not work for a device that is
7257 // already open. Thus, we'll probe the system before opening a
7258 // stream and save the results for use by getDeviceInfo().
7259 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7260 this->saveDeviceInfo();
7262 snd_pcm_stream_t stream;
7263 if ( mode == OUTPUT )
7264 stream = SND_PCM_STREAM_PLAYBACK;
7266 stream = SND_PCM_STREAM_CAPTURE;
7269 int openMode = SND_PCM_ASYNC;
7270 result = snd_pcm_open( &phandle, name, stream, openMode );
7272 if ( mode == OUTPUT )
7273 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7275 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7276 errorText_ = errorStream_.str();
7280 // Fill the parameter structure.
7281 snd_pcm_hw_params_t *hw_params;
7282 snd_pcm_hw_params_alloca( &hw_params );
7283 result = snd_pcm_hw_params_any( phandle, hw_params );
7285 snd_pcm_close( phandle );
7286 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7287 errorText_ = errorStream_.str();
7291 #if defined(__RTAUDIO_DEBUG__)
7292 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7293 snd_pcm_hw_params_dump( hw_params, out );
7296 // Set access ... check user preference.
7297 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7298 stream_.userInterleaved = false;
7299 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7301 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7302 stream_.deviceInterleaved[mode] = true;
7305 stream_.deviceInterleaved[mode] = false;
7308 stream_.userInterleaved = true;
7309 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7311 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7312 stream_.deviceInterleaved[mode] = false;
7315 stream_.deviceInterleaved[mode] = true;
7319 snd_pcm_close( phandle );
7320 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7321 errorText_ = errorStream_.str();
7325 // Determine how to set the device format.
7326 stream_.userFormat = format;
7327 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7329 if ( format == RTAUDIO_SINT8 )
7330 deviceFormat = SND_PCM_FORMAT_S8;
7331 else if ( format == RTAUDIO_SINT16 )
7332 deviceFormat = SND_PCM_FORMAT_S16;
7333 else if ( format == RTAUDIO_SINT24 )
7334 deviceFormat = SND_PCM_FORMAT_S24;
7335 else if ( format == RTAUDIO_SINT32 )
7336 deviceFormat = SND_PCM_FORMAT_S32;
7337 else if ( format == RTAUDIO_FLOAT32 )
7338 deviceFormat = SND_PCM_FORMAT_FLOAT;
7339 else if ( format == RTAUDIO_FLOAT64 )
7340 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7342 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7343 stream_.deviceFormat[mode] = format;
7347 // The user requested format is not natively supported by the device.
7348 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7349 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7350 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7354 deviceFormat = SND_PCM_FORMAT_FLOAT;
7355 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7356 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7360 deviceFormat = SND_PCM_FORMAT_S32;
7361 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7362 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7366 deviceFormat = SND_PCM_FORMAT_S24;
7367 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7368 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7372 deviceFormat = SND_PCM_FORMAT_S16;
7373 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7374 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7378 deviceFormat = SND_PCM_FORMAT_S8;
7379 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7380 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7384 // If we get here, no supported format was found.
7385 snd_pcm_close( phandle );
7386 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7387 errorText_ = errorStream_.str();
7391 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7393 snd_pcm_close( phandle );
7394 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7395 errorText_ = errorStream_.str();
7399 // Determine whether byte-swaping is necessary.
7400 stream_.doByteSwap[mode] = false;
7401 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7402 result = snd_pcm_format_cpu_endian( deviceFormat );
7404 stream_.doByteSwap[mode] = true;
7405 else if (result < 0) {
7406 snd_pcm_close( phandle );
7407 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7408 errorText_ = errorStream_.str();
7413 // Set the sample rate.
7414 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7416 snd_pcm_close( phandle );
7417 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7418 errorText_ = errorStream_.str();
7422 // Determine the number of channels for this device. We support a possible
7423 // minimum device channel number > than the value requested by the user.
7424 stream_.nUserChannels[mode] = channels;
7426 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7427 unsigned int deviceChannels = value;
7428 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7429 snd_pcm_close( phandle );
7430 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7431 errorText_ = errorStream_.str();
7435 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7437 snd_pcm_close( phandle );
7438 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7439 errorText_ = errorStream_.str();
7442 deviceChannels = value;
7443 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7444 stream_.nDeviceChannels[mode] = deviceChannels;
7446 // Set the device channels.
7447 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7449 snd_pcm_close( phandle );
7450 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7451 errorText_ = errorStream_.str();
7455 // Set the buffer (or period) size.
7457 snd_pcm_uframes_t periodSize = *bufferSize;
7458 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7460 snd_pcm_close( phandle );
7461 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7462 errorText_ = errorStream_.str();
7465 *bufferSize = periodSize;
7467 // Set the buffer number, which in ALSA is referred to as the "period".
7468 unsigned int periods = 0;
7469 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7470 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7471 if ( periods < 2 ) periods = 4; // a fairly safe default value
7472 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7474 snd_pcm_close( phandle );
7475 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7476 errorText_ = errorStream_.str();
7480 // If attempting to setup a duplex stream, the bufferSize parameter
7481 // MUST be the same in both directions!
7482 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7483 snd_pcm_close( phandle );
7484 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7485 errorText_ = errorStream_.str();
7489 stream_.bufferSize = *bufferSize;
7491 // Install the hardware configuration
7492 result = snd_pcm_hw_params( phandle, hw_params );
7494 snd_pcm_close( phandle );
7495 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7496 errorText_ = errorStream_.str();
7500 #if defined(__RTAUDIO_DEBUG__)
7501 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7502 snd_pcm_hw_params_dump( hw_params, out );
7505 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7506 snd_pcm_sw_params_t *sw_params = NULL;
7507 snd_pcm_sw_params_alloca( &sw_params );
7508 snd_pcm_sw_params_current( phandle, sw_params );
7509 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7510 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7511 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7513 // The following two settings were suggested by Theo Veenker
7514 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7515 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7517 // here are two options for a fix
7518 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7519 snd_pcm_uframes_t val;
7520 snd_pcm_sw_params_get_boundary( sw_params, &val );
7521 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7523 result = snd_pcm_sw_params( phandle, sw_params );
7525 snd_pcm_close( phandle );
7526 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7527 errorText_ = errorStream_.str();
7531 #if defined(__RTAUDIO_DEBUG__)
7532 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7533 snd_pcm_sw_params_dump( sw_params, out );
7536 // Set flags for buffer conversion
7537 stream_.doConvertBuffer[mode] = false;
7538 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7539 stream_.doConvertBuffer[mode] = true;
7540 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7541 stream_.doConvertBuffer[mode] = true;
7542 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7543 stream_.nUserChannels[mode] > 1 )
7544 stream_.doConvertBuffer[mode] = true;
7546 // Allocate the ApiHandle if necessary and then save.
7547 AlsaHandle *apiInfo = 0;
7548 if ( stream_.apiHandle == 0 ) {
7550 apiInfo = (AlsaHandle *) new AlsaHandle;
7552 catch ( std::bad_alloc& ) {
7553 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7557 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7558 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7562 stream_.apiHandle = (void *) apiInfo;
7563 apiInfo->handles[0] = 0;
7564 apiInfo->handles[1] = 0;
7567 apiInfo = (AlsaHandle *) stream_.apiHandle;
7569 apiInfo->handles[mode] = phandle;
7572 // Allocate necessary internal buffers.
7573 unsigned long bufferBytes;
7574 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7575 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7576 if ( stream_.userBuffer[mode] == NULL ) {
7577 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7581 if ( stream_.doConvertBuffer[mode] ) {
7583 bool makeBuffer = true;
7584 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7585 if ( mode == INPUT ) {
7586 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7587 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7588 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7593 bufferBytes *= *bufferSize;
7594 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7595 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7596 if ( stream_.deviceBuffer == NULL ) {
7597 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7603 stream_.sampleRate = sampleRate;
7604 stream_.nBuffers = periods;
7605 stream_.device[mode] = device;
7606 stream_.state = STREAM_STOPPED;
7608 // Setup the buffer conversion information structure.
7609 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7611 // Setup thread if necessary.
7612 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7613 // We had already set up an output stream.
7614 stream_.mode = DUPLEX;
7615 // Link the streams if possible.
7616 apiInfo->synchronized = false;
7617 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7618 apiInfo->synchronized = true;
7620 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7621 error( RtAudioError::WARNING );
7625 stream_.mode = mode;
7627 // Setup callback thread.
7628 stream_.callbackInfo.object = (void *) this;
7630 // Set the thread attributes for joinable and realtime scheduling
7631 // priority (optional). The higher priority will only take affect
7632 // if the program is run as root or suid. Note, under Linux
7633 // processes with CAP_SYS_NICE privilege, a user can change
7634 // scheduling policy and priority (thus need not be root). See
7635 // POSIX "capabilities".
7636 pthread_attr_t attr;
7637 pthread_attr_init( &attr );
7638 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7639 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7640 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7641 stream_.callbackInfo.doRealtime = true;
7642 struct sched_param param;
7643 int priority = options->priority;
7644 int min = sched_get_priority_min( SCHED_RR );
7645 int max = sched_get_priority_max( SCHED_RR );
7646 if ( priority < min ) priority = min;
7647 else if ( priority > max ) priority = max;
7648 param.sched_priority = priority;
7650 // Set the policy BEFORE the priority. Otherwise it fails.
7651 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7652 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7653 // This is definitely required. Otherwise it fails.
7654 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7655 pthread_attr_setschedparam(&attr, ¶m);
7658 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7660 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7663 stream_.callbackInfo.isRunning = true;
7664 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7665 pthread_attr_destroy( &attr );
7667 // Failed. Try instead with default attributes.
7668 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7670 stream_.callbackInfo.isRunning = false;
7671 errorText_ = "RtApiAlsa::error creating callback thread!";
7681 pthread_cond_destroy( &apiInfo->runnable_cv );
7682 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7683 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7685 stream_.apiHandle = 0;
7688 if ( phandle) snd_pcm_close( phandle );
7690 for ( int i=0; i<2; i++ ) {
7691 if ( stream_.userBuffer[i] ) {
7692 free( stream_.userBuffer[i] );
7693 stream_.userBuffer[i] = 0;
7697 if ( stream_.deviceBuffer ) {
7698 free( stream_.deviceBuffer );
7699 stream_.deviceBuffer = 0;
7702 stream_.state = STREAM_CLOSED;
7706 void RtApiAlsa :: closeStream()
7708 if ( stream_.state == STREAM_CLOSED ) {
7709 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7710 error( RtAudioError::WARNING );
7714 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7715 stream_.callbackInfo.isRunning = false;
7716 MUTEX_LOCK( &stream_.mutex );
7717 if ( stream_.state == STREAM_STOPPED ) {
7718 apiInfo->runnable = true;
7719 pthread_cond_signal( &apiInfo->runnable_cv );
7721 MUTEX_UNLOCK( &stream_.mutex );
7722 pthread_join( stream_.callbackInfo.thread, NULL );
7724 if ( stream_.state == STREAM_RUNNING ) {
7725 stream_.state = STREAM_STOPPED;
7726 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7727 snd_pcm_drop( apiInfo->handles[0] );
7728 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7729 snd_pcm_drop( apiInfo->handles[1] );
7733 pthread_cond_destroy( &apiInfo->runnable_cv );
7734 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7735 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7737 stream_.apiHandle = 0;
7740 for ( int i=0; i<2; i++ ) {
7741 if ( stream_.userBuffer[i] ) {
7742 free( stream_.userBuffer[i] );
7743 stream_.userBuffer[i] = 0;
7747 if ( stream_.deviceBuffer ) {
7748 free( stream_.deviceBuffer );
7749 stream_.deviceBuffer = 0;
7752 stream_.mode = UNINITIALIZED;
7753 stream_.state = STREAM_CLOSED;
// Start (or resume) an open ALSA stream: prepare the PCM device(s) if
// needed, mark the stream RUNNING, and wake the callback thread that is
// parked on runnable_cv.
// NOTE(review): this dump is elided (gaps in the embedded line numbers) —
// error-branch braces / early returns are not visible here; confirm
// against the canonical RtAudio.cpp before editing logic.
7756 void RtApiAlsa :: startStream()
7758 // This method calls snd_pcm_prepare if the device isn't already in that state.
7761 if ( stream_.state == STREAM_RUNNING ) {
7762 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7763 error( RtAudioError::WARNING );
// Hold the stream mutex while touching PCM state and the runnable flag.
7767 MUTEX_LOCK( &stream_.mutex );
7770 snd_pcm_state_t state;
7771 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7772 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// handles[0] = playback device: prepare it unless ALSA already reports PREPARED.
7773 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7774 state = snd_pcm_state( handle[0] );
7775 if ( state != SND_PCM_STATE_PREPARED ) {
7776 result = snd_pcm_prepare( handle[0] );
7778 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7779 errorText_ = errorStream_.str();
// handles[1] = capture device; skipped when the two devices are link-synchronized.
7785 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7786 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7787 state = snd_pcm_state( handle[1] );
7788 if ( state != SND_PCM_STATE_PREPARED ) {
7789 result = snd_pcm_prepare( handle[1] );
7791 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7792 errorText_ = errorStream_.str();
7798 stream_.state = STREAM_RUNNING;
// Wake alsaCallbackHandler, which waits on runnable_cv while stopped.
7801 apiInfo->runnable = true;
7802 pthread_cond_signal( &apiInfo->runnable_cv );
7803 MUTEX_UNLOCK( &stream_.mutex );
// result < 0 means some snd_pcm_* call above failed; report it after unlocking.
7805 if ( result >= 0 ) return;
7806 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: drain pending playback samples
// (snd_pcm_drain) and drop pending capture data, then park the callback
// thread by clearing apiInfo->runnable.
7809 void RtApiAlsa :: stopStream()
7812 if ( stream_.state == STREAM_STOPPED ) {
7813 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
7814 error( RtAudioError::WARNING );
// State is flipped before taking the mutex so the callback thread sees
// STOPPED on its next iteration.
7818 stream_.state = STREAM_STOPPED;
7819 MUTEX_LOCK( &stream_.mutex );
7822 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7823 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7824 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Synchronized duplex: drop immediately; otherwise drain so queued
// output is played out before the device stops.
7825 if ( apiInfo->synchronized )
7826 result = snd_pcm_drop( handle[0] );
7828 result = snd_pcm_drain( handle[0] );
7830 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
7831 errorText_ = errorStream_.str();
7836 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7837 result = snd_pcm_drop( handle[1] );
7839 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
7840 errorText_ = errorStream_.str();
7846 apiInfo->runnable = false; // fixes high CPU usage when stopped
7847 MUTEX_UNLOCK( &stream_.mutex );
7849 if ( result >= 0 ) return;
7850 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream immediately: identical to stopStream() except the
// playback device is always dropped (snd_pcm_drop), discarding any
// queued output instead of draining it.
7853 void RtApiAlsa :: abortStream()
7856 if ( stream_.state == STREAM_STOPPED ) {
7857 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
7858 error( RtAudioError::WARNING );
7862 stream_.state = STREAM_STOPPED;
7863 MUTEX_LOCK( &stream_.mutex );
7866 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7867 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7868 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7869 result = snd_pcm_drop( handle[0] );
7871 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
7872 errorText_ = errorStream_.str();
7877 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7878 result = snd_pcm_drop( handle[1] );
7880 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
7881 errorText_ = errorStream_.str();
7887 apiInfo->runnable = false; // fixes high CPU usage when stopped
7888 MUTEX_UNLOCK( &stream_.mutex );
7890 if ( result >= 0 ) return;
7891 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback thread: wait while stopped, invoke
// the user callback, then read capture data and/or write playback data,
// handling xrun (-EPIPE) recovery via snd_pcm_prepare.
// NOTE(review): elided dump — several closing braces / else branches and
// local declarations (e.g. `buffer`, `channels`, `result`) sit on lines
// not visible here.
7894 void RtApiAlsa :: callbackEvent()
7896 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Parked state: block on runnable_cv until startStream() signals, then
// re-check the stream state (it may have been closed while waiting).
7897 if ( stream_.state == STREAM_STOPPED ) {
7898 MUTEX_LOCK( &stream_.mutex );
7899 while ( !apiInfo->runnable )
7900 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
7902 if ( stream_.state != STREAM_RUNNING ) {
7903 MUTEX_UNLOCK( &stream_.mutex );
7906 MUTEX_UNLOCK( &stream_.mutex );
7909 if ( stream_.state == STREAM_CLOSED ) {
7910 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
7911 error( RtAudioError::WARNING );
// Report any xrun flagged by a previous iteration to the user callback
// via the status bits, then clear the flag.
7915 int doStopStream = 0;
7916 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
7917 double streamTime = getStreamTime();
7918 RtAudioStreamStatus status = 0;
7919 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
7920 status |= RTAUDIO_OUTPUT_UNDERFLOW;
7921 apiInfo->xrun[0] = false;
7923 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
7924 status |= RTAUDIO_INPUT_OVERFLOW;
7925 apiInfo->xrun[1] = false;
// User callback return: 1 => stop (drain), 2 => abort immediately.
7927 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
7928 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
7930 if ( doStopStream == 2 ) {
7935 MUTEX_LOCK( &stream_.mutex );
7937 // The state might change while waiting on a mutex.
7938 if ( stream_.state == STREAM_STOPPED ) goto unlock;
7944 snd_pcm_sframes_t frames;
7945 RtAudioFormat format;
7946 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture side (index 1) ----
7948 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
7950 // Setup parameters.
7951 if ( stream_.doConvertBuffer[1] ) {
7952 buffer = stream_.deviceBuffer;
7953 channels = stream_.nDeviceChannels[1];
7954 format = stream_.deviceFormat[1];
7957 buffer = stream_.userBuffer[1];
7958 channels = stream_.nUserChannels[1];
7959 format = stream_.userFormat;
7962 // Read samples from device in interleaved/non-interleaved format.
7963 if ( stream_.deviceInterleaved[1] )
7964 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the flat buffer.
7966 void *bufs[channels];
7967 size_t offset = stream_.bufferSize * formatBytes( format );
7968 for ( int i=0; i<channels; i++ )
7969 bufs[i] = (void *) (buffer + (i * offset));
7970 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
7973 if ( result < (int) stream_.bufferSize ) {
7974 // Either an error or overrun occured.
// -EPIPE + XRUN state => overrun: flag it and re-prepare the device.
7975 if ( result == -EPIPE ) {
7976 snd_pcm_state_t state = snd_pcm_state( handle[1] );
7977 if ( state == SND_PCM_STATE_XRUN ) {
7978 apiInfo->xrun[1] = true;
7979 result = snd_pcm_prepare( handle[1] );
7981 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
7982 errorText_ = errorStream_.str();
7986 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
7987 errorText_ = errorStream_.str();
7991 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
7992 errorText_ = errorStream_.str();
7994 error( RtAudioError::WARNING );
7998 // Do byte swapping if necessary.
7999 if ( stream_.doByteSwap[1] )
8000 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8002 // Do buffer conversion if necessary.
8003 if ( stream_.doConvertBuffer[1] )
8004 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8006 // Check stream latency
8007 result = snd_pcm_delay( handle[1], &frames );
8008 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback side (index 0) ----
8013 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8015 // Setup parameters and do buffer conversion if necessary.
8016 if ( stream_.doConvertBuffer[0] ) {
8017 buffer = stream_.deviceBuffer;
8018 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8019 channels = stream_.nDeviceChannels[0];
8020 format = stream_.deviceFormat[0];
8023 buffer = stream_.userBuffer[0];
8024 channels = stream_.nUserChannels[0];
8025 format = stream_.userFormat;
8028 // Do byte swapping if necessary.
8029 if ( stream_.doByteSwap[0] )
8030 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8032 // Write samples to device in interleaved/non-interleaved format.
8033 if ( stream_.deviceInterleaved[0] )
8034 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8036 void *bufs[channels];
8037 size_t offset = stream_.bufferSize * formatBytes( format );
8038 for ( int i=0; i<channels; i++ )
8039 bufs[i] = (void *) (buffer + (i * offset));
8040 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8043 if ( result < (int) stream_.bufferSize ) {
8044 // Either an error or underrun occured.
8045 if ( result == -EPIPE ) {
8046 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8047 if ( state == SND_PCM_STATE_XRUN ) {
8048 apiInfo->xrun[0] = true;
8049 result = snd_pcm_prepare( handle[0] );
8051 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8052 errorText_ = errorStream_.str();
8055 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8058 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8059 errorText_ = errorStream_.str();
8063 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8064 errorText_ = errorStream_.str();
8066 error( RtAudioError::WARNING );
8070 // Check stream latency
8071 result = snd_pcm_delay( handle[0], &frames );
8072 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8076 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; honor a deferred stop request (callback
// returned 1).
8078 RtApi::tickStreamTime();
8079 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread: loops calling
// RtApiAlsa::callbackEvent() until CallbackInfo::isRunning goes false,
// honoring pthread cancellation points between iterations.
8082 static void *alsaCallbackHandler( void *ptr )
8084 CallbackInfo *info = (CallbackInfo *) ptr;
8085 RtApiAlsa *object = (RtApiAlsa *) info->object;
8086 bool *isRunning = &info->isRunning;
8088 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: reports whether SCHED_RR actually took effect
// (it requires root / CAP_SYS_NICE).
8089 if ( info->doRealtime ) {
8090 std::cerr << "RtAudio alsa: " <<
8091 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8092 "running realtime scheduling" << std::endl;
8096 while ( *isRunning == true ) {
8097 pthread_testcancel();
8098 object->callbackEvent();
8101 pthread_exit( NULL );
8104 //******************** End of __LINUX_ALSA__ *********************//
8107 #if defined(__LINUX_PULSE__)
8109 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8110 // and Tristan Matthews.
8112 #include <pulse/error.h>
8113 #include <pulse/simple.h>
// Sample rates the PulseAudio backend advertises; zero-terminated so it
// can be iterated with `for ( const unsigned int *sr = ...; *sr; ++sr )`.
8116 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8117 44100, 48000, 96000, 0};
// Maps an RtAudio sample format to the corresponding PulseAudio format.
8119 struct rtaudio_pa_format_mapping_t {
8120 RtAudioFormat rtaudio_format;
8121 pa_sample_format_t pa_format;
// Table terminated by the {0, PA_SAMPLE_INVALID} sentinel; formats not
// listed here fall back to FLOAT32 conversion in probeDeviceOpen().
8124 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8125 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8126 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8127 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8128 {0, PA_SAMPLE_INVALID}};
// Per-stream PulseAudio state: playback/record pa_simple connections,
// the callback thread, and the runnable flag + condvar used to park it.
// NOTE(review): member declarations for s_play/s_rec/thread are on lines
// elided from this dump.
8130 struct PulseAudioHandle {
8134 pthread_cond_t runnable_cv;
8136 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: ensure the stream is torn down if the user forgot to close
// it (the closeStream() call is on a line elided from this dump).
8139 RtApiPulse::~RtApiPulse()
8141 if ( stream_.state != STREAM_CLOSED )
8145 unsigned int RtApiPulse::getDeviceCount( void )
// PulseAudio exposes a single virtual device, so the device index is
// ignored and a fixed stereo duplex description is returned.
8150 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8152 RtAudio::DeviceInfo info;
8154 info.name = "PulseAudio";
8155 info.outputChannels = 2;
8156 info.inputChannels = 2;
8157 info.duplexChannels = 2;
8158 info.isDefaultOutput = true;
8159 info.isDefaultInput = true;
// Advertise the fixed rate table (zero-terminated).
8161 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8162 info.sampleRates.push_back( *sr );
8164 info.preferredSampleRate = 48000;
8165 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread; mirrors
// alsaCallbackHandler(): loop on callbackEvent() until isRunning clears.
8170 static void *pulseaudio_callback( void * user )
8172 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8173 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
// volatile: flag is written from another thread (closeStream).
8174 volatile bool *isRunning = &cbi->isRunning;
8176 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8177 if (cbi->doRealtime) {
8178 std::cerr << "RtAudio pulse: " <<
8179 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8180 "running realtime scheduling" << std::endl;
8184 while ( *isRunning ) {
8185 pthread_testcancel();
8186 context->callbackEvent();
8189 pthread_exit( NULL );
// Tear down the PulseAudio stream: stop the callback thread (waking it
// first if parked), join it, free the pa_simple connections and the
// handle, release user buffers, and reset stream state.
8192 void RtApiPulse::closeStream( void )
8194 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8196 stream_.callbackInfo.isRunning = false;
// If the callback thread is blocked on runnable_cv (stream stopped),
// signal it so it can observe isRunning == false and exit.
8198 MUTEX_LOCK( &stream_.mutex );
8199 if ( stream_.state == STREAM_STOPPED ) {
8200 pah->runnable = true;
8201 pthread_cond_signal( &pah->runnable_cv );
8203 MUTEX_UNLOCK( &stream_.mutex );
8205 pthread_join( pah->thread, 0 );
8206 if ( pah->s_play ) {
8207 pa_simple_flush( pah->s_play, NULL );
8208 pa_simple_free( pah->s_play );
8211 pa_simple_free( pah->s_rec );
8213 pthread_cond_destroy( &pah->runnable_cv );
8215 stream_.apiHandle = 0;
8218 if ( stream_.userBuffer[0] ) {
8219 free( stream_.userBuffer[0] );
8220 stream_.userBuffer[0] = 0;
8222 if ( stream_.userBuffer[1] ) {
8223 free( stream_.userBuffer[1] );
8224 stream_.userBuffer[1] = 0;
8227 stream_.state = STREAM_CLOSED;
8228 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback thread: wait while stopped,
// run the user callback, then blocking-write playback data and/or
// blocking-read capture data through the pa_simple API.
8231 void RtApiPulse::callbackEvent( void )
8233 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Parked state: same runnable_cv handshake as the ALSA backend.
8235 if ( stream_.state == STREAM_STOPPED ) {
8236 MUTEX_LOCK( &stream_.mutex );
8237 while ( !pah->runnable )
8238 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8240 if ( stream_.state != STREAM_RUNNING ) {
8241 MUTEX_UNLOCK( &stream_.mutex );
8244 MUTEX_UNLOCK( &stream_.mutex );
8247 if ( stream_.state == STREAM_CLOSED ) {
8248 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8249 "this shouldn't happen!";
8250 error( RtAudioError::WARNING );
8254 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8255 double streamTime = getStreamTime();
8256 RtAudioStreamStatus status = 0;
// Callback return: 1 => stop after this buffer, 2 => abort.
8257 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8258 stream_.bufferSize, streamTime, status,
8259 stream_.callbackInfo.userData );
8261 if ( doStopStream == 2 ) {
8266 MUTEX_LOCK( &stream_.mutex );
// When format/channel conversion is active, the device-side data lives
// in deviceBuffer; otherwise the user buffer is used directly.
8267 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8268 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8270 if ( stream_.state != STREAM_RUNNING )
// ---- Playback: convert (if needed), then blocking pa_simple_write ----
8275 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8276 if ( stream_.doConvertBuffer[OUTPUT] ) {
8277 convertBuffer( stream_.deviceBuffer,
8278 stream_.userBuffer[OUTPUT],
8279 stream_.convertInfo[OUTPUT] );
8280 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8281 formatBytes( stream_.deviceFormat[OUTPUT] );
8283 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8284 formatBytes( stream_.userFormat );
8286 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8287 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8288 pa_strerror( pa_error ) << ".";
8289 errorText_ = errorStream_.str();
8290 error( RtAudioError::WARNING );
// ---- Capture: blocking pa_simple_read, then convert (if needed) ----
8294 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8295 if ( stream_.doConvertBuffer[INPUT] )
8296 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8297 formatBytes( stream_.deviceFormat[INPUT] );
8299 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8300 formatBytes( stream_.userFormat );
8302 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8303 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8304 pa_strerror( pa_error ) << ".";
8305 errorText_ = errorStream_.str();
8306 error( RtAudioError::WARNING );
8308 if ( stream_.doConvertBuffer[INPUT] ) {
8309 convertBuffer( stream_.userBuffer[INPUT],
8310 stream_.deviceBuffer,
8311 stream_.convertInfo[INPUT] );
8316 MUTEX_UNLOCK( &stream_.mutex );
8317 RtApi::tickStreamTime();
8319 if ( doStopStream == 1 )
// Start an open PulseAudio stream: mark it RUNNING and wake the parked
// callback thread via runnable_cv. No device-level call is needed —
// the pa_simple reads/writes in callbackEvent() drive the stream.
8323 void RtApiPulse::startStream( void )
8325 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8327 if ( stream_.state == STREAM_CLOSED ) {
8328 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8329 error( RtAudioError::INVALID_USE );
8332 if ( stream_.state == STREAM_RUNNING ) {
8333 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8334 error( RtAudioError::WARNING );
8338 MUTEX_LOCK( &stream_.mutex );
8340 stream_.state = STREAM_RUNNING;
8342 pah->runnable = true;
8343 pthread_cond_signal( &pah->runnable_cv );
8344 MUTEX_UNLOCK( &stream_.mutex );
// Stop the stream gracefully: mark it STOPPED and drain any queued
// playback samples (pa_simple_drain) so they are played out first.
8347 void RtApiPulse::stopStream( void )
8349 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8351 if ( stream_.state == STREAM_CLOSED ) {
8352 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8353 error( RtAudioError::INVALID_USE );
8356 if ( stream_.state == STREAM_STOPPED ) {
8357 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8358 error( RtAudioError::WARNING );
8362 stream_.state = STREAM_STOPPED;
8363 MUTEX_LOCK( &stream_.mutex );
8365 if ( pah && pah->s_play ) {
8367 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8368 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8369 pa_strerror( pa_error ) << ".";
8370 errorText_ = errorStream_.str();
// Unlock before reporting so error() cannot deadlock on the stream mutex.
8371 MUTEX_UNLOCK( &stream_.mutex );
8372 error( RtAudioError::SYSTEM_ERROR );
8377 stream_.state = STREAM_STOPPED;
8378 MUTEX_UNLOCK( &stream_.mutex );
// Abort the stream immediately: like stopStream() but discards queued
// playback samples with pa_simple_flush instead of draining them.
8381 void RtApiPulse::abortStream( void )
8383 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8385 if ( stream_.state == STREAM_CLOSED ) {
8386 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8387 error( RtAudioError::INVALID_USE );
8390 if ( stream_.state == STREAM_STOPPED ) {
8391 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8392 error( RtAudioError::WARNING );
8396 stream_.state = STREAM_STOPPED;
8397 MUTEX_LOCK( &stream_.mutex );
8399 if ( pah && pah->s_play ) {
8401 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8402 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8403 pa_strerror( pa_error ) << ".";
8404 errorText_ = errorStream_.str();
8405 MUTEX_UNLOCK( &stream_.mutex );
8406 error( RtAudioError::SYSTEM_ERROR );
8411 stream_.state = STREAM_STOPPED;
8412 MUTEX_UNLOCK( &stream_.mutex );
// Open one direction (INPUT or OUTPUT) of the single PulseAudio device:
// validate parameters, build the pa_sample_spec, allocate user/device
// buffers, create the pa_simple connection(s), and spawn the callback
// thread the first time a direction is opened. Returns true on success.
// NOTE(review): elided dump — many error-path lines (`goto error`,
// returns, closing braces) are on lines not visible here.
8415 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8416 unsigned int channels, unsigned int firstChannel,
8417 unsigned int sampleRate, RtAudioFormat format,
8418 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8420 PulseAudioHandle *pah = 0;
8421 unsigned long bufferBytes = 0;
// Only device 0 exists; only mono/stereo, channel offset 0 supported.
8424 if ( device != 0 ) return false;
8425 if ( mode != INPUT && mode != OUTPUT ) return false;
8426 if ( channels != 1 && channels != 2 ) {
8427 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8430 ss.channels = channels;
8432 if ( firstChannel != 0 ) return false;
// The requested rate must be in the fixed SUPPORTED_SAMPLERATES table.
8434 bool sr_found = false;
8435 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8436 if ( sampleRate == *sr ) {
8438 stream_.sampleRate = sampleRate;
8439 ss.rate = sampleRate;
8444 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Pick a natively-supported sample format if possible ...
8449 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8450 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8451 if ( format == sf->rtaudio_format ) {
8453 stream_.userFormat = sf->rtaudio_format;
8454 stream_.deviceFormat[mode] = stream_.userFormat;
8455 ss.format = sf->pa_format;
// ... otherwise run the device at FLOAT32 and convert internally.
8459 if ( !sf_found ) { // Use internal data format conversion.
8460 stream_.userFormat = format;
8461 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8462 ss.format = PA_SAMPLE_FLOAT32LE;
8465 // Set other stream parameters.
8466 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8467 else stream_.userInterleaved = true;
8468 stream_.deviceInterleaved[mode] = true;
8469 stream_.nBuffers = 1;
8470 stream_.doByteSwap[mode] = false;
8471 stream_.nUserChannels[mode] = channels;
8472 stream_.nDeviceChannels[mode] = channels + firstChannel;
8473 stream_.channelOffset[mode] = 0;
8474 std::string streamName = "RtAudio";
8476 // Set flags for buffer conversion.
8477 stream_.doConvertBuffer[mode] = false;
8478 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8479 stream_.doConvertBuffer[mode] = true;
8480 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8481 stream_.doConvertBuffer[mode] = true;
8483 // Allocate necessary internal buffers.
8484 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8485 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8486 if ( stream_.userBuffer[mode] == NULL ) {
8487 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8490 stream_.bufferSize = *bufferSize;
8492 if ( stream_.doConvertBuffer[mode] ) {
// For duplex, reuse an existing (large enough) device buffer from the
// other direction instead of allocating a second one.
8494 bool makeBuffer = true;
8495 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8496 if ( mode == INPUT ) {
8497 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8498 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8499 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8504 bufferBytes *= *bufferSize;
8505 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8506 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8507 if ( stream_.deviceBuffer == NULL ) {
8508 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8514 stream_.device[mode] = device;
8516 // Setup the buffer conversion information structure.
8517 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// First direction opened: create the API handle + its condvar.
8519 if ( !stream_.apiHandle ) {
8520 PulseAudioHandle *pah = new PulseAudioHandle;
8522 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8526 stream_.apiHandle = pah;
8527 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8528 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8532 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8535 if ( options && !options->streamName.empty() ) streamName = options->streamName;
// Record path limits fragment size to the stream buffer size.
8538 pa_buffer_attr buffer_attr;
8539 buffer_attr.fragsize = bufferBytes;
8540 buffer_attr.maxlength = -1;
8542 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8543 if ( !pah->s_rec ) {
8544 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8549 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8550 if ( !pah->s_play ) {
8551 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// Track combined mode: opening the second direction promotes to DUPLEX.
8559 if ( stream_.mode == UNINITIALIZED )
8560 stream_.mode = mode;
8561 else if ( stream_.mode == mode )
8564 stream_.mode = DUPLEX;
8566 if ( !stream_.callbackInfo.isRunning ) {
8567 stream_.callbackInfo.object = this;
8569 stream_.state = STREAM_STOPPED;
8570 // Set the thread attributes for joinable and realtime scheduling
8571 // priority (optional). The higher priority will only take affect
8572 // if the program is run as root or suid. Note, under Linux
8573 // processes with CAP_SYS_NICE privilege, a user can change
8574 // scheduling policy and priority (thus need not be root). See
8575 // POSIX "capabilities".
8576 pthread_attr_t attr;
8577 pthread_attr_init( &attr );
8578 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8579 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8580 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8581 stream_.callbackInfo.doRealtime = true;
8582 struct sched_param param;
8583 int priority = options->priority;
8584 int min = sched_get_priority_min( SCHED_RR );
8585 int max = sched_get_priority_max( SCHED_RR );
8586 if ( priority < min ) priority = min;
8587 else if ( priority > max ) priority = max;
8588 param.sched_priority = priority;
8590 // Set the policy BEFORE the priority. Otherwise it fails.
8591 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8592 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8593 // This is definitely required. Otherwise it fails.
8594 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8595 pthread_attr_setschedparam(&attr, &param);
8598 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8600 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8603 stream_.callbackInfo.isRunning = true;
8604 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8605 pthread_attr_destroy(&attr);
8607 // Failed. Try instead with default attributes.
8608 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8610 stream_.callbackInfo.isRunning = false;
8611 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
// Error cleanup path: release the handle, buffers, and reset state.
8620 if ( pah && stream_.callbackInfo.isRunning ) {
8621 pthread_cond_destroy( &pah->runnable_cv );
8623 stream_.apiHandle = 0;
8626 for ( int i=0; i<2; i++ ) {
8627 if ( stream_.userBuffer[i] ) {
8628 free( stream_.userBuffer[i] );
8629 stream_.userBuffer[i] = 0;
8633 if ( stream_.deviceBuffer ) {
8634 free( stream_.deviceBuffer );
8635 stream_.deviceBuffer = 0;
8638 stream_.state = STREAM_CLOSED;
8645 #if defined(__LINUX_OSS__)
8648 #include <sys/ioctl.h>
8651 #include <sys/soundcard.h>
// Forward declaration of the OSS callback thread entry point.
8655 static void *ossCallbackHandler(void * ptr);
8657 // A structure to hold various information related to the OSS API
// Per-stream OSS state: one file descriptor per direction, xrun flags,
// and a condvar used to gate the callback thread.
// NOTE(review): the `struct OssHandle {` line and some members (xrun,
// triggered) are on lines elided from this dump.
8660 int id[2]; // device ids
8663 pthread_cond_t runnable;
8666 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor — no OSS resources are acquired until a stream is
// opened.
8669 RtApiOss :: RtApiOss()
8671 // Nothing to do here.
// Destructor: close the stream if the user did not do so explicitly.
8674 RtApiOss :: ~RtApiOss()
8676 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Query the number of OSS audio devices via the /dev/mixer SNDCTL_SYSINFO
// ioctl (requires OSS >= 4.0). Warns and returns on failure paths (the
// returns/close() calls sit on elided lines).
8679 unsigned int RtApiOss :: getDeviceCount( void )
8681 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8682 if ( mixerfd == -1 ) {
8683 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8684 error( RtAudioError::WARNING );
8688 oss_sysinfo sysinfo;
8689 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8691 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8692 error( RtAudioError::WARNING );
8697 return sysinfo.numaudios;
// Probe one OSS device: channel capabilities, native data formats, and
// supported sample rates, all via /dev/mixer ioctls. info.probed is set
// false up front; the success path (setting it true and returning) is on
// elided lines.
8700 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8702 RtAudio::DeviceInfo info;
8703 info.probed = false;
8705 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8706 if ( mixerfd == -1 ) {
8707 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8708 error( RtAudioError::WARNING );
8712 oss_sysinfo sysinfo;
8713 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8714 if ( result == -1 ) {
8716 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8717 error( RtAudioError::WARNING );
8721 unsigned nDevices = sysinfo.numaudios;
8722 if ( nDevices == 0 ) {
8724 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8725 error( RtAudioError::INVALID_USE );
8729 if ( device >= nDevices ) {
8731 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8732 error( RtAudioError::INVALID_USE );
8736 oss_audioinfo ainfo;
8738 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8740 if ( result == -1 ) {
8741 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8742 errorText_ = errorStream_.str();
8743 error( RtAudioError::WARNING );
// Channel capabilities from the device caps bitmask.
8748 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8749 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8750 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8751 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8752 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8755 // Probe data formats ... do for input
8756 unsigned long mask = ainfo.iformats;
8757 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8758 info.nativeFormats |= RTAUDIO_SINT16;
8759 if ( mask & AFMT_S8 )
8760 info.nativeFormats |= RTAUDIO_SINT8;
8761 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8762 info.nativeFormats |= RTAUDIO_SINT32;
8764 if ( mask & AFMT_FLOAT )
8765 info.nativeFormats |= RTAUDIO_FLOAT32;
8767 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8768 info.nativeFormats |= RTAUDIO_SINT24;
8770 // Check that we have at least one supported format
8771 if ( info.nativeFormats == 0 ) {
8772 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8773 errorText_ = errorStream_.str();
8774 error( RtAudioError::WARNING );
8778 // Probe the supported sample rates.
8779 info.sampleRates.clear();
// Two cases: the device reports an explicit rate list (nrates > 0) ...
8780 if ( ainfo.nrates ) {
8781 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8782 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8783 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8784 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate <= 48 kHz.
8786 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8787 info.preferredSampleRate = SAMPLE_RATES[k];
// ... or only a min/max range, checked against the static table.
8795 // Check min and max rate values;
8796 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8797 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8798 info.sampleRates.push_back( SAMPLE_RATES[k] );
8800 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8801 info.preferredSampleRate = SAMPLE_RATES[k];
8806 if ( info.sampleRates.size() == 0 ) {
8807 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8808 errorText_ = errorStream_.str();
8809 error( RtAudioError::WARNING );
8813 info.name = ainfo.name;
8820 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8821 unsigned int firstChannel, unsigned int sampleRate,
8822 RtAudioFormat format, unsigned int *bufferSize,
8823 RtAudio::StreamOptions *options )
8825 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8826 if ( mixerfd == -1 ) {
8827 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
8831 oss_sysinfo sysinfo;
8832 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8833 if ( result == -1 ) {
8835 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8839 unsigned nDevices = sysinfo.numaudios;
8840 if ( nDevices == 0 ) {
8841 // This should not happen because a check is made before this function is called.
8843 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8847 if ( device >= nDevices ) {
8848 // This should not happen because a check is made before this function is called.
8850 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8854 oss_audioinfo ainfo;
8856 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8858 if ( result == -1 ) {
8859 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8860 errorText_ = errorStream_.str();
8864 // Check if device supports input or output
8865 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8866 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8867 if ( mode == OUTPUT )
8868 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8870 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8871 errorText_ = errorStream_.str();
8876 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8877 if ( mode == OUTPUT )
8879 else { // mode == INPUT
8880 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8881 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8882 close( handle->id[0] );
8884 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8885 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8886 errorText_ = errorStream_.str();
8889 // Check that the number previously set channels is the same.
8890 if ( stream_.nUserChannels[0] != channels ) {
8891 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8892 errorText_ = errorStream_.str();
8901 // Set exclusive access if specified.
8902 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8904 // Try to open the device.
8906 fd = open( ainfo.devnode, flags, 0 );
8908 if ( errno == EBUSY )
8909 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8911 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8912 errorText_ = errorStream_.str();
8916 // For duplex operation, specifically set this mode (this doesn't seem to work).
8918 if ( flags | O_RDWR ) {
8919 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8920 if ( result == -1) {
8921 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8922 errorText_ = errorStream_.str();
8928 // Check the device channel support.
8929 stream_.nUserChannels[mode] = channels;
8930 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8932 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
8933 errorText_ = errorStream_.str();
8937 // Set the number of channels.
8938 int deviceChannels = channels + firstChannel;
8939 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
8940 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
8942 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
8943 errorText_ = errorStream_.str();
8946 stream_.nDeviceChannels[mode] = deviceChannels;
8948 // Get the data format mask
8950 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
8951 if ( result == -1 ) {
8953 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
8954 errorText_ = errorStream_.str();
8958 // Determine how to set the device format.
8959 stream_.userFormat = format;
8960 int deviceFormat = -1;
8961 stream_.doByteSwap[mode] = false;
8962 if ( format == RTAUDIO_SINT8 ) {
8963 if ( mask & AFMT_S8 ) {
8964 deviceFormat = AFMT_S8;
8965 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8968 else if ( format == RTAUDIO_SINT16 ) {
8969 if ( mask & AFMT_S16_NE ) {
8970 deviceFormat = AFMT_S16_NE;
8971 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8973 else if ( mask & AFMT_S16_OE ) {
8974 deviceFormat = AFMT_S16_OE;
8975 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8976 stream_.doByteSwap[mode] = true;
8979 else if ( format == RTAUDIO_SINT24 ) {
8980 if ( mask & AFMT_S24_NE ) {
8981 deviceFormat = AFMT_S24_NE;
8982 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8984 else if ( mask & AFMT_S24_OE ) {
8985 deviceFormat = AFMT_S24_OE;
8986 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8987 stream_.doByteSwap[mode] = true;
8990 else if ( format == RTAUDIO_SINT32 ) {
8991 if ( mask & AFMT_S32_NE ) {
8992 deviceFormat = AFMT_S32_NE;
8993 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8995 else if ( mask & AFMT_S32_OE ) {
8996 deviceFormat = AFMT_S32_OE;
8997 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8998 stream_.doByteSwap[mode] = true;
9002 if ( deviceFormat == -1 ) {
9003 // The user requested format is not natively supported by the device.
9004 if ( mask & AFMT_S16_NE ) {
9005 deviceFormat = AFMT_S16_NE;
9006 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9008 else if ( mask & AFMT_S32_NE ) {
9009 deviceFormat = AFMT_S32_NE;
9010 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9012 else if ( mask & AFMT_S24_NE ) {
9013 deviceFormat = AFMT_S24_NE;
9014 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9016 else if ( mask & AFMT_S16_OE ) {
9017 deviceFormat = AFMT_S16_OE;
9018 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9019 stream_.doByteSwap[mode] = true;
9021 else if ( mask & AFMT_S32_OE ) {
9022 deviceFormat = AFMT_S32_OE;
9023 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9024 stream_.doByteSwap[mode] = true;
9026 else if ( mask & AFMT_S24_OE ) {
9027 deviceFormat = AFMT_S24_OE;
9028 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9029 stream_.doByteSwap[mode] = true;
9031 else if ( mask & AFMT_S8) {
9032 deviceFormat = AFMT_S8;
9033 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9037 if ( stream_.deviceFormat[mode] == 0 ) {
9038 // This really shouldn't happen ...
9040 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9041 errorText_ = errorStream_.str();
9045 // Set the data format.
9046 int temp = deviceFormat;
9047 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9048 if ( result == -1 || deviceFormat != temp ) {
9050 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9051 errorText_ = errorStream_.str();
9055 // Attempt to set the buffer size. According to OSS, the minimum
9056 // number of buffers is two. The supposed minimum buffer size is 16
9057 // bytes, so that will be our lower bound. The argument to this
9058 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9059 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9060 // We'll check the actual value used near the end of the setup
9062 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9063 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9065 if ( options ) buffers = options->numberOfBuffers;
9066 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9067 if ( buffers < 2 ) buffers = 3;
9068 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9069 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9070 if ( result == -1 ) {
9072 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9073 errorText_ = errorStream_.str();
9076 stream_.nBuffers = buffers;
9078 // Save buffer size (in sample frames).
9079 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9080 stream_.bufferSize = *bufferSize;
9082 // Set the sample rate.
9083 int srate = sampleRate;
9084 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9085 if ( result == -1 ) {
9087 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9088 errorText_ = errorStream_.str();
9092 // Verify the sample rate setup worked.
9093 if ( abs( srate - (int)sampleRate ) > 100 ) {
9095 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9096 errorText_ = errorStream_.str();
9099 stream_.sampleRate = sampleRate;
9101 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9102 // We're doing duplex setup here.
9103 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9104 stream_.nDeviceChannels[0] = deviceChannels;
9107 // Set interleaving parameters.
9108 stream_.userInterleaved = true;
9109 stream_.deviceInterleaved[mode] = true;
9110 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9111 stream_.userInterleaved = false;
9113 // Set flags for buffer conversion
9114 stream_.doConvertBuffer[mode] = false;
9115 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9116 stream_.doConvertBuffer[mode] = true;
9117 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9118 stream_.doConvertBuffer[mode] = true;
9119 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9120 stream_.nUserChannels[mode] > 1 )
9121 stream_.doConvertBuffer[mode] = true;
9123 // Allocate the stream handles if necessary and then save.
9124 if ( stream_.apiHandle == 0 ) {
9126 handle = new OssHandle;
9128 catch ( std::bad_alloc& ) {
9129 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9133 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9134 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9138 stream_.apiHandle = (void *) handle;
9141 handle = (OssHandle *) stream_.apiHandle;
9143 handle->id[mode] = fd;
9145 // Allocate necessary internal buffers.
9146 unsigned long bufferBytes;
9147 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9148 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9149 if ( stream_.userBuffer[mode] == NULL ) {
9150 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9154 if ( stream_.doConvertBuffer[mode] ) {
9156 bool makeBuffer = true;
9157 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9158 if ( mode == INPUT ) {
9159 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9160 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9161 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9166 bufferBytes *= *bufferSize;
9167 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9168 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9169 if ( stream_.deviceBuffer == NULL ) {
9170 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9176 stream_.device[mode] = device;
9177 stream_.state = STREAM_STOPPED;
9179 // Setup the buffer conversion information structure.
9180 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9182 // Setup thread if necessary.
9183 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9184 // We had already set up an output stream.
9185 stream_.mode = DUPLEX;
9186 if ( stream_.device[0] == device ) handle->id[0] = fd;
9189 stream_.mode = mode;
9191 // Setup callback thread.
9192 stream_.callbackInfo.object = (void *) this;
9194 // Set the thread attributes for joinable and realtime scheduling
9195 // priority. The higher priority will only take affect if the
9196 // program is run as root or suid.
9197 pthread_attr_t attr;
9198 pthread_attr_init( &attr );
9199 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9200 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9201 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9202 stream_.callbackInfo.doRealtime = true;
9203 struct sched_param param;
9204 int priority = options->priority;
9205 int min = sched_get_priority_min( SCHED_RR );
9206 int max = sched_get_priority_max( SCHED_RR );
9207 if ( priority < min ) priority = min;
9208 else if ( priority > max ) priority = max;
9209 param.sched_priority = priority;
9211 // Set the policy BEFORE the priority. Otherwise it fails.
9212 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9213 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9214 // This is definitely required. Otherwise it fails.
9215 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9216 pthread_attr_setschedparam(&attr, ¶m);
9219 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9221 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9224 stream_.callbackInfo.isRunning = true;
9225 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9226 pthread_attr_destroy( &attr );
9228 // Failed. Try instead with default attributes.
9229 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9231 stream_.callbackInfo.isRunning = false;
9232 errorText_ = "RtApiOss::error creating callback thread!";
9242 pthread_cond_destroy( &handle->runnable );
9243 if ( handle->id[0] ) close( handle->id[0] );
9244 if ( handle->id[1] ) close( handle->id[1] );
9246 stream_.apiHandle = 0;
9249 for ( int i=0; i<2; i++ ) {
9250 if ( stream_.userBuffer[i] ) {
9251 free( stream_.userBuffer[i] );
9252 stream_.userBuffer[i] = 0;
9256 if ( stream_.deviceBuffer ) {
9257 free( stream_.deviceBuffer );
9258 stream_.deviceBuffer = 0;
9261 stream_.state = STREAM_CLOSED;
9265 void RtApiOss :: closeStream()
// Close the open OSS stream: shut down the callback thread, halt any
// active transfers, close the device descriptor(s), and free all buffers.
9267 if ( stream_.state == STREAM_CLOSED ) {
9268 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9269 error( RtAudioError::WARNING );
9273 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Tell the callback thread to exit; if it is parked on the condition
// variable (stream stopped), wake it so pthread_join below can return.
9274 stream_.callbackInfo.isRunning = false;
9275 MUTEX_LOCK( &stream_.mutex );
9276 if ( stream_.state == STREAM_STOPPED )
9277 pthread_cond_signal( &handle->runnable );
9278 MUTEX_UNLOCK( &stream_.mutex );
9279 pthread_join( stream_.callbackInfo.thread, NULL );
// If the stream is still running, halt the output and/or input device.
9281 if ( stream_.state == STREAM_RUNNING ) {
9282 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9283 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9285 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9286 stream_.state = STREAM_STOPPED;
// Destroy the condition variable and close the open file descriptors
// (index 0 = playback, index 1 = capture).
9290 pthread_cond_destroy( &handle->runnable );
9291 if ( handle->id[0] ) close( handle->id[0] );
9292 if ( handle->id[1] ) close( handle->id[1] );
9294 stream_.apiHandle = 0;
// Release the user-side conversion buffers for both directions.
9297 for ( int i=0; i<2; i++ ) {
9298 if ( stream_.userBuffer[i] ) {
9299 free( stream_.userBuffer[i] );
9300 stream_.userBuffer[i] = 0;
9304 if ( stream_.deviceBuffer ) {
9305 free( stream_.deviceBuffer );
9306 stream_.deviceBuffer = 0;
// Reset the stream object to its fully-closed state.
9309 stream_.mode = UNINITIALIZED;
9310 stream_.state = STREAM_CLOSED;
9313 void RtApiOss :: startStream()
// Start a stopped stream. OSS begins i/o on its own as soon as samples
// are transferred, so this only updates the state and wakes the
// callback thread waiting on the handle's condition variable.
9316 if ( stream_.state == STREAM_RUNNING ) {
9317 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9318 error( RtAudioError::WARNING );
9322 MUTEX_LOCK( &stream_.mutex );
9324 stream_.state = STREAM_RUNNING;
9326 // No need to do anything else here ... OSS automatically starts
9327 // when fed samples.
9329 MUTEX_UNLOCK( &stream_.mutex );
// callbackEvent() blocks on handle->runnable while STOPPED; wake it.
9331 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9332 pthread_cond_signal( &handle->runnable );
9335 void RtApiOss :: stopStream()
// Gracefully stop a running stream: drain the output by writing zeros,
// then halt the device transfer(s). Raises SYSTEM_ERROR if an ioctl failed.
9338 if ( stream_.state == STREAM_STOPPED ) {
9339 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9340 error( RtAudioError::WARNING );
9344 MUTEX_LOCK( &stream_.mutex );
9346 // The state might change while waiting on a mutex.
9347 if ( stream_.state == STREAM_STOPPED ) {
9348 MUTEX_UNLOCK( &stream_.mutex );
9353 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9354 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9356 // Flush the output with zeros a few times.
// Pick whichever buffer actually feeds the device (converted device
// buffer vs. direct user buffer) so the zero-fill sizes match.
9359 RtAudioFormat format;
9361 if ( stream_.doConvertBuffer[0] ) {
9362 buffer = stream_.deviceBuffer;
9363 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9364 format = stream_.deviceFormat[0];
9367 buffer = stream_.userBuffer[0];
9368 samples = stream_.bufferSize * stream_.nUserChannels[0];
9369 format = stream_.userFormat;
// Write nBuffers+1 buffers of silence to push any pending audio out.
9372 memset( buffer, 0, samples * formatBytes(format) );
9373 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9374 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9375 if ( result == -1 ) {
9376 errorText_ = "RtApiOss::stopStream: audio write error.";
9377 error( RtAudioError::WARNING );
9381 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9382 if ( result == -1 ) {
9383 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9384 errorText_ = errorStream_.str();
9387 handle->triggered = false;
// Halt the input descriptor too, unless duplex mode shares one fd.
9390 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9391 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9392 if ( result == -1 ) {
// NOTE(review): this input-side message reports stream_.device[0];
// stream_.device[1] looks intended — confirm before changing.
9393 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9394 errorText_ = errorStream_.str();
9400 stream_.state = STREAM_STOPPED;
9401 MUTEX_UNLOCK( &stream_.mutex );
// Report any ioctl failure recorded above as a system error.
9403 if ( result != -1 ) return;
9404 error( RtAudioError::SYSTEM_ERROR );
9407 void RtApiOss :: abortStream()
// Immediately stop a running stream: halt the device transfer(s) without
// draining pending output (contrast stopStream(), which flushes first).
9410 if ( stream_.state == STREAM_STOPPED ) {
9411 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9412 error( RtAudioError::WARNING );
9416 MUTEX_LOCK( &stream_.mutex );
9418 // The state might change while waiting on a mutex.
9419 if ( stream_.state == STREAM_STOPPED ) {
9420 MUTEX_UNLOCK( &stream_.mutex );
9425 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9426 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9427 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9428 if ( result == -1 ) {
9429 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9430 errorText_ = errorStream_.str();
9433 handle->triggered = false;
// Halt the input descriptor too, unless duplex mode shares one fd.
9436 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9437 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9438 if ( result == -1 ) {
// NOTE(review): this input-side message reports stream_.device[0];
// stream_.device[1] looks intended — confirm before changing.
9439 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9440 errorText_ = errorStream_.str();
9446 stream_.state = STREAM_STOPPED;
9447 MUTEX_UNLOCK( &stream_.mutex );
// Report any ioctl failure recorded above as a system error.
9449 if ( result != -1 ) return;
9450 error( RtAudioError::SYSTEM_ERROR );
9453 void RtApiOss :: callbackEvent()
// One iteration of the OSS callback loop: wait while stopped, invoke the
// user callback, then write output samples to and/or read input samples
// from the device, performing format conversion and byte swapping as
// configured in probeDeviceOpen().
9455 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// While stopped, block on the condition variable until startStream()
// (or closeStream()) signals; bail out if not transitioned to RUNNING.
9456 if ( stream_.state == STREAM_STOPPED ) {
9457 MUTEX_LOCK( &stream_.mutex );
9458 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9459 if ( stream_.state != STREAM_RUNNING ) {
9460 MUTEX_UNLOCK( &stream_.mutex );
9463 MUTEX_UNLOCK( &stream_.mutex );
9466 if ( stream_.state == STREAM_CLOSED ) {
9467 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9468 error( RtAudioError::WARNING );
9472 // Invoke user callback to get fresh output data.
9473 int doStopStream = 0;
9474 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9475 double streamTime = getStreamTime();
9476 RtAudioStreamStatus status = 0;
// Report (and clear) any under/overflow flags set by earlier i/o errors.
9477 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9478 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9479 handle->xrun[0] = false;
9481 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9482 status |= RTAUDIO_INPUT_OVERFLOW;
9483 handle->xrun[1] = false;
9485 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9486 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Return value 2 from the user callback requests an immediate abort.
9487 if ( doStopStream == 2 ) {
9488 this->abortStream();
9492 MUTEX_LOCK( &stream_.mutex );
9494 // The state might change while waiting on a mutex.
9495 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9500 RtAudioFormat format;
9502 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9504 // Setup parameters and do buffer conversion if necessary.
9505 if ( stream_.doConvertBuffer[0] ) {
9506 buffer = stream_.deviceBuffer;
9507 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9508 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9509 format = stream_.deviceFormat[0];
9512 buffer = stream_.userBuffer[0];
9513 samples = stream_.bufferSize * stream_.nUserChannels[0];
9514 format = stream_.userFormat;
9517 // Do byte swapping if necessary.
9518 if ( stream_.doByteSwap[0] )
9519 byteSwapBuffer( buffer, samples, format );
// First duplex pass on a shared device: prime the output, then enable
// input and output triggers together so both directions start in sync.
9521 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9523 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9524 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9525 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9526 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9527 handle->triggered = true;
9530 // Write samples to device.
9531 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9533 if ( result == -1 ) {
9534 // We'll assume this is an underrun, though there isn't a
9535 // specific means for determining that.
9536 handle->xrun[0] = true;
9537 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9538 error( RtAudioError::WARNING );
9539 // Continue on to input section.
9543 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9545 // Setup parameters.
9546 if ( stream_.doConvertBuffer[1] ) {
9547 buffer = stream_.deviceBuffer;
9548 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9549 format = stream_.deviceFormat[1];
9552 buffer = stream_.userBuffer[1];
9553 samples = stream_.bufferSize * stream_.nUserChannels[1];
9554 format = stream_.userFormat;
9557 // Read samples from device.
9558 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9560 if ( result == -1 ) {
9561 // We'll assume this is an overrun, though there isn't a
9562 // specific means for determining that.
9563 handle->xrun[1] = true;
9564 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9565 error( RtAudioError::WARNING );
9569 // Do byte swapping if necessary.
9570 if ( stream_.doByteSwap[1] )
9571 byteSwapBuffer( buffer, samples, format );
9573 // Do buffer conversion if necessary.
9574 if ( stream_.doConvertBuffer[1] )
9575 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9579 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; return value 1 requests a graceful stop.
9581 RtApi::tickStreamTime();
9582 if ( doStopStream == 1 ) this->stopStream();
9585 static void *ossCallbackHandler( void *ptr )
// Thread entry point created by probeDeviceOpen(). Repeatedly drives
// RtApiOss::callbackEvent() until closeStream() clears isRunning.
// ptr is the stream's CallbackInfo.
9587 CallbackInfo *info = (CallbackInfo *) ptr;
9588 RtApiOss *object = (RtApiOss *) info->object;
9589 bool *isRunning = &info->isRunning;
9591 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic: report whether the RTAUDIO_SCHEDULE_REALTIME request
// actually resulted in SCHED_RR scheduling for this thread.
9592 if (info->doRealtime) {
9593 std::cerr << "RtAudio oss: " <<
9594 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9595 "running realtime scheduling" << std::endl;
9599 while ( *isRunning == true ) {
9600 pthread_testcancel();
9601 object->callbackEvent();
9604 pthread_exit( NULL );
9607 //******************** End of __LINUX_OSS__ *********************//
9611 // *************************************************** //
9613 // Protected common (OS-independent) RtAudio methods.
9615 // *************************************************** //
9617 // This method can be modified to control the behavior of error
9618 // message printing.
9619 void RtApi :: error( RtAudioError::Type type )
// Central error dispatch for all APIs. If the user registered an error
// callback, deliver errorText_ to it (suppressing re-entrant errors via
// firstErrorOccurred_); otherwise print warnings to stderr when enabled
// and throw RtAudioError for anything stronger than a warning.
9621 errorStream_.str(""); // clear the ostringstream
9623 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9624 if ( errorCallback ) {
9625 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9627 if ( firstErrorOccurred_ )
9630 firstErrorOccurred_ = true;
// Copy the message first: the shutdown below may overwrite errorText_.
9631 const std::string errorMessage = errorText_;
9633 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9634 stream_.callbackInfo.isRunning = false; // exit from the thread
9638 errorCallback( type, errorMessage );
9639 firstErrorOccurred_ = false;
// No callback registered: print or throw depending on severity.
9643 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9644 std::cerr << '\n' << errorText_ << "\n\n";
9645 else if ( type != RtAudioError::WARNING )
9646 throw( RtAudioError( errorText_, type ) );
9649 void RtApi :: verifyStream()
// Guard used by public stream methods: raise INVALID_USE (which throws
// via error()) if no stream is currently open.
9651 if ( stream_.state == STREAM_CLOSED ) {
9652 errorText_ = "RtApi:: a stream is not open!";
9653 error( RtAudioError::INVALID_USE );
9657 void RtApi :: clearStreamInfo()
// Reset every field of the stream_ structure to its pristine,
// stream-closed default. Called before opening and after closing a
// stream so stale values never leak between streams.
9659 stream_.mode = UNINITIALIZED;
9660 stream_.state = STREAM_CLOSED;
9661 stream_.sampleRate = 0;
9662 stream_.bufferSize = 0;
9663 stream_.nBuffers = 0;
9664 stream_.userFormat = 0;
9665 stream_.userInterleaved = true;
9666 stream_.streamTime = 0.0;
9667 stream_.apiHandle = 0;
9668 stream_.deviceBuffer = 0;
9669 stream_.callbackInfo.callback = 0;
9670 stream_.callbackInfo.userData = 0;
9671 stream_.callbackInfo.isRunning = false;
9672 stream_.callbackInfo.errorCallback = 0;
// Per-direction fields: index 0 = playback (OUTPUT), 1 = capture (INPUT).
9673 for ( int i=0; i<2; i++ ) {
// 11111 is a sentinel meaning "no device selected".
9674 stream_.device[i] = 11111;
9675 stream_.doConvertBuffer[i] = false;
9676 stream_.deviceInterleaved[i] = true;
9677 stream_.doByteSwap[i] = false;
9678 stream_.nUserChannels[i] = 0;
9679 stream_.nDeviceChannels[i] = 0;
9680 stream_.channelOffset[i] = 0;
9681 stream_.deviceFormat[i] = 0;
9682 stream_.latency[i] = 0;
9683 stream_.userBuffer[i] = 0;
9684 stream_.convertInfo[i].channels = 0;
9685 stream_.convertInfo[i].inJump = 0;
9686 stream_.convertInfo[i].outJump = 0;
9687 stream_.convertInfo[i].inFormat = 0;
9688 stream_.convertInfo[i].outFormat = 0;
9689 stream_.convertInfo[i].inOffset.clear();
9690 stream_.convertInfo[i].outOffset.clear();
9694 unsigned int RtApi :: formatBytes( RtAudioFormat format )
// Return the size in bytes of one sample of the given RtAudioFormat.
// An unrecognized format produces a WARNING via error().
9696 if ( format == RTAUDIO_SINT16 )
9698 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9700 else if ( format == RTAUDIO_FLOAT64 )
9702 else if ( format == RTAUDIO_SINT24 )
9704 else if ( format == RTAUDIO_SINT8 )
9707 errorText_ = "RtApi::formatBytes: undefined format.";
9708 error( RtAudioError::WARNING );
9713 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
// Fill in stream_.convertInfo[mode] for convertBuffer(): per-frame
// "jump" strides, per-channel offsets, and the format pair. INPUT
// converts device->user; OUTPUT converts user->device. firstChannel
// shifts offsets so a stream can start at a nonzero device channel.
9715 if ( mode == INPUT ) { // convert device to user buffer
9716 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9717 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9718 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9719 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9721 else { // convert user to device buffer
9722 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9723 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9724 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9725 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
// Only convert as many channels as the smaller side provides.
9728 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9729 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9731 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9733 // Set up the interleave/deinterleave offsets.
9734 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
// Deinterleaving direction: interleaved side offsets by channel index,
// non-interleaved side offsets by whole channel planes (bufferSize).
9735 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9736 ( mode == INPUT && stream_.userInterleaved ) ) {
9737 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9738 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9739 stream_.convertInfo[mode].outOffset.push_back( k );
9740 stream_.convertInfo[mode].inJump = 1;
9744 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9745 stream_.convertInfo[mode].inOffset.push_back( k );
9746 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9747 stream_.convertInfo[mode].outJump = 1;
9751 else { // no (de)interleaving
// Same layout on both sides: offsets track each other exactly.
9752 if ( stream_.userInterleaved ) {
9753 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9754 stream_.convertInfo[mode].inOffset.push_back( k );
9755 stream_.convertInfo[mode].outOffset.push_back( k );
9759 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9760 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9761 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9762 stream_.convertInfo[mode].inJump = 1;
9763 stream_.convertInfo[mode].outJump = 1;
9768 // Add channel offset.
// Shift the device-side offsets by firstChannel: interleaved devices
// offset by channel index, non-interleaved by whole planes.
9769 if ( firstChannel > 0 ) {
9770 if ( stream_.deviceInterleaved[mode] ) {
9771 if ( mode == OUTPUT ) {
9772 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9773 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9776 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9777 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9781 if ( mode == OUTPUT ) {
9782 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9783 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9786 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9787 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
9793 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9795 // This function does format conversion, input/output channel compensation, and
9796 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9797 // the lower three bytes of a 32-bit integer.
9799 // Clear our device buffer when in/out duplex device channels are different
9800 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9801 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9802 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9805 if (info.outFormat == RTAUDIO_FLOAT64) {
9807 Float64 *out = (Float64 *)outBuffer;
9809 if (info.inFormat == RTAUDIO_SINT8) {
9810 signed char *in = (signed char *)inBuffer;
9811 scale = 1.0 / 127.5;
9812 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9813 for (j=0; j<info.channels; j++) {
9814 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9815 out[info.outOffset[j]] += 0.5;
9816 out[info.outOffset[j]] *= scale;
9819 out += info.outJump;
9822 else if (info.inFormat == RTAUDIO_SINT16) {
9823 Int16 *in = (Int16 *)inBuffer;
9824 scale = 1.0 / 32767.5;
9825 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9826 for (j=0; j<info.channels; j++) {
9827 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9828 out[info.outOffset[j]] += 0.5;
9829 out[info.outOffset[j]] *= scale;
9832 out += info.outJump;
9835 else if (info.inFormat == RTAUDIO_SINT24) {
9836 Int24 *in = (Int24 *)inBuffer;
9837 scale = 1.0 / 8388607.5;
9838 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9839 for (j=0; j<info.channels; j++) {
9840 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9841 out[info.outOffset[j]] += 0.5;
9842 out[info.outOffset[j]] *= scale;
9845 out += info.outJump;
9848 else if (info.inFormat == RTAUDIO_SINT32) {
9849 Int32 *in = (Int32 *)inBuffer;
9850 scale = 1.0 / 2147483647.5;
9851 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9852 for (j=0; j<info.channels; j++) {
9853 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9854 out[info.outOffset[j]] += 0.5;
9855 out[info.outOffset[j]] *= scale;
9858 out += info.outJump;
9861 else if (info.inFormat == RTAUDIO_FLOAT32) {
9862 Float32 *in = (Float32 *)inBuffer;
9863 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9864 for (j=0; j<info.channels; j++) {
9865 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9868 out += info.outJump;
9871 else if (info.inFormat == RTAUDIO_FLOAT64) {
9872 // Channel compensation and/or (de)interleaving only.
9873 Float64 *in = (Float64 *)inBuffer;
9874 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9875 for (j=0; j<info.channels; j++) {
9876 out[info.outOffset[j]] = in[info.inOffset[j]];
9879 out += info.outJump;
9883 else if (info.outFormat == RTAUDIO_FLOAT32) {
9885 Float32 *out = (Float32 *)outBuffer;
9887 if (info.inFormat == RTAUDIO_SINT8) {
9888 signed char *in = (signed char *)inBuffer;
9889 scale = (Float32) ( 1.0 / 127.5 );
9890 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9891 for (j=0; j<info.channels; j++) {
9892 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9893 out[info.outOffset[j]] += 0.5;
9894 out[info.outOffset[j]] *= scale;
9897 out += info.outJump;
9900 else if (info.inFormat == RTAUDIO_SINT16) {
9901 Int16 *in = (Int16 *)inBuffer;
9902 scale = (Float32) ( 1.0 / 32767.5 );
9903 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9904 for (j=0; j<info.channels; j++) {
9905 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9906 out[info.outOffset[j]] += 0.5;
9907 out[info.outOffset[j]] *= scale;
9910 out += info.outJump;
9913 else if (info.inFormat == RTAUDIO_SINT24) {
9914 Int24 *in = (Int24 *)inBuffer;
9915 scale = (Float32) ( 1.0 / 8388607.5 );
9916 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9917 for (j=0; j<info.channels; j++) {
9918 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
9919 out[info.outOffset[j]] += 0.5;
9920 out[info.outOffset[j]] *= scale;
9923 out += info.outJump;
9926 else if (info.inFormat == RTAUDIO_SINT32) {
9927 Int32 *in = (Int32 *)inBuffer;
9928 scale = (Float32) ( 1.0 / 2147483647.5 );
9929 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9930 for (j=0; j<info.channels; j++) {
9931 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9932 out[info.outOffset[j]] += 0.5;
9933 out[info.outOffset[j]] *= scale;
9936 out += info.outJump;
9939 else if (info.inFormat == RTAUDIO_FLOAT32) {
9940 // Channel compensation and/or (de)interleaving only.
9941 Float32 *in = (Float32 *)inBuffer;
9942 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9943 for (j=0; j<info.channels; j++) {
9944 out[info.outOffset[j]] = in[info.inOffset[j]];
9947 out += info.outJump;
9950 else if (info.inFormat == RTAUDIO_FLOAT64) {
9951 Float64 *in = (Float64 *)inBuffer;
9952 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9953 for (j=0; j<info.channels; j++) {
9954 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9957 out += info.outJump;
9961 else if (info.outFormat == RTAUDIO_SINT32) {
9962 Int32 *out = (Int32 *)outBuffer;
9963 if (info.inFormat == RTAUDIO_SINT8) {
9964 signed char *in = (signed char *)inBuffer;
9965 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9966 for (j=0; j<info.channels; j++) {
9967 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9968 out[info.outOffset[j]] <<= 24;
9971 out += info.outJump;
9974 else if (info.inFormat == RTAUDIO_SINT16) {
9975 Int16 *in = (Int16 *)inBuffer;
9976 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9977 for (j=0; j<info.channels; j++) {
9978 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9979 out[info.outOffset[j]] <<= 16;
9982 out += info.outJump;
9985 else if (info.inFormat == RTAUDIO_SINT24) {
9986 Int24 *in = (Int24 *)inBuffer;
9987 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9988 for (j=0; j<info.channels; j++) {
9989 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
9990 out[info.outOffset[j]] <<= 8;
9993 out += info.outJump;
9996 else if (info.inFormat == RTAUDIO_SINT32) {
9997 // Channel compensation and/or (de)interleaving only.
9998 Int32 *in = (Int32 *)inBuffer;
9999 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10000 for (j=0; j<info.channels; j++) {
10001 out[info.outOffset[j]] = in[info.inOffset[j]];
10004 out += info.outJump;
10007 else if (info.inFormat == RTAUDIO_FLOAT32) {
10008 Float32 *in = (Float32 *)inBuffer;
10009 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10010 for (j=0; j<info.channels; j++) {
10011 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10014 out += info.outJump;
10017 else if (info.inFormat == RTAUDIO_FLOAT64) {
10018 Float64 *in = (Float64 *)inBuffer;
10019 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10020 for (j=0; j<info.channels; j++) {
10021 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10024 out += info.outJump;
10028 else if (info.outFormat == RTAUDIO_SINT24) {
10029 Int24 *out = (Int24 *)outBuffer;
10030 if (info.inFormat == RTAUDIO_SINT8) {
10031 signed char *in = (signed char *)inBuffer;
10032 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10033 for (j=0; j<info.channels; j++) {
10034 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10035 //out[info.outOffset[j]] <<= 16;
10038 out += info.outJump;
10041 else if (info.inFormat == RTAUDIO_SINT16) {
10042 Int16 *in = (Int16 *)inBuffer;
10043 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10044 for (j=0; j<info.channels; j++) {
10045 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10046 //out[info.outOffset[j]] <<= 8;
10049 out += info.outJump;
10052 else if (info.inFormat == RTAUDIO_SINT24) {
10053 // Channel compensation and/or (de)interleaving only.
10054 Int24 *in = (Int24 *)inBuffer;
10055 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10056 for (j=0; j<info.channels; j++) {
10057 out[info.outOffset[j]] = in[info.inOffset[j]];
10060 out += info.outJump;
10063 else if (info.inFormat == RTAUDIO_SINT32) {
10064 Int32 *in = (Int32 *)inBuffer;
10065 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10066 for (j=0; j<info.channels; j++) {
10067 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10068 //out[info.outOffset[j]] >>= 8;
10071 out += info.outJump;
10074 else if (info.inFormat == RTAUDIO_FLOAT32) {
10075 Float32 *in = (Float32 *)inBuffer;
10076 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10077 for (j=0; j<info.channels; j++) {
10078 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10081 out += info.outJump;
10084 else if (info.inFormat == RTAUDIO_FLOAT64) {
10085 Float64 *in = (Float64 *)inBuffer;
10086 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10087 for (j=0; j<info.channels; j++) {
10088 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10091 out += info.outJump;
10095 else if (info.outFormat == RTAUDIO_SINT16) {
10096 Int16 *out = (Int16 *)outBuffer;
10097 if (info.inFormat == RTAUDIO_SINT8) {
10098 signed char *in = (signed char *)inBuffer;
10099 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10100 for (j=0; j<info.channels; j++) {
10101 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10102 out[info.outOffset[j]] <<= 8;
10105 out += info.outJump;
10108 else if (info.inFormat == RTAUDIO_SINT16) {
10109 // Channel compensation and/or (de)interleaving only.
10110 Int16 *in = (Int16 *)inBuffer;
10111 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10112 for (j=0; j<info.channels; j++) {
10113 out[info.outOffset[j]] = in[info.inOffset[j]];
10116 out += info.outJump;
10119 else if (info.inFormat == RTAUDIO_SINT24) {
10120 Int24 *in = (Int24 *)inBuffer;
10121 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10122 for (j=0; j<info.channels; j++) {
10123 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10126 out += info.outJump;
10129 else if (info.inFormat == RTAUDIO_SINT32) {
10130 Int32 *in = (Int32 *)inBuffer;
10131 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10132 for (j=0; j<info.channels; j++) {
10133 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10136 out += info.outJump;
10139 else if (info.inFormat == RTAUDIO_FLOAT32) {
10140 Float32 *in = (Float32 *)inBuffer;
10141 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10142 for (j=0; j<info.channels; j++) {
10143 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10146 out += info.outJump;
10149 else if (info.inFormat == RTAUDIO_FLOAT64) {
10150 Float64 *in = (Float64 *)inBuffer;
10151 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10152 for (j=0; j<info.channels; j++) {
10153 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10156 out += info.outJump;
10160 else if (info.outFormat == RTAUDIO_SINT8) {
10161 signed char *out = (signed char *)outBuffer;
10162 if (info.inFormat == RTAUDIO_SINT8) {
10163 // Channel compensation and/or (de)interleaving only.
10164 signed char *in = (signed char *)inBuffer;
10165 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10166 for (j=0; j<info.channels; j++) {
10167 out[info.outOffset[j]] = in[info.inOffset[j]];
10170 out += info.outJump;
10173 if (info.inFormat == RTAUDIO_SINT16) {
10174 Int16 *in = (Int16 *)inBuffer;
10175 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10176 for (j=0; j<info.channels; j++) {
10177 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10180 out += info.outJump;
10183 else if (info.inFormat == RTAUDIO_SINT24) {
10184 Int24 *in = (Int24 *)inBuffer;
10185 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10186 for (j=0; j<info.channels; j++) {
10187 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10190 out += info.outJump;
10193 else if (info.inFormat == RTAUDIO_SINT32) {
10194 Int32 *in = (Int32 *)inBuffer;
10195 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10196 for (j=0; j<info.channels; j++) {
10197 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10200 out += info.outJump;
10203 else if (info.inFormat == RTAUDIO_FLOAT32) {
10204 Float32 *in = (Float32 *)inBuffer;
10205 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10206 for (j=0; j<info.channels; j++) {
10207 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10210 out += info.outJump;
10213 else if (info.inFormat == RTAUDIO_FLOAT64) {
10214 Float64 *in = (Float64 *)inBuffer;
10215 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10216 for (j=0; j<info.channels; j++) {
10217 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10220 out += info.outJump;
10226 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10227 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10228 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10230 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10236 if ( format == RTAUDIO_SINT16 ) {
10237 for ( unsigned int i=0; i<samples; i++ ) {
10238 // Swap 1st and 2nd bytes.
10243 // Increment 2 bytes.
10247 else if ( format == RTAUDIO_SINT32 ||
10248 format == RTAUDIO_FLOAT32 ) {
10249 for ( unsigned int i=0; i<samples; i++ ) {
10250 // Swap 1st and 4th bytes.
10255 // Swap 2nd and 3rd bytes.
10261 // Increment 3 more bytes.
10265 else if ( format == RTAUDIO_SINT24 ) {
10266 for ( unsigned int i=0; i<samples; i++ ) {
10267 // Swap 1st and 3rd bytes.
10272 // Increment 2 more bytes.
10276 else if ( format == RTAUDIO_FLOAT64 ) {
10277 for ( unsigned int i=0; i<samples; i++ ) {
10278 // Swap 1st and 8th bytes
10283 // Swap 2nd and 7th bytes
10289 // Swap 3rd and 6th bytes
10295 // Swap 4th and 5th bytes
10301 // Increment 5 more bytes.
10307 // Indentation settings for Vim and Emacs
10309 // Local Variables:
10310 // c-basic-offset: 2
10311 // indent-tabs-mode: nil
10314 // vim: et sts=2 sw=2