1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
51 // Static variable definitions.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
58 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
59 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
60 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
61 #define MUTEX_LOCK(A) EnterCriticalSection(A)
62 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Convert a null-terminated narrow (char) string to a std::string.
// Trivial overload paired with the wchar_t version so callers can pass
// either TCHAR flavor on Windows.
static std::string convertCharPointerToStdString(const char *text)
{
  return std::string(text);
}
71 static std::string convertCharPointerToStdString(const wchar_t *text)
73 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
74 std::string s( length-1, '\0' );
75 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
79 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
81 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
82 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
83 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
84 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
86 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
87 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
102 // TODO: replace with initializer list in C++11.
103 // The order here will control the order of RtAudio's API search in
105 // Have to maintain a separate list of API enum identifiers since map
106 // doesn't preserve insertion order.
107 static std::pair< RtAudio::ApiNameMap, std::vector<RtAudio::Api> > init_ApiNames()
109 RtAudio::ApiNameMap names;
110 std::vector<RtAudio::Api> apis;
111 #if defined(__UNIX_JACK__)
112 names["jack"] = std::pair<RtAudio::Api, std::string>(RtAudio::UNIX_JACK, "Jack");
113 apis.push_back(RtAudio::UNIX_JACK);
115 #if defined(__LINUX_PULSE__)
116 names["pulse"] = std::pair<RtAudio::Api, std::string>(RtAudio::LINUX_PULSE, "Pulse");
117 apis.push_back(RtAudio::LINUX_PULSE);
119 #if defined(__LINUX_ALSA__)
120 names["alsa"] = std::pair<RtAudio::Api, std::string>(RtAudio::LINUX_ALSA, "ALSA");
121 apis.push_back(RtAudio::LINUX_ALSA);
123 #if defined(__LINUX_OSS__)
124 names["oss"] = std::pair<RtAudio::Api, std::string>(RtAudio::LINUX_OSS, "OSS");
125 apis.push_back(RtAudio::LINUX_OSS);
127 #if defined(__WINDOWS_ASIO__)
128 names["asio"] = std::pair<RtAudio::Api, std::string>(RtAudio::WINDOWS_ASIO, "ASIO");
129 apis.push_back(RtAudio::WINDOWS_ASIO);
131 #if defined(__WINDOWS_WASAPI__)
132 names["wasapi"] = std::pair<RtAudio::Api, std::string>(RtAudio::WINDOWS_WASAPI, "WASAPI");
133 apis.push_back(RtAudio::WINDOWS_WASAPI);
135 #if defined(__WINDOWS_DS__)
136 names["ds"] = std::pair<RtAudio::Api, std::string>(RtAudio::WINDOWS_DS, "DirectSound");
137 apis.push_back(RtAudio::WINDOWS_DS);
139 #if defined(__MACOSX_CORE__)
140 names["core"] = std::pair<RtAudio::Api, std::string>(RtAudio::MACOSX_CORE, "CoreAudio");
141 apis.push_back(RtAudio::MACOSX_CORE);
143 #if defined(__RTAUDIO_DUMMY__)
144 names["dummy"] = std::pair<RtAudio::Api, std::string>(RtAudio::RTAUDIO_DUMMY, "Dummy");
145 apis.push_back(RtAudio::RTAUDIO_DUMMY);
147 return std::make_pair(names, apis);
150 const RtAudio::ApiNameMap RtAudio::apiNames(init_ApiNames().first);
151 const std::vector<RtAudio::Api> RtAudio::compiledApis(init_ApiNames().second);
153 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
158 const std::vector<RtAudio::Api>& RtAudio :: getCompiledApi()
163 const std::string RtAudio :: getCompiledApiName( RtAudio::Api api )
165 ApiNameMap::const_iterator it;
166 for (it = apiNames.begin(); it != apiNames.end(); it++)
167 if (it->second.first == api)
172 const std::string RtAudio :: getCompiledApiDisplayName( RtAudio::Api api )
174 ApiNameMap::const_iterator it;
175 for (it = apiNames.begin(); it != apiNames.end(); it++)
176 if (it->second.first == api)
177 return it->second.second;
181 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
183 if (apiNames.find(name) == apiNames.end())
184 return RtAudio::UNSPECIFIED;
185 return apiNames.at(name).first;
188 void RtAudio :: openRtApi( RtAudio::Api api )
194 #if defined(__UNIX_JACK__)
195 if ( api == UNIX_JACK )
196 rtapi_ = new RtApiJack();
198 #if defined(__LINUX_ALSA__)
199 if ( api == LINUX_ALSA )
200 rtapi_ = new RtApiAlsa();
202 #if defined(__LINUX_PULSE__)
203 if ( api == LINUX_PULSE )
204 rtapi_ = new RtApiPulse();
206 #if defined(__LINUX_OSS__)
207 if ( api == LINUX_OSS )
208 rtapi_ = new RtApiOss();
210 #if defined(__WINDOWS_ASIO__)
211 if ( api == WINDOWS_ASIO )
212 rtapi_ = new RtApiAsio();
214 #if defined(__WINDOWS_WASAPI__)
215 if ( api == WINDOWS_WASAPI )
216 rtapi_ = new RtApiWasapi();
218 #if defined(__WINDOWS_DS__)
219 if ( api == WINDOWS_DS )
220 rtapi_ = new RtApiDs();
222 #if defined(__MACOSX_CORE__)
223 if ( api == MACOSX_CORE )
224 rtapi_ = new RtApiCore();
226 #if defined(__RTAUDIO_DUMMY__)
227 if ( api == RTAUDIO_DUMMY )
228 rtapi_ = new RtApiDummy();
232 RtAudio :: RtAudio( RtAudio::Api api )
236 if ( api != UNSPECIFIED ) {
237 // Attempt to open the specified API.
239 if ( rtapi_ ) return;
241 // No compiled support for specified API value. Issue a debug
242 // warning and continue as if no API was specified.
243 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
246 // Iterate through the compiled APIs and return as soon as we find
247 // one with at least one device or we reach the end of the list.
248 std::vector< RtAudio::Api > apis;
249 getCompiledApi( apis );
250 for ( unsigned int i=0; i<apis.size(); i++ ) {
251 openRtApi( apis[i] );
252 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
255 if ( rtapi_ ) return;
257 // It should not be possible to get here because the preprocessor
258 // definition __RTAUDIO_DUMMY__ is automatically defined if no
259 // API-specific definitions are passed to the compiler. But just in
260 // case something weird happens, we'll thow an error.
261 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
262 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
265 RtAudio :: ~RtAudio()
271 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
272 RtAudio::StreamParameters *inputParameters,
273 RtAudioFormat format, unsigned int sampleRate,
274 unsigned int *bufferFrames,
275 RtAudioCallback callback, void *userData,
276 RtAudio::StreamOptions *options,
277 RtAudioErrorCallback errorCallback )
279 return rtapi_->openStream( outputParameters, inputParameters, format,
280 sampleRate, bufferFrames, callback,
281 userData, options, errorCallback );
284 // *************************************************** //
286 // Public RtApi definitions (see end of file for
287 // private or protected utility functions).
289 // *************************************************** //
293 stream_.state = STREAM_CLOSED;
294 stream_.mode = UNINITIALIZED;
295 stream_.apiHandle = 0;
296 stream_.userBuffer[0] = 0;
297 stream_.userBuffer[1] = 0;
298 MUTEX_INITIALIZE( &stream_.mutex );
299 showWarnings_ = true;
300 firstErrorOccurred_ = false;
305 MUTEX_DESTROY( &stream_.mutex );
308 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
309 RtAudio::StreamParameters *iParams,
310 RtAudioFormat format, unsigned int sampleRate,
311 unsigned int *bufferFrames,
312 RtAudioCallback callback, void *userData,
313 RtAudio::StreamOptions *options,
314 RtAudioErrorCallback errorCallback )
316 if ( stream_.state != STREAM_CLOSED ) {
317 errorText_ = "RtApi::openStream: a stream is already open!";
318 error( RtAudioError::INVALID_USE );
322 // Clear stream information potentially left from a previously open stream.
325 if ( oParams && oParams->nChannels < 1 ) {
326 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
327 error( RtAudioError::INVALID_USE );
331 if ( iParams && iParams->nChannels < 1 ) {
332 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
333 error( RtAudioError::INVALID_USE );
337 if ( oParams == NULL && iParams == NULL ) {
338 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
339 error( RtAudioError::INVALID_USE );
343 if ( formatBytes(format) == 0 ) {
344 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
345 error( RtAudioError::INVALID_USE );
349 unsigned int nDevices = getDeviceCount();
350 unsigned int oChannels = 0;
352 oChannels = oParams->nChannels;
353 if ( oParams->deviceId >= nDevices ) {
354 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
355 error( RtAudioError::INVALID_USE );
360 unsigned int iChannels = 0;
362 iChannels = iParams->nChannels;
363 if ( iParams->deviceId >= nDevices ) {
364 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
365 error( RtAudioError::INVALID_USE );
372 if ( oChannels > 0 ) {
374 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
375 sampleRate, format, bufferFrames, options );
376 if ( result == false ) {
377 error( RtAudioError::SYSTEM_ERROR );
382 if ( iChannels > 0 ) {
384 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
385 sampleRate, format, bufferFrames, options );
386 if ( result == false ) {
387 if ( oChannels > 0 ) closeStream();
388 error( RtAudioError::SYSTEM_ERROR );
393 stream_.callbackInfo.callback = (void *) callback;
394 stream_.callbackInfo.userData = userData;
395 stream_.callbackInfo.errorCallback = (void *) errorCallback;
397 if ( options ) options->numberOfBuffers = stream_.nBuffers;
398 stream_.state = STREAM_STOPPED;
401 unsigned int RtApi :: getDefaultInputDevice( void )
403 // Should be implemented in subclasses if possible.
407 unsigned int RtApi :: getDefaultOutputDevice( void )
409 // Should be implemented in subclasses if possible.
413 void RtApi :: closeStream( void )
415 // MUST be implemented in subclasses!
419 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
420 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
421 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
422 RtAudio::StreamOptions * /*options*/ )
424 // MUST be implemented in subclasses!
428 void RtApi :: tickStreamTime( void )
430 // Subclasses that do not provide their own implementation of
431 // getStreamTime should call this function once per buffer I/O to
432 // provide basic stream time support.
434 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
436 #if defined( HAVE_GETTIMEOFDAY )
437 gettimeofday( &stream_.lastTickTimestamp, NULL );
441 long RtApi :: getStreamLatency( void )
445 long totalLatency = 0;
446 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
447 totalLatency = stream_.latency[0];
448 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
449 totalLatency += stream_.latency[1];
454 double RtApi :: getStreamTime( void )
458 #if defined( HAVE_GETTIMEOFDAY )
459 // Return a very accurate estimate of the stream time by
460 // adding in the elapsed time since the last tick.
464 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
465 return stream_.streamTime;
467 gettimeofday( &now, NULL );
468 then = stream_.lastTickTimestamp;
469 return stream_.streamTime +
470 ((now.tv_sec + 0.000001 * now.tv_usec) -
471 (then.tv_sec + 0.000001 * then.tv_usec));
473 return stream_.streamTime;
477 void RtApi :: setStreamTime( double time )
482 stream_.streamTime = time;
483 #if defined( HAVE_GETTIMEOFDAY )
484 gettimeofday( &stream_.lastTickTimestamp, NULL );
488 unsigned int RtApi :: getStreamSampleRate( void )
492 return stream_.sampleRate;
496 // *************************************************** //
498 // OS/API-specific methods.
500 // *************************************************** //
502 #if defined(__MACOSX_CORE__)
504 // The OS X CoreAudio API is designed to use a separate callback
505 // procedure for each of its audio devices. A single RtAudio duplex
506 // stream using two different devices is supported here, though it
507 // cannot be guaranteed to always behave correctly because we cannot
508 // synchronize these two callbacks.
510 // A property listener is installed for over/underrun information.
511 // However, no functionality is currently provided to allow property
512 // listeners to trigger user handlers because it is unclear what could
513 // be done if a critical stream parameter (buffer size, sample rate,
514 // device disconnect) notification arrived. The listeners entail
515 // quite a bit of extra code and most likely, a user program wouldn't
516 // be prepared for the result anyway. However, we do provide a flag
517 // to the client callback function to inform of an over/underrun.
519 // A structure to hold various information related to the CoreAudio API
522 AudioDeviceID id[2]; // device ids
523 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
524 AudioDeviceIOProcID procId[2];
526 UInt32 iStream[2]; // device stream index (or first if using multiple)
527 UInt32 nStreams[2]; // number of streams to use
530 pthread_cond_t condition;
531 int drainCounter; // Tracks callback counts when draining
532 bool internalDrain; // Indicates if stop is initiated from callback or not.
535 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
538 RtApiCore:: RtApiCore()
540 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
541 // This is a largely undocumented but absolutely necessary
542 // requirement starting with OS-X 10.6. If not called, queries and
543 // updates to various audio device properties are not handled
545 CFRunLoopRef theRunLoop = NULL;
546 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
547 kAudioObjectPropertyScopeGlobal,
548 kAudioObjectPropertyElementMaster };
549 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
550 if ( result != noErr ) {
551 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
552 error( RtAudioError::WARNING );
557 RtApiCore :: ~RtApiCore()
559 // The subclass destructor gets called before the base class
560 // destructor, so close an existing stream before deallocating
561 // apiDeviceId memory.
562 if ( stream_.state != STREAM_CLOSED ) closeStream();
565 unsigned int RtApiCore :: getDeviceCount( void )
567 // Find out how many audio devices there are, if any.
569 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
570 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
571 if ( result != noErr ) {
572 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
573 error( RtAudioError::WARNING );
577 return dataSize / sizeof( AudioDeviceID );
580 unsigned int RtApiCore :: getDefaultInputDevice( void )
582 unsigned int nDevices = getDeviceCount();
583 if ( nDevices <= 1 ) return 0;
586 UInt32 dataSize = sizeof( AudioDeviceID );
587 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
588 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
589 if ( result != noErr ) {
590 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
591 error( RtAudioError::WARNING );
595 dataSize *= nDevices;
596 AudioDeviceID deviceList[ nDevices ];
597 property.mSelector = kAudioHardwarePropertyDevices;
598 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
599 if ( result != noErr ) {
600 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
601 error( RtAudioError::WARNING );
605 for ( unsigned int i=0; i<nDevices; i++ )
606 if ( id == deviceList[i] ) return i;
608 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
609 error( RtAudioError::WARNING );
613 unsigned int RtApiCore :: getDefaultOutputDevice( void )
615 unsigned int nDevices = getDeviceCount();
616 if ( nDevices <= 1 ) return 0;
619 UInt32 dataSize = sizeof( AudioDeviceID );
620 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
621 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
622 if ( result != noErr ) {
623 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
624 error( RtAudioError::WARNING );
628 dataSize = sizeof( AudioDeviceID ) * nDevices;
629 AudioDeviceID deviceList[ nDevices ];
630 property.mSelector = kAudioHardwarePropertyDevices;
631 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
632 if ( result != noErr ) {
633 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
634 error( RtAudioError::WARNING );
638 for ( unsigned int i=0; i<nDevices; i++ )
639 if ( id == deviceList[i] ) return i;
641 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
642 error( RtAudioError::WARNING );
646 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
648 RtAudio::DeviceInfo info;
652 unsigned int nDevices = getDeviceCount();
653 if ( nDevices == 0 ) {
654 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
655 error( RtAudioError::INVALID_USE );
659 if ( device >= nDevices ) {
660 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
661 error( RtAudioError::INVALID_USE );
665 AudioDeviceID deviceList[ nDevices ];
666 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
667 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
668 kAudioObjectPropertyScopeGlobal,
669 kAudioObjectPropertyElementMaster };
670 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
671 0, NULL, &dataSize, (void *) &deviceList );
672 if ( result != noErr ) {
673 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
674 error( RtAudioError::WARNING );
678 AudioDeviceID id = deviceList[ device ];
680 // Get the device name.
683 dataSize = sizeof( CFStringRef );
684 property.mSelector = kAudioObjectPropertyManufacturer;
685 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
686 if ( result != noErr ) {
687 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
688 errorText_ = errorStream_.str();
689 error( RtAudioError::WARNING );
693 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
694 int length = CFStringGetLength(cfname);
695 char *mname = (char *)malloc(length * 3 + 1);
696 #if defined( UNICODE ) || defined( _UNICODE )
697 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
699 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
701 info.name.append( (const char *)mname, strlen(mname) );
702 info.name.append( ": " );
706 property.mSelector = kAudioObjectPropertyName;
707 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
708 if ( result != noErr ) {
709 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
710 errorText_ = errorStream_.str();
711 error( RtAudioError::WARNING );
715 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
716 length = CFStringGetLength(cfname);
717 char *name = (char *)malloc(length * 3 + 1);
718 #if defined( UNICODE ) || defined( _UNICODE )
719 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
721 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
723 info.name.append( (const char *)name, strlen(name) );
727 // Get the output stream "configuration".
728 AudioBufferList *bufferList = nil;
729 property.mSelector = kAudioDevicePropertyStreamConfiguration;
730 property.mScope = kAudioDevicePropertyScopeOutput;
731 // property.mElement = kAudioObjectPropertyElementWildcard;
733 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
734 if ( result != noErr || dataSize == 0 ) {
735 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
736 errorText_ = errorStream_.str();
737 error( RtAudioError::WARNING );
741 // Allocate the AudioBufferList.
742 bufferList = (AudioBufferList *) malloc( dataSize );
743 if ( bufferList == NULL ) {
744 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
745 error( RtAudioError::WARNING );
749 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
750 if ( result != noErr || dataSize == 0 ) {
752 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
753 errorText_ = errorStream_.str();
754 error( RtAudioError::WARNING );
758 // Get output channel information.
759 unsigned int i, nStreams = bufferList->mNumberBuffers;
760 for ( i=0; i<nStreams; i++ )
761 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
764 // Get the input stream "configuration".
765 property.mScope = kAudioDevicePropertyScopeInput;
766 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
767 if ( result != noErr || dataSize == 0 ) {
768 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
769 errorText_ = errorStream_.str();
770 error( RtAudioError::WARNING );
774 // Allocate the AudioBufferList.
775 bufferList = (AudioBufferList *) malloc( dataSize );
776 if ( bufferList == NULL ) {
777 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
778 error( RtAudioError::WARNING );
782 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
783 if (result != noErr || dataSize == 0) {
785 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
786 errorText_ = errorStream_.str();
787 error( RtAudioError::WARNING );
791 // Get input channel information.
792 nStreams = bufferList->mNumberBuffers;
793 for ( i=0; i<nStreams; i++ )
794 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
797 // If device opens for both playback and capture, we determine the channels.
798 if ( info.outputChannels > 0 && info.inputChannels > 0 )
799 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
801 // Probe the device sample rates.
802 bool isInput = false;
803 if ( info.outputChannels == 0 ) isInput = true;
805 // Determine the supported sample rates.
806 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
807 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
808 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
809 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
810 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
811 errorText_ = errorStream_.str();
812 error( RtAudioError::WARNING );
816 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
817 AudioValueRange rangeList[ nRanges ];
818 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
819 if ( result != kAudioHardwareNoError ) {
820 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
821 errorText_ = errorStream_.str();
822 error( RtAudioError::WARNING );
826 // The sample rate reporting mechanism is a bit of a mystery. It
827 // seems that it can either return individual rates or a range of
828 // rates. I assume that if the min / max range values are the same,
829 // then that represents a single supported rate and if the min / max
830 // range values are different, the device supports an arbitrary
831 // range of values (though there might be multiple ranges, so we'll
832 // use the most conservative range).
833 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
834 bool haveValueRange = false;
835 info.sampleRates.clear();
836 for ( UInt32 i=0; i<nRanges; i++ ) {
837 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
838 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
839 info.sampleRates.push_back( tmpSr );
841 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
842 info.preferredSampleRate = tmpSr;
845 haveValueRange = true;
846 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
847 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
851 if ( haveValueRange ) {
852 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
853 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
854 info.sampleRates.push_back( SAMPLE_RATES[k] );
856 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
857 info.preferredSampleRate = SAMPLE_RATES[k];
862 // Sort and remove any redundant values
863 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
864 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
866 if ( info.sampleRates.size() == 0 ) {
867 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
868 errorText_ = errorStream_.str();
869 error( RtAudioError::WARNING );
873 // CoreAudio always uses 32-bit floating point data for PCM streams.
874 // Thus, any other "physical" formats supported by the device are of
875 // no interest to the client.
876 info.nativeFormats = RTAUDIO_FLOAT32;
878 if ( info.outputChannels > 0 )
879 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
880 if ( info.inputChannels > 0 )
881 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
887 static OSStatus callbackHandler( AudioDeviceID inDevice,
888 const AudioTimeStamp* /*inNow*/,
889 const AudioBufferList* inInputData,
890 const AudioTimeStamp* /*inInputTime*/,
891 AudioBufferList* outOutputData,
892 const AudioTimeStamp* /*inOutputTime*/,
895 CallbackInfo *info = (CallbackInfo *) infoPointer;
897 RtApiCore *object = (RtApiCore *) info->object;
898 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
899 return kAudioHardwareUnspecifiedError;
901 return kAudioHardwareNoError;
904 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
906 const AudioObjectPropertyAddress properties[],
907 void* handlePointer )
909 CoreHandle *handle = (CoreHandle *) handlePointer;
910 for ( UInt32 i=0; i<nAddresses; i++ ) {
911 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
912 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
913 handle->xrun[1] = true;
915 handle->xrun[0] = true;
919 return kAudioHardwareNoError;
922 static OSStatus rateListener( AudioObjectID inDevice,
923 UInt32 /*nAddresses*/,
924 const AudioObjectPropertyAddress /*properties*/[],
927 Float64 *rate = (Float64 *) ratePointer;
928 UInt32 dataSize = sizeof( Float64 );
929 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
930 kAudioObjectPropertyScopeGlobal,
931 kAudioObjectPropertyElementMaster };
932 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
933 return kAudioHardwareNoError;
// Open and configure a Core Audio stream on `device` for one direction
// (OUTPUT or INPUT): negotiate the stream layout (single vs. multiple
// CoreAudio streams, interleaved vs. mono), clamp and set the buffer
// size, change the nominal sample rate if needed, set the virtual and
// physical data formats, allocate the CoreHandle plus user/device
// buffers, register the IOProc callback and the xrun listener, and fill
// in the stream_ bookkeeping.  Declared bool; the early-return FAILURE
// paths and the final return/closing brace fall in lines dropped from
// this numbered listing -- every gap in the baked-in line numbers marks
// dropped physical lines (blank lines, braces, returns).
936 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
937 unsigned int firstChannel, unsigned int sampleRate,
938 RtAudioFormat format, unsigned int *bufferSize,
939 RtAudio::StreamOptions *options )
942 unsigned int nDevices = getDeviceCount();
943 if ( nDevices == 0 ) {
944 // This should not happen because a check is made before this function is called.
945 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
949 if ( device >= nDevices ) {
950 // This should not happen because a check is made before this function is called.
951 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// NOTE(review): runtime-sized stack array (VLA) is a compiler
// extension, not standard C++; a std::vector would be portable.
955 AudioDeviceID deviceList[ nDevices ];
956 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
957 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
958 kAudioObjectPropertyScopeGlobal,
959 kAudioObjectPropertyElementMaster };
960 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
961 0, NULL, &dataSize, (void *) &deviceList );
962 if ( result != noErr ) {
963 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
967 AudioDeviceID id = deviceList[ device ];
969 // Setup for stream mode.
970 bool isInput = false;
971 if ( mode == INPUT ) {
// (The `isInput = true;` line inside this branch was dropped from the
// listing -- presumably at line 972; confirm against the full source.)
973 property.mScope = kAudioDevicePropertyScopeInput;
976 property.mScope = kAudioDevicePropertyScopeOutput;
978 // Get the stream "configuration".
979 AudioBufferList *bufferList = nil;
981 property.mSelector = kAudioDevicePropertyStreamConfiguration;
982 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
983 if ( result != noErr || dataSize == 0 ) {
984 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
985 errorText_ = errorStream_.str();
989 // Allocate the AudioBufferList.
990 bufferList = (AudioBufferList *) malloc( dataSize );
991 if ( bufferList == NULL ) {
992 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
996 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
997 if (result != noErr || dataSize == 0) {
999 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1000 errorText_ = errorStream_.str();
1004 // Search for one or more streams that contain the desired number of
1005 // channels. CoreAudio devices can have an arbitrary number of
1006 // streams and each stream can have an arbitrary number of channels.
1007 // For each stream, a single buffer of interleaved samples is
1008 // provided. RtAudio prefers the use of one stream of interleaved
1009 // data or multiple consecutive single-channel streams. However, we
1010 // now support multiple consecutive multi-channel streams of
1011 // interleaved data as well.
1012 UInt32 iStream, offsetCounter = firstChannel;
1013 UInt32 nStreams = bufferList->mNumberBuffers;
1014 bool monoMode = false;
1015 bool foundStream = false;
1017 // First check that the device supports the requested number of
1019 UInt32 deviceChannels = 0;
1020 for ( iStream=0; iStream<nStreams; iStream++ )
1021 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1023 if ( deviceChannels < ( channels + firstChannel ) ) {
1025 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1026 errorText_ = errorStream_.str();
1030 // Look for a single stream meeting our needs.
1031 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1032 for ( iStream=0; iStream<nStreams; iStream++ ) {
1033 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1034 if ( streamChannels >= channels + offsetCounter ) {
1035 firstStream = iStream;
1036 channelOffset = offsetCounter;
// (The `foundStream = true;` / `break;` lines of this branch fall in
// the dropped lines 1037-1039 -- confirm against the full source.)
1040 if ( streamChannels > offsetCounter ) break;
1041 offsetCounter -= streamChannels;
1044 // If we didn't find a single stream above, then we should be able
1045 // to meet the channel specification with multiple streams.
1046 if ( foundStream == false ) {
1048 offsetCounter = firstChannel;
1049 for ( iStream=0; iStream<nStreams; iStream++ ) {
1050 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1051 if ( streamChannels > offsetCounter ) break;
1052 offsetCounter -= streamChannels;
1055 firstStream = iStream;
1056 channelOffset = offsetCounter;
1057 Int32 channelCounter = channels + offsetCounter - streamChannels;
1059 if ( streamChannels > 1 ) monoMode = false;
// Walk forward over consecutive streams until the requested channel
// count is covered; each extra stream bumps streamCount (the increment
// falls in dropped lines after 1063).
1060 while ( channelCounter > 0 ) {
1061 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1062 if ( streamChannels > 1 ) monoMode = false;
1063 channelCounter -= streamChannels;
1070 // Determine the buffer size.
1071 AudioValueRange bufferRange;
1072 dataSize = sizeof( AudioValueRange );
1073 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1074 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1076 if ( result != noErr ) {
1077 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1078 errorText_ = errorStream_.str();
// Clamp the caller's requested frame count into the device's supported
// range; RTAUDIO_MINIMIZE_LATENCY forces the minimum.
1082 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1083 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1084 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1086 // Set the buffer size. For multiple streams, I'm assuming we only
1087 // need to make this setting for the master channel.
1088 UInt32 theSize = (UInt32) *bufferSize;
1089 dataSize = sizeof( UInt32 );
1090 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1091 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1093 if ( result != noErr ) {
1094 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1095 errorText_ = errorStream_.str();
1099 // If attempting to setup a duplex stream, the bufferSize parameter
1100 // MUST be the same in both directions!
1101 *bufferSize = theSize;
1102 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1103 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1104 errorText_ = errorStream_.str();
1108 stream_.bufferSize = *bufferSize;
1109 stream_.nBuffers = 1;
1111 // Try to set "hog" mode ... it's not clear to me this is working.
1112 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
// (hog_pid's declaration falls in the dropped line 1113.)
1114 dataSize = sizeof( hog_pid );
1115 property.mSelector = kAudioDevicePropertyHogMode;
1116 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1117 if ( result != noErr ) {
1118 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1119 errorText_ = errorStream_.str();
1123 if ( hog_pid != getpid() ) {
1125 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1126 if ( result != noErr ) {
1127 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1128 errorText_ = errorStream_.str();
1134 // Check and if necessary, change the sample rate for the device.
1135 Float64 nominalRate;
1136 dataSize = sizeof( Float64 );
1137 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1138 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1139 if ( result != noErr ) {
1140 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1141 errorText_ = errorStream_.str();
1145 // Only change the sample rate if off by more than 1 Hz.
1146 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1148 // Set a property listener for the sample rate change
1149 Float64 reportedRate = 0.0;
1150 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1151 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1152 if ( result != noErr ) {
1153 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1154 errorText_ = errorStream_.str();
1158 nominalRate = (Float64) sampleRate;
1159 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1160 if ( result != noErr ) {
1161 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1162 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1163 errorText_ = errorStream_.str();
1167 // Now wait until the reported nominal rate is what we just set.
1168 UInt32 microCounter = 0;
// NOTE(review): rateListener updates `reportedRate`; this loop polls it
// with a 5-second cap.  The listing's numbering jumps over lines
// 1172-1174, where a short sleep presumably lives -- as shown this
// would be a pure busy-wait.  Confirm against the full source.
1169 while ( reportedRate != nominalRate ) {
1170 microCounter += 5000;
1171 if ( microCounter > 5000000 ) break;
1175 // Remove the property listener.
1176 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1178 if ( microCounter > 5000000 ) {
1179 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1180 errorText_ = errorStream_.str();
1185 // Now set the stream format for all streams. Also, check the
1186 // physical format of the device and change that if necessary.
1187 AudioStreamBasicDescription description;
1188 dataSize = sizeof( AudioStreamBasicDescription );
1189 property.mSelector = kAudioStreamPropertyVirtualFormat;
1190 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1191 if ( result != noErr ) {
1192 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1193 errorText_ = errorStream_.str();
1197 // Set the sample rate and data format id. However, only make the
1198 // change if the sample rate is not within 1.0 of the desired
1199 // rate and the format is not linear pcm.
1200 bool updateFormat = false;
1201 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1202 description.mSampleRate = (Float64) sampleRate;
1203 updateFormat = true;
1206 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1207 description.mFormatID = kAudioFormatLinearPCM;
1208 updateFormat = true;
1211 if ( updateFormat ) {
1212 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1213 if ( result != noErr ) {
1214 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1215 errorText_ = errorStream_.str();
1220 // Now check the physical format.
1221 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1222 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1223 if ( result != noErr ) {
1224 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1225 errorText_ = errorStream_.str();
1229 //std::cout << "Current physical stream format:" << std::endl;
1230 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1231 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1232 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1233 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;
1235 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1236 description.mFormatID = kAudioFormatLinearPCM;
1237 //description.mSampleRate = (Float64) sampleRate;
1238 AudioStreamBasicDescription testDescription = description;
1241 // We'll try higher bit rates first and then work our way down.
// NOTE(review): the vector's element type is pair<UInt32, UInt32> but
// the entries pushed below are pair<Float32, UInt32> with fractional
// keys 24.2 and 24.4; storing them in a UInt32 first member truncates
// both to 24, so the two "24-bit in 4 bytes" variants become
// indistinguishable from packed 24-bit.  The element type should be
// Float32 (as in upstream RtAudio) -- confirm and fix together with the
// comparison at line 1261.
1242 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1243 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1244 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1245 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1246 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1247 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
1248 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1249 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1250 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1251 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1252 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1253 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1254 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1256 bool setPhysicalFormat = false;
1257 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1258 testDescription = description;
1259 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1260 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): '~' here is bitwise NOT.  The masked expression is
// either 0 or the single packed-flag bit, and ~ of either is non-zero,
// so this condition is effectively always true for 24-bit entries;
// logical '!' (test for "not packed") was almost certainly intended.
1261 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1262 testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
1264 testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1265 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1266 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1267 if ( result == noErr ) {
1268 setPhysicalFormat = true;
1269 //std::cout << "Updated physical stream format:" << std::endl;
1270 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1271 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1272 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1273 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
1278 if ( !setPhysicalFormat ) {
1279 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1280 errorText_ = errorStream_.str();
1283 } // done setting virtual/physical formats.
1285 // Get the stream / device latency.
1287 dataSize = sizeof( UInt32 );
1288 property.mSelector = kAudioDevicePropertyLatency;
1289 if ( AudioObjectHasProperty( id, &property ) == true ) {
1290 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1291 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1293 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1294 errorText_ = errorStream_.str();
1295 error( RtAudioError::WARNING );
1299 // Byte-swapping: According to AudioHardware.h, the stream data will
1300 // always be presented in native-endian format, so we should never
1301 // need to byte swap.
1302 stream_.doByteSwap[mode] = false;
1304 // From the CoreAudio documentation, PCM data must be supplied as
1306 stream_.userFormat = format;
1307 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1309 if ( streamCount == 1 )
1310 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1311 else // multiple streams
1312 stream_.nDeviceChannels[mode] = channels;
1313 stream_.nUserChannels[mode] = channels;
1314 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
1315 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1316 else stream_.userInterleaved = true;
1317 stream_.deviceInterleaved[mode] = true;
1318 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1320 // Set flags for buffer conversion.
1321 stream_.doConvertBuffer[mode] = false;
1322 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1323 stream_.doConvertBuffer[mode] = true;
1324 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1325 stream_.doConvertBuffer[mode] = true;
1326 if ( streamCount == 1 ) {
1327 if ( stream_.nUserChannels[mode] > 1 &&
1328 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1329 stream_.doConvertBuffer[mode] = true;
1331 else if ( monoMode && stream_.userInterleaved )
1332 stream_.doConvertBuffer[mode] = true;
1334 // Allocate our CoreHandle structure for the stream.
1335 CoreHandle *handle = 0;
1336 if ( stream_.apiHandle == 0 ) {
1338 handle = new CoreHandle;
1340 catch ( std::bad_alloc& ) {
1341 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1345 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1346 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1349 stream_.apiHandle = (void *) handle;
1352 handle = (CoreHandle *) stream_.apiHandle;
1353 handle->iStream[mode] = firstStream;
1354 handle->nStreams[mode] = streamCount;
1355 handle->id[mode] = id;
1357 // Allocate necessary internal buffers.
1358 unsigned long bufferBytes;
1359 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1360 //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1361 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
// NOTE(review): memset() runs BEFORE the NULL check below -- if the
// malloc above fails this dereferences NULL.  The check should precede
// the memset (or the commented-out calloc should be restored).
1362 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1363 if ( stream_.userBuffer[mode] == NULL ) {
1364 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1368 // If possible, we will make use of the CoreAudio stream buffers as
1369 // "device buffers".  However, we can't do this if using multiple
1371 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1373 bool makeBuffer = true;
1374 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1375 if ( mode == INPUT ) {
1376 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1377 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1378 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1383 bufferBytes *= *bufferSize;
1384 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1385 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1386 if ( stream_.deviceBuffer == NULL ) {
1387 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1393 stream_.sampleRate = sampleRate;
1394 stream_.device[mode] = device;
1395 stream_.state = STREAM_STOPPED;
1396 stream_.callbackInfo.object = (void *) this;
1398 // Setup the buffer conversion information structure.
1399 if ( stream_.doConvertBuffer[mode] ) {
1400 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1401 else setConvertInfo( mode, channelOffset );
1404 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1405 // Only one callback procedure per device.
1406 stream_.mode = DUPLEX;
1408 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1409 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1411 // deprecated in favor of AudioDeviceCreateIOProcID()
1412 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1414 if ( result != noErr ) {
1415 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1416 errorText_ = errorStream_.str();
1419 if ( stream_.mode == OUTPUT && mode == INPUT )
1420 stream_.mode = DUPLEX;
1422 stream_.mode = mode;
1425 // Setup the device property listener for over/underload.
1426 property.mSelector = kAudioDeviceProcessorOverload;
1427 property.mScope = kAudioObjectPropertyScopeGlobal;
1428 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// Error-cleanup path (label and surrounding lines dropped from the
// listing): tear down the condition variable, the handle and any
// allocated buffers, then mark the stream closed.
1434 pthread_cond_destroy( &handle->condition );
1436 stream_.apiHandle = 0;
1439 for ( int i=0; i<2; i++ ) {
1440 if ( stream_.userBuffer[i] ) {
1441 free( stream_.userBuffer[i] );
1442 stream_.userBuffer[i] = 0;
1446 if ( stream_.deviceBuffer ) {
1447 free( stream_.deviceBuffer );
1448 stream_.deviceBuffer = 0;
1451 stream_.state = STREAM_CLOSED;
// Shut down an open stream: remove the processor-overload (xrun)
// listeners, stop the device(s) if the stream is still running,
// destroy/remove the IOProc for each direction, free the user and
// device buffers, destroy the condition variable, and reset the
// stream_ bookkeeping to UNINITIALIZED / STREAM_CLOSED.  Only warns
// (does not throw) when there is no open stream.  Numbering gaps mark
// physical lines dropped from this listing (blank lines, braces, and
// presumably the `delete handle` between lines 1523 and 1525 --
// confirm against the full source).
1455 void RtApiCore :: closeStream( void )
1457 if ( stream_.state == STREAM_CLOSED ) {
1458 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1459 error( RtAudioError::WARNING );
// Output side (OUTPUT or DUPLEX): unhook listener, stop, remove IOProc.
1463 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1464 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1466 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1467 kAudioObjectPropertyScopeGlobal,
1468 kAudioObjectPropertyElementMaster };
1470 property.mSelector = kAudioDeviceProcessorOverload;
1471 property.mScope = kAudioObjectPropertyScopeGlobal;
1472 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1473 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1474 error( RtAudioError::WARNING );
1477 if ( stream_.state == STREAM_RUNNING )
1478 AudioDeviceStop( handle->id[0], callbackHandler );
1479 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1480 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1482 // deprecated in favor of AudioDeviceDestroyIOProcID()
1483 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input side: same teardown, but only when the input device is not the
// same physical device already handled above (duplex on one device
// shares a single IOProc).
1487 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1489 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1490 kAudioObjectPropertyScopeGlobal,
1491 kAudioObjectPropertyElementMaster };
1493 property.mSelector = kAudioDeviceProcessorOverload;
1494 property.mScope = kAudioObjectPropertyScopeGlobal;
1495 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1496 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1497 error( RtAudioError::WARNING );
1500 if ( stream_.state == STREAM_RUNNING )
1501 AudioDeviceStop( handle->id[1], callbackHandler );
1502 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1503 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1505 // deprecated in favor of AudioDeviceDestroyIOProcID()
1506 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release the per-direction user buffers and the shared device buffer.
1510 for ( int i=0; i<2; i++ ) {
1511 if ( stream_.userBuffer[i] ) {
1512 free( stream_.userBuffer[i] );
1513 stream_.userBuffer[i] = 0;
1517 if ( stream_.deviceBuffer ) {
1518 free( stream_.deviceBuffer );
1519 stream_.deviceBuffer = 0;
1522 // Destroy pthread condition variable.
1523 pthread_cond_destroy( &handle->condition );
1525 stream_.apiHandle = 0;
1527 stream_.mode = UNINITIALIZED;
1528 stream_.state = STREAM_CLOSED;
// Start the stream's IOProc callback(s): the output (or duplex) device
// first, then the input device when it is a distinct device.  Resets
// the drain state, marks the stream RUNNING, and reports any
// accumulated system error at the end.  Warns and returns early if the
// stream is already running.  Numbering gaps mark physical lines
// dropped from this listing (blank lines, braces, early returns and
// the goto-style `unlock:` error path).
1531 void RtApiCore :: startStream( void )
1534 if ( stream_.state == STREAM_RUNNING ) {
1535 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1536 error( RtAudioError::WARNING );
1540 OSStatus result = noErr;
1541 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1542 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1544 result = AudioDeviceStart( handle->id[0], callbackHandler );
1545 if ( result != noErr ) {
1546 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1547 errorText_ = errorStream_.str();
// Input side only needs its own start when it is a different device
// from the output side (same-device duplex shares one IOProc).
1552 if ( stream_.mode == INPUT ||
1553 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1555 result = AudioDeviceStart( handle->id[1], callbackHandler );
1556 if ( result != noErr ) {
1557 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1558 errorText_ = errorStream_.str();
// Fresh start: no drain pending, stream considered running.
1563 handle->drainCounter = 0;
1564 handle->internalDrain = false;
1565 stream_.state = STREAM_RUNNING;
1568 if ( result == noErr ) return;
1569 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: for an output/duplex stream, set the
// drain counter so the audio callback plays out (zero-fills) remaining
// buffers and signals the condition variable, wait for that signal,
// then stop the device(s).  Warns and returns early if already
// stopped.  Numbering gaps mark physical lines dropped from this
// listing (blank lines, braces, early returns).
1572 void RtApiCore :: stopStream( void )
1575 if ( stream_.state == STREAM_STOPPED ) {
1576 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1577 error( RtAudioError::WARNING );
1581 OSStatus result = noErr;
1582 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1583 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1585 if ( handle->drainCounter == 0 ) {
1586 handle->drainCounter = 2;
// NOTE(review): pthread_cond_wait() requires the caller to hold the
// associated mutex; no lock of stream_.mutex is visible here (nor in
// the dropped lines' numbering room).  Confirm against the full source
// -- as written this is undefined behavior per POSIX.
1587 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1590 result = AudioDeviceStop( handle->id[0], callbackHandler );
1591 if ( result != noErr ) {
1592 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1593 errorText_ = errorStream_.str();
// Stop the input device separately only when it differs from output.
1598 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1600 result = AudioDeviceStop( handle->id[1], callbackHandler );
1601 if ( result != noErr ) {
1602 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1603 errorText_ = errorStream_.str();
1608 stream_.state = STREAM_STOPPED;
1611 if ( result == noErr ) return;
1612 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream immediately (no drain): setting drainCounter to 2
// makes the audio callback zero-fill its output (see callbackEvent)
// instead of playing out pending data.  Warns and returns early if the
// stream is already stopped.  The listing's numbering jumps after line
// 1625 -- the trailing lines (presumably the stopStream() call and the
// closing brace) were dropped; confirm against the full source.
1615 void RtApiCore :: abortStream( void )
1618 if ( stream_.state == STREAM_STOPPED ) {
1619 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1620 error( RtAudioError::WARNING );
1624 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1625 handle->drainCounter = 2;
1630 // This function will be called by a spawned thread when the user
1631 // callback function signals that the stream should be stopped or
1632 // aborted. It is better to handle it this way because the
1633 // callbackEvent() function probably should return before the AudioDeviceStop()
1634 // function is called.
// Thread entry point (see the comment block above): unpack the
// CallbackInfo to recover the RtApiCore instance and call its
// stopStream() from outside the audio callback, then terminate the
// thread.  (Opening/closing braces fall in lines dropped from this
// numbered listing.)
1635 static void *coreStopStream( void *ptr )
1637 CallbackInfo *info = (CallbackInfo *) ptr;
1638 RtApiCore *object = (RtApiCore *) info->object;
1640 object->stopStream();
1641 pthread_exit( NULL );
1644 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1645 const AudioBufferList *inBufferList,
1646 const AudioBufferList *outBufferList )
1648 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1649 if ( stream_.state == STREAM_CLOSED ) {
1650 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1651 error( RtAudioError::WARNING );
1655 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1656 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1658 // Check if we were draining the stream and signal is finished.
1659 if ( handle->drainCounter > 3 ) {
1660 ThreadHandle threadId;
1662 stream_.state = STREAM_STOPPING;
1663 if ( handle->internalDrain == true )
1664 pthread_create( &threadId, NULL, coreStopStream, info );
1665 else // external call to stopStream()
1666 pthread_cond_signal( &handle->condition );
1670 AudioDeviceID outputDevice = handle->id[0];
1672 // Invoke user callback to get fresh output data UNLESS we are
1673 // draining stream or duplex mode AND the input/output devices are
1674 // different AND this function is called for the input device.
1675 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1676 RtAudioCallback callback = (RtAudioCallback) info->callback;
1677 double streamTime = getStreamTime();
1678 RtAudioStreamStatus status = 0;
1679 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1680 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1681 handle->xrun[0] = false;
1683 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1684 status |= RTAUDIO_INPUT_OVERFLOW;
1685 handle->xrun[1] = false;
1688 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1689 stream_.bufferSize, streamTime, status, info->userData );
1690 if ( cbReturnValue == 2 ) {
1691 stream_.state = STREAM_STOPPING;
1692 handle->drainCounter = 2;
1696 else if ( cbReturnValue == 1 ) {
1697 handle->drainCounter = 1;
1698 handle->internalDrain = true;
1702 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1704 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1706 if ( handle->nStreams[0] == 1 ) {
1707 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1709 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1711 else { // fill multiple streams with zeros
1712 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1713 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1715 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1719 else if ( handle->nStreams[0] == 1 ) {
1720 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1721 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1722 stream_.userBuffer[0], stream_.convertInfo[0] );
1724 else { // copy from user buffer
1725 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1726 stream_.userBuffer[0],
1727 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1730 else { // fill multiple streams
1731 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1732 if ( stream_.doConvertBuffer[0] ) {
1733 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1734 inBuffer = (Float32 *) stream_.deviceBuffer;
1737 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1738 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1739 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1740 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1741 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1744 else { // fill multiple multi-channel streams with interleaved data
1745 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1748 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1749 UInt32 inChannels = stream_.nUserChannels[0];
1750 if ( stream_.doConvertBuffer[0] ) {
// NOTE(review): fragment of the CoreAudio render callback (RtApiCore::callbackEvent).
// This section writes the output (playback) data into multiple CoreAudio streams,
// then handles the input (capture) side. Some intervening lines are not visible in
// this excerpt; comments describe only what is shown.
1751 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1752 inChannels = stream_.nDeviceChannels[0];
// Step between successive samples of one channel in the source buffer:
// 1 for interleaved data, bufferSize for planar (non-interleaved) data.
1755 if ( inInterleaved ) inOffset = 1;
1756 else inOffset = stream_.bufferSize;
// Distribute inChannels of source data across the device's output streams,
// filling each stream with as many channels as it holds.
1758 channelsLeft = inChannels;
1759 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1761 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1762 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1765 // Account for possible channel offset in first stream
1766 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1767 streamChannels -= stream_.channelOffset[0];
1768 outJump = stream_.channelOffset[0];
1772 // Account for possible unfilled channels at end of the last stream
1773 if ( streamChannels > channelsLeft ) {
1774 outJump = streamChannels - channelsLeft;
1775 streamChannels = channelsLeft;
1778 // Determine input buffer offsets and skips
1779 if ( inInterleaved ) {
1780 inJump = inChannels;
1781 in += inChannels - channelsLeft;
1785 in += (inChannels - channelsLeft) * inOffset;
// Frame-by-frame copy into this stream's (interleaved) buffer.
1788 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1789 for ( unsigned int j=0; j<streamChannels; j++ ) {
1790 *out++ = in[j*inOffset];
1795 channelsLeft -= streamChannels;
1801 // Don't bother draining input
// While a drain (stop) is in progress, just count callbacks and skip capture.
1802 if ( handle->drainCounter ) {
1803 handle->drainCounter++;
// Input (capture) side: only process if this callback fired for the input device.
1807 AudioDeviceID inputDevice;
1808 inputDevice = handle->id[1];
1809 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
// Single input stream: either convert or memcpy straight into the user buffer.
1811 if ( handle->nStreams[1] == 1 ) {
1812 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1813 convertBuffer( stream_.userBuffer[1],
1814 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1815 stream_.convertInfo[1] );
1817 else { // copy to user buffer
1818 memcpy( stream_.userBuffer[1],
1819 inBufferList->mBuffers[handle->iStream[1]].mData,
1820 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1823 else { // read from multiple streams
1824 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1825 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1827 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
// One mono stream per channel: copy each stream's buffer to its channel plane.
1828 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1829 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1830 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1831 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1834 else { // read from multiple multi-channel streams
// Mirror of the output case above: gather channels from each input stream
// into the destination, honoring channel offset and interleaving.
1835 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1838 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1839 UInt32 outChannels = stream_.nUserChannels[1];
1840 if ( stream_.doConvertBuffer[1] ) {
1841 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1842 outChannels = stream_.nDeviceChannels[1];
1845 if ( outInterleaved ) outOffset = 1;
1846 else outOffset = stream_.bufferSize;
1848 channelsLeft = outChannels;
1849 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1851 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1852 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1855 // Account for possible channel offset in first stream
1856 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1857 streamChannels -= stream_.channelOffset[1];
1858 inJump = stream_.channelOffset[1];
1862 // Account for possible unread channels at end of the last stream
1863 if ( streamChannels > channelsLeft ) {
1864 inJump = streamChannels - channelsLeft;
1865 streamChannels = channelsLeft;
1868 // Determine output buffer offsets and skips
1869 if ( outInterleaved ) {
1870 outJump = outChannels;
1871 out += outChannels - channelsLeft;
1875 out += (outChannels - channelsLeft) * outOffset;
1878 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1879 for ( unsigned int j=0; j<streamChannels; j++ ) {
1880 out[j*outOffset] = *in++;
1885 channelsLeft -= streamChannels;
1889 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1890 convertBuffer( stream_.userBuffer[1],
1891 stream_.deviceBuffer,
1892 stream_.convertInfo[1] );
1898 //MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream's running time by one buffer period.
1900 RtApi::tickStreamTime();
// Translate a CoreAudio OSStatus error code into the name of the corresponding
// kAudioHardware*/kAudioDevice* constant, for use in error messages.
// Unrecognized codes fall through to a generic string.
1904 const char* RtApiCore :: getErrorCode( OSStatus code )
1908 case kAudioHardwareNotRunningError:
1909 return "kAudioHardwareNotRunningError";
1911 case kAudioHardwareUnspecifiedError:
1912 return "kAudioHardwareUnspecifiedError";
1914 case kAudioHardwareUnknownPropertyError:
1915 return "kAudioHardwareUnknownPropertyError";
1917 case kAudioHardwareBadPropertySizeError:
1918 return "kAudioHardwareBadPropertySizeError";
1920 case kAudioHardwareIllegalOperationError:
1921 return "kAudioHardwareIllegalOperationError";
1923 case kAudioHardwareBadObjectError:
1924 return "kAudioHardwareBadObjectError";
1926 case kAudioHardwareBadDeviceError:
1927 return "kAudioHardwareBadDeviceError";
1929 case kAudioHardwareBadStreamError:
1930 return "kAudioHardwareBadStreamError";
1932 case kAudioHardwareUnsupportedOperationError:
1933 return "kAudioHardwareUnsupportedOperationError";
1935 case kAudioDeviceUnsupportedFormatError:
1936 return "kAudioDeviceUnsupportedFormatError";
1938 case kAudioDevicePermissionsError:
1939 return "kAudioDevicePermissionsError";
// Default case: code not in the known set.
1942 return "CoreAudio unknown error";
1946 //******************** End of __MACOSX_CORE__ *********************//
1949 #if defined(__UNIX_JACK__)
1951 // JACK is a low-latency audio server, originally written for the
1952 // GNU/Linux operating system and now also ported to OS-X. It can
1953 // connect a number of different applications to an audio device, as
1954 // well as allowing them to share audio between themselves.
1956 // When using JACK with RtAudio, "devices" refer to JACK clients that
1957 // have ports connected to the server. The JACK server is typically
1958 // started in a terminal as follows:
1960 // .jackd -d alsa -d hw:0
1962 // or through an interface program such as qjackctl. Many of the
1963 // parameters normally set for a stream are fixed by the JACK server
1964 // and can be specified when the JACK server is started. In
1967 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1969 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1970 // frames, and number of buffers = 4. Once the server is running, it
1971 // is not possible to override these values. If the values are not
1972 // specified in the command-line, the JACK server uses default values.
1974 // The JACK server does not have to be running when an instance of
1975 // RtApiJack is created, though the function getDeviceCount() will
1976 // report 0 devices found until JACK has been started. When no
1977 // devices are available (i.e., the JACK server is not running), a
1978 // stream cannot be opened.
1980 #include <jack/jack.h>
1984 // A structure to hold various information related to the Jack API
// implementation. NOTE(review): the struct's opening declaration is outside
// this excerpt; the lines below are its members and default constructor.
1987 jack_client_t *client;
1988 jack_port_t **ports[2];
1989 std::string deviceName[2];
// Condition variable + drain flags coordinate stopStream() with the
// realtime process callback (see callbackEvent / stopStream).
1991 pthread_cond_t condition;
1992 int drainCounter; // Tracks callback counts when draining
1993 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Default constructor: no client, drain state cleared, port arrays null,
// xrun flags reset for both playback [0] and capture [1].
1996 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
1999 #if !defined(__RTAUDIO_DEBUG__)
// No-op error handler installed into JACK (see the RtApiJack constructor)
// to suppress JACK's internal error printing in non-debug builds.
2000 static void jackSilentError( const char * ) {};
// Constructor: autoconnect is enabled by default (can be disabled per-stream
// via the RTAUDIO_JACK_DONT_CONNECT flag in probeDeviceOpen).
2003 RtApiJack :: RtApiJack()
2004 :shouldAutoconnect_(true) {
2005 // Nothing to do here.
2006 #if !defined(__RTAUDIO_DEBUG__)
2007 // Turn off Jack's internal error reporting.
2008 jack_set_error_function( &jackSilentError );
// Destructor: make sure any open stream is closed before the API object dies.
2012 RtApiJack :: ~RtApiJack()
2014 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices" by connecting a temporary client and parsing the list
// of port names: each unique prefix before the first colon counts as a device.
// Returns 0 if the JACK server is not running (JackNoStartServer).
2017 unsigned int RtApiJack :: getDeviceCount( void )
2019 // See if we can become a jack client.
2020 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2021 jack_status_t *status = NULL;
2022 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2023 if ( client == 0 ) return 0;
2026 std::string port, previousPort;
2027 unsigned int nChannels = 0, nDevices = 0;
2028 ports = jack_get_ports( client, NULL, NULL, 0 );
2030 // Parse the port names up to the first colon (:).
2033 port = (char *) ports[ nChannels ];
2034 iColon = port.find(":");
2035 if ( iColon != std::string::npos ) {
// Note: here the prefix keeps the trailing colon (iColon + 1) — it is only
// used for uniqueness counting, unlike getDeviceInfo which strips it.
2036 port = port.substr( 0, iColon + 1 );
2037 if ( port != previousPort ) {
2039 previousPort = port;
2042 } while ( ports[++nChannels] );
// Release the temporary probing client before returning.
2046 jack_client_close( client );
// Probe one JACK "device" (client-name prefix) and fill in a DeviceInfo:
// name, channel counts, sample rate (fixed by the server), and native format.
// Issues a WARNING and returns a default-constructed info on connection
// failure; INVALID_USE if the device index is out of range.
2050 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2052 RtAudio::DeviceInfo info;
2053 info.probed = false;
// Temporary client used only for probing; closed before every return path.
2055 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2056 jack_status_t *status = NULL;
2057 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2058 if ( client == 0 ) {
2059 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2060 error( RtAudioError::WARNING );
2065 std::string port, previousPort;
2066 unsigned int nPorts = 0, nDevices = 0;
2067 ports = jack_get_ports( client, NULL, NULL, 0 );
2069 // Parse the port names up to the first colon (:).
2072 port = (char *) ports[ nPorts ];
2073 iColon = port.find(":");
2074 if ( iColon != std::string::npos ) {
2075 port = port.substr( 0, iColon );
2076 if ( port != previousPort ) {
// The device index maps onto the order of first appearance of each prefix.
2077 if ( nDevices == device ) info.name = port;
2079 previousPort = port;
2082 } while ( ports[++nPorts] );
2086 if ( device >= nDevices ) {
2087 jack_client_close( client );
2088 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2089 error( RtAudioError::INVALID_USE );
2093 // Get the current jack server sample rate.
// JACK fixes the rate server-wide, so this is the only supported rate.
2094 info.sampleRates.clear();
2096 info.preferredSampleRate = jack_get_sample_rate( client );
2097 info.sampleRates.push_back( info.preferredSampleRate );
2099 // Count the available ports containing the client name as device
2100 // channels. Jack "input ports" equal RtAudio output channels.
2101 unsigned int nChannels = 0;
2102 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
2104 while ( ports[ nChannels ] ) nChannels++;
2106 info.outputChannels = nChannels;
2109 // Jack "output ports" equal RtAudio input channels.
2111 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
2113 while ( ports[ nChannels ] ) nChannels++;
2115 info.inputChannels = nChannels;
2118 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2119 jack_client_close(client);
2120 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2121 error( RtAudioError::WARNING );
2125 // If device opens for both playback and capture, we determine the channels.
2126 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2127 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2129 // Jack always uses 32-bit floats.
2130 info.nativeFormats = RTAUDIO_FLOAT32;
2132 // Jack doesn't provide default devices so we'll use the first available one.
2133 if ( device == 0 && info.outputChannels > 0 )
2134 info.isDefaultOutput = true;
2135 if ( device == 0 && info.inputChannels > 0 )
2136 info.isDefaultInput = true;
2138 jack_client_close(client);
// JACK process callback: forwards each audio period to the RtApiJack object
// stored in the CallbackInfo. Returns nonzero to tell JACK to remove the
// callback when callbackEvent() reports failure.
2143 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2145 CallbackInfo *info = (CallbackInfo *) infoPointer;
2147 RtApiJack *object = (RtApiJack *) info->object;
2148 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2153 // This function will be called by a spawned thread when the Jack
2154 // server signals that it is shutting down. It is necessary to handle
2155 // it this way because the jackShutdown() function must return before
2156 // the jack_deactivate() function (in closeStream()) will return.
2157 static void *jackCloseStream( void *ptr )
2159 CallbackInfo *info = (CallbackInfo *) ptr;
2160 RtApiJack *object = (RtApiJack *) info->object;
2162 object->closeStream();
2164 pthread_exit( NULL );
// JACK shutdown callback: invoked by libjack when the server goes away.
// Spawns jackCloseStream on a separate thread (see comment above) so that
// closeStream()'s jack_deactivate() is not called from within this callback.
2166 static void jackShutdown( void *infoPointer )
2168 CallbackInfo *info = (CallbackInfo *) infoPointer;
2169 RtApiJack *object = (RtApiJack *) info->object;
2171 // Check current stream state. If stopped, then we'll assume this
2172 // was called as a result of a call to RtApiJack::stopStream (the
2173 // deactivation of a client handle causes this function to be called).
2174 // If not, we'll assume the Jack server is shutting down or some
2175 // other problem occurred and we should close the stream.
2176 if ( object->isStreamRunning() == false ) return;
2178 ThreadHandle threadId;
2179 pthread_create( &threadId, NULL, jackCloseStream, info );
2180 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
// JACK xrun callback: records over/underflow on whichever directions are in
// use ([0] = playback, [1] = capture) so callbackEvent can report it as
// RTAUDIO_OUTPUT_UNDERFLOW / RTAUDIO_INPUT_OVERFLOW status flags.
2183 static int jackXrun( void *infoPointer )
2185 JackHandle *handle = *((JackHandle **) infoPointer);
2187 if ( handle->ports[0] ) handle->xrun[0] = true;
2188 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open one direction (OUTPUT or INPUT) of a stream on a JACK "device":
// become a JACK client (or reuse the one from the earlier OUTPUT pass in
// duplex mode), validate channels/sample rate against the server, allocate
// user/device buffers and the JackHandle, install the process/xrun/shutdown
// callbacks, and register one JACK port per user channel.
// Returns FAILURE (with errorText_ set) via the 'error' cleanup path below.
2193 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2194 unsigned int firstChannel, unsigned int sampleRate,
2195 RtAudioFormat format, unsigned int *bufferSize,
2196 RtAudio::StreamOptions *options )
2198 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2200 // Look for jack server and try to become a client (only do once per stream).
2201 jack_client_t *client = 0;
2202 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2203 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2204 jack_status_t *status = NULL;
// The client name defaults to "RtApiJack" unless the caller supplied one.
2205 if ( options && !options->streamName.empty() )
2206 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2208 client = jack_client_open( "RtApiJack", jackoptions, status );
2209 if ( client == 0 ) {
2210 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2211 error( RtAudioError::WARNING );
2216 // The handle must have been created on an earlier pass.
2217 client = handle->client;
// Resolve the device index to a client-name prefix, as in getDeviceInfo().
2221 std::string port, previousPort, deviceName;
2222 unsigned int nPorts = 0, nDevices = 0;
2223 ports = jack_get_ports( client, NULL, NULL, 0 );
2225 // Parse the port names up to the first colon (:).
2228 port = (char *) ports[ nPorts ];
2229 iColon = port.find(":");
2230 if ( iColon != std::string::npos ) {
2231 port = port.substr( 0, iColon );
2232 if ( port != previousPort ) {
2233 if ( nDevices == device ) deviceName = port;
2235 previousPort = port;
2238 } while ( ports[++nPorts] );
2242 if ( device >= nDevices ) {
2243 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2247 // Count the available ports containing the client name as device
2248 // channels. Jack "input ports" equal RtAudio output channels.
2249 unsigned int nChannels = 0;
2250 unsigned long flag = JackPortIsInput;
2251 if ( mode == INPUT ) flag = JackPortIsOutput;
2252 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2254 while ( ports[ nChannels ] ) nChannels++;
2258 // Compare the jack ports for specified client to the requested number of channels.
2259 if ( nChannels < (channels + firstChannel) ) {
2260 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2261 errorText_ = errorStream_.str();
2265 // Check the jack server sample rate.
// JACK's rate is fixed server-wide; a mismatch is a hard failure.
2266 unsigned int jackRate = jack_get_sample_rate( client );
2267 if ( sampleRate != jackRate ) {
2268 jack_client_close( client );
2269 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2270 errorText_ = errorStream_.str();
2273 stream_.sampleRate = jackRate;
2275 // Get the latency of the JACK port.
2276 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2277 if ( ports[ firstChannel ] ) {
2279 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2280 // the range (usually the min and max are equal)
2281 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2282 // get the latency range
2283 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2284 // be optimistic, use the min!
2285 stream_.latency[mode] = latrange.min;
2286 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2290 // The jack server always uses 32-bit floating-point data.
2291 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2292 stream_.userFormat = format;
2294 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2295 else stream_.userInterleaved = true;
2297 // Jack always uses non-interleaved buffers.
2298 stream_.deviceInterleaved[mode] = false;
2300 // Jack always provides host byte-ordered data.
2301 stream_.doByteSwap[mode] = false;
2303 // Get the buffer size. The buffer size and number of buffers
2304 // (periods) is set when the jack server is started.
2305 stream_.bufferSize = (int) jack_get_buffer_size( client );
2306 *bufferSize = stream_.bufferSize;
2308 stream_.nDeviceChannels[mode] = channels;
2309 stream_.nUserChannels[mode] = channels;
2311 // Set flags for buffer conversion.
// Conversion is needed when the user format or interleaving differs from
// JACK's native float32, non-interleaved layout.
2312 stream_.doConvertBuffer[mode] = false;
2313 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2314 stream_.doConvertBuffer[mode] = true;
2315 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2316 stream_.nUserChannels[mode] > 1 )
2317 stream_.doConvertBuffer[mode] = true;
2319 // Allocate our JackHandle structure for the stream.
2320 if ( handle == 0 ) {
2322 handle = new JackHandle;
2324 catch ( std::bad_alloc& ) {
2325 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2329 if ( pthread_cond_init(&handle->condition, NULL) ) {
2330 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2333 stream_.apiHandle = (void *) handle;
2334 handle->client = client;
2336 handle->deviceName[mode] = deviceName;
2338 // Allocate necessary internal buffers.
2339 unsigned long bufferBytes;
2340 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2341 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2342 if ( stream_.userBuffer[mode] == NULL ) {
2343 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2347 if ( stream_.doConvertBuffer[mode] ) {
// In duplex mode a single device buffer is shared by both directions; only
// (re)allocate if this direction needs more space than the existing one.
2349 bool makeBuffer = true;
2350 if ( mode == OUTPUT )
2351 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2352 else { // mode == INPUT
2353 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2354 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2355 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2356 if ( bufferBytes < bytesOut ) makeBuffer = false;
2361 bufferBytes *= *bufferSize;
2362 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2363 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2364 if ( stream_.deviceBuffer == NULL ) {
2365 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2371 // Allocate memory for the Jack ports (channels) identifiers.
2372 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2373 if ( handle->ports[mode] == NULL ) {
2374 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2378 stream_.device[mode] = device;
2379 stream_.channelOffset[mode] = firstChannel;
2380 stream_.state = STREAM_STOPPED;
2381 stream_.callbackInfo.object = (void *) this;
2383 if ( stream_.mode == OUTPUT && mode == INPUT )
2384 // We had already set up the stream for output.
2385 stream_.mode = DUPLEX;
2387 stream_.mode = mode;
// Install the JACK callbacks now that the handle/stream state is in place.
2388 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2389 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2390 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2393 // Register our ports.
2395 if ( mode == OUTPUT ) {
2396 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2397 snprintf( label, 64, "outport %d", i );
2398 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2399 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2403 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2404 snprintf( label, 64, "inport %d", i );
2405 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2406 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2410 // Setup the buffer conversion information structure. We don't use
2411 // buffers to do channel offsets, so we override that parameter
2413 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2415 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// Error cleanup path: tear down handle, client, ports and buffers so the
// stream object returns to a clean UNINITIALIZED-equivalent state.
2421 pthread_cond_destroy( &handle->condition );
2422 jack_client_close( handle->client );
2424 if ( handle->ports[0] ) free( handle->ports[0] );
2425 if ( handle->ports[1] ) free( handle->ports[1] );
2428 stream_.apiHandle = 0;
2431 for ( int i=0; i<2; i++ ) {
2432 if ( stream_.userBuffer[i] ) {
2433 free( stream_.userBuffer[i] );
2434 stream_.userBuffer[i] = 0;
2438 if ( stream_.deviceBuffer ) {
2439 free( stream_.deviceBuffer );
2440 stream_.deviceBuffer = 0;
// Close the stream: deactivate (if running) and close the JACK client, free
// the JackHandle, port arrays, condition variable, and all stream buffers,
// then mark the stream UNINITIALIZED/CLOSED. Warns if no stream is open.
2446 void RtApiJack :: closeStream( void )
2448 if ( stream_.state == STREAM_CLOSED ) {
2449 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2450 error( RtAudioError::WARNING );
2454 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2457 if ( stream_.state == STREAM_RUNNING )
2458 jack_deactivate( handle->client );
2460 jack_client_close( handle->client );
2464 if ( handle->ports[0] ) free( handle->ports[0] );
2465 if ( handle->ports[1] ) free( handle->ports[1] );
2466 pthread_cond_destroy( &handle->condition );
2468 stream_.apiHandle = 0;
// Release per-direction user buffers and the shared device buffer.
2471 for ( int i=0; i<2; i++ ) {
2472 if ( stream_.userBuffer[i] ) {
2473 free( stream_.userBuffer[i] );
2474 stream_.userBuffer[i] = 0;
2478 if ( stream_.deviceBuffer ) {
2479 free( stream_.deviceBuffer );
2480 stream_.deviceBuffer = 0;
2483 stream_.mode = UNINITIALIZED;
2484 stream_.state = STREAM_CLOSED;
// Start the stream: activate the JACK client and, unless autoconnect was
// disabled, wire our registered ports to the target device's ports (first
// nUserChannels ports, honoring channelOffset). Warns if already running.
2487 void RtApiJack :: startStream( void )
2490 if ( stream_.state == STREAM_RUNNING ) {
2491 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2492 error( RtAudioError::WARNING );
2496 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2497 int result = jack_activate( handle->client );
2499 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2505 // Get the list of available ports.
2506 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2508 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
2509 if ( ports == NULL) {
2510 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2514 // Now make the port connections. Since RtAudio wasn't designed to
2515 // allow the user to select particular channels of a device, we'll
2516 // just open the first "nChannels" ports with offset.
2517 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2519 if ( ports[ stream_.channelOffset[0] + i ] )
2520 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2523 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
// Capture side: connect the device's output ports into our input ports.
2530 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2532 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
2533 if ( ports == NULL) {
2534 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2538 // Now make the port connections. See note above.
2539 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2541 if ( ports[ stream_.channelOffset[1] + i ] )
2542 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2545 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Reset drain bookkeeping and mark the stream running.
2552 handle->drainCounter = 0;
2553 handle->internalDrain = false;
2554 stream_.state = STREAM_RUNNING;
2557 if ( result == 0 ) return;
2558 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: if playing, start a drain (drainCounter = 2)
// and block on the condition variable until the process callback signals the
// output has been flushed, then deactivate the client. Warns if already stopped.
2561 void RtApiJack :: stopStream( void )
2564 if ( stream_.state == STREAM_STOPPED ) {
2565 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2566 error( RtAudioError::WARNING );
2570 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2571 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain in progress yet (e.g. not initiated
// from the callback), so start one and wait to be signaled.
2573 if ( handle->drainCounter == 0 ) {
2574 handle->drainCounter = 2;
2575 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2579 jack_deactivate( handle->client );
2580 stream_.state = STREAM_STOPPED;
// Abort the stream: set the drain counter so the callback stops producing
// audio immediately (no graceful flush wait). Warns if already stopped.
2583 void RtApiJack :: abortStream( void )
2586 if ( stream_.state == STREAM_STOPPED ) {
2587 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2588 error( RtAudioError::WARNING );
2592 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2593 handle->drainCounter = 2;
2598 // This function will be called by a spawned thread when the user
2599 // callback function signals that the stream should be stopped or
2600 // aborted. It is necessary to handle it this way because the
2601 // callbackEvent() function must return before the jack_deactivate()
2602 // function will return.
2603 static void *jackStopStream( void *ptr )
2605 CallbackInfo *info = (CallbackInfo *) ptr;
2606 RtApiJack *object = (RtApiJack *) info->object;
2608 object->stopStream();
2609 pthread_exit( NULL );
// Per-period JACK processing: run the user callback, then move audio between
// the user/device buffers and the JACK port buffers for both directions.
// Returns SUCCESS/FAILURE to jackCallbackHandler (FAILURE removes the callback).
// Fix: the two error messages below previously said "RtApiCore::callbackEvent()"
// — a copy-paste from the CoreAudio section; they now name the correct class.
2612 bool RtApiJack :: callbackEvent( unsigned long nframes )
2614 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2615 if ( stream_.state == STREAM_CLOSED ) {
2616 errorText_ = "RtApiJack::callbackEvent(): the stream is closed ... this shouldn't happen!";
2617 error( RtAudioError::WARNING );
// JACK fixes the period size server-wide; a mismatch means our buffers are
// the wrong size and the period cannot be processed.
2620 if ( stream_.bufferSize != nframes ) {
2621 errorText_ = "RtApiJack::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2622 error( RtAudioError::WARNING );
2626 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2627 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2629 // Check if we were draining the stream and signal is finished.
2630 if ( handle->drainCounter > 3 ) {
2631 ThreadHandle threadId;
2633 stream_.state = STREAM_STOPPING;
2634 if ( handle->internalDrain == true )
2635 pthread_create( &threadId, NULL, jackStopStream, info );
2637 pthread_cond_signal( &handle->condition );
2641 // Invoke user callback first, to get fresh output data.
2642 if ( handle->drainCounter == 0 ) {
2643 RtAudioCallback callback = (RtAudioCallback) info->callback;
2644 double streamTime = getStreamTime();
2645 RtAudioStreamStatus status = 0;
// Report (and clear) any xruns recorded by jackXrun since the last period.
2646 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2647 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2648 handle->xrun[0] = false;
2650 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2651 status |= RTAUDIO_INPUT_OVERFLOW;
2652 handle->xrun[1] = false;
2654 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2655 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort immediately; 1 = stop after draining output.
2656 if ( cbReturnValue == 2 ) {
2657 stream_.state = STREAM_STOPPING;
2658 handle->drainCounter = 2;
2660 pthread_create( &id, NULL, jackStopStream, info );
2663 else if ( cbReturnValue == 1 ) {
2664 handle->drainCounter = 1;
2665 handle->internalDrain = true;
2669 jack_default_audio_sample_t *jackbuffer;
2670 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2671 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2673 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2675 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2676 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2677 memset( jackbuffer, 0, bufferBytes );
2681 else if ( stream_.doConvertBuffer[0] ) {
// Convert user format/interleaving into the planar float device buffer,
// then copy each channel plane to its JACK port buffer.
2683 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2685 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2686 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2687 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2690 else { // no buffer conversion
2691 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2692 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2693 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2698 // Don't bother draining input
2699 if ( handle->drainCounter ) {
2700 handle->drainCounter++;
2704 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2706 if ( stream_.doConvertBuffer[1] ) {
2707 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2708 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2709 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2711 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2713 else { // no buffer conversion
2714 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2715 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2716 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
// Advance the stream's running time by one buffer period.
2722 RtApi::tickStreamTime();
2725 //******************** End of __UNIX_JACK__ *********************//
2728 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2730 // The ASIO API is designed around a callback scheme, so this
2731 // implementation is similar to that used for OS-X CoreAudio and Linux
2732 // Jack. The primary constraint with ASIO is that it only allows
2733 // access to a single driver at a time. Thus, it is not possible to
2734 // have more than one simultaneous RtAudio stream.
2736 // This implementation also requires a number of external ASIO files
2737 // and a few global variables. The ASIO callback scheme does not
2738 // allow for the passing of user data, so we must create a global
2739 // pointer to our callbackInfo structure.
2741 // On unix systems, we make use of a pthread condition variable.
2742 // Since there is no equivalent in Windows, I hacked something based
2743 // on information found in
2744 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2746 #include "asiosys.h"
2748 #include "iasiothiscallresolver.h"
2749 #include "asiodrivers.h"
// Global ASIO state: the ASIO callback scheme cannot carry user data, so a
// file-scope CallbackInfo pointer and driver objects are required.
2752 static AsioDrivers drivers;
2753 static ASIOCallbacks asioCallbacks;
2754 static ASIODriverInfo driverInfo;
2755 static CallbackInfo *asioCallbackInfo;
2756 static bool asioXRun;
// NOTE(review): the AsioHandle struct's opening declaration is outside this
// excerpt; the lines below are its members and default constructor.
2759 int drainCounter; // Tracks callback counts when draining
2760 bool internalDrain; // Indicates if stop is initiated from callback or not.
2761 ASIOBufferInfo *bufferInfos;
2765 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2768 // Function declarations (definitions at end of section)
2769 static const char* getAsioErrorString( ASIOError result );
2770 static void sampleRateChanged( ASIOSampleRate sRate );
2771 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM (required by ASIO drivers) and prepare the
// driver-info structure used by later ASIOInit() calls.
2773 RtApiAsio :: RtApiAsio()
2775 // ASIO cannot run on a multi-threaded apartment. You can call
2776 // CoInitialize beforehand, but it must be for apartment threading
2777 // (in which case, CoInitialize will return S_FALSE here).
2778 coInitialized_ = false;
2779 HRESULT hr = CoInitialize( NULL );
2781 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2782 error( RtAudioError::WARNING );
2784 coInitialized_ = true;
2786 drivers.removeCurrentDriver();
2787 driverInfo.asioVersion = 2;
2789 // See note in DirectSound implementation about GetDesktopWindow().
2790 driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream, then undo the CoInitialize from the
// constructor (only if it succeeded there).
2793 RtApiAsio :: ~RtApiAsio()
2795 if ( stream_.state != STREAM_CLOSED ) closeStream();
2796 if ( coInitialized_ ) CoUninitialize();
// Number of installed ASIO drivers, as reported by the driver list.
2799 unsigned int RtApiAsio :: getDeviceCount( void )
2801 return (unsigned int) drivers.asioGetNumDev();
// Probe ASIO driver number 'device' and return its capabilities
// (channel counts, supported sample rates, native data format,
// default-device flags) in an RtAudio::DeviceInfo.
// NOTE(review): several error-return paths and closing braces appear
// to have been elided from this copy of the file (line numbering in
// the content skips); the comments below describe only the visible
// logic.
2804 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2806 RtAudio::DeviceInfo info;
2807 info.probed = false;
// Validate the requested index against the installed-driver count.
2810 unsigned int nDevices = getDeviceCount();
2811 if ( nDevices == 0 ) {
2812 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2813 error( RtAudioError::INVALID_USE );
2817 if ( device >= nDevices ) {
2818 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2819 error( RtAudioError::INVALID_USE );
// ASIO permits only one loaded driver at a time, so while a stream is
// open we serve the results cached by saveDeviceInfo() in devices_.
2823 // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
2824 if ( stream_.state != STREAM_CLOSED ) {
2825 if ( device >= devices_.size() ) {
2826 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2827 error( RtAudioError::WARNING );
2830 return devices_[ device ];
// No stream open: load the driver and query it directly.
2833 char driverName[32];
2834 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2835 if ( result != ASE_OK ) {
2836 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2837 errorText_ = errorStream_.str();
2838 error( RtAudioError::WARNING );
2842 info.name = driverName;
2844 if ( !drivers.loadDriver( driverName ) ) {
2845 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2846 errorText_ = errorStream_.str();
2847 error( RtAudioError::WARNING );
2851 result = ASIOInit( &driverInfo );
2852 if ( result != ASE_OK ) {
2853 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2854 errorText_ = errorStream_.str();
2855 error( RtAudioError::WARNING );
2859 // Determine the device channel information.
2860 long inputChannels, outputChannels;
2861 result = ASIOGetChannels( &inputChannels, &outputChannels );
2862 if ( result != ASE_OK ) {
2863 drivers.removeCurrentDriver();
2864 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2865 errorText_ = errorStream_.str();
2866 error( RtAudioError::WARNING );
2870 info.outputChannels = outputChannels;
2871 info.inputChannels = inputChannels;
// Duplex capability is the smaller of the two directions.
2872 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2873 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2875 // Determine the supported sample rates.
2876 info.sampleRates.clear();
2877 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2878 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2879 if ( result == ASE_OK ) {
2880 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Preferred rate: the highest supported rate that does not exceed
// 48000 Hz (or the first supported rate if none qualify yet).
2882 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2883 info.preferredSampleRate = SAMPLE_RATES[i];
2887 // Determine supported data types ... just check first channel and assume rest are the same.
2888 ASIOChannelInfo channelInfo;
2889 channelInfo.channel = 0;
2890 channelInfo.isInput = true;
2891 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2892 result = ASIOGetChannelInfo( &channelInfo );
2893 if ( result != ASE_OK ) {
2894 drivers.removeCurrentDriver();
2895 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2896 errorText_ = errorStream_.str();
2897 error( RtAudioError::WARNING );
// Map the ASIO sample type to the corresponding RtAudio format flag;
// both endiannesses of each width map to the same RtAudio format.
2901 info.nativeFormats = 0;
2902 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2903 info.nativeFormats |= RTAUDIO_SINT16;
2904 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2905 info.nativeFormats |= RTAUDIO_SINT32;
2906 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2907 info.nativeFormats |= RTAUDIO_FLOAT32;
2908 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2909 info.nativeFormats |= RTAUDIO_FLOAT64;
2910 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2911 info.nativeFormats |= RTAUDIO_SINT24;
// Flag default devices (per RtApi's default-device selection).
2913 if ( info.outputChannels > 0 )
2914 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2915 if ( info.inputChannels > 0 )
2916 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Unload the driver again; probing must leave no driver loaded.
2919 drivers.removeCurrentDriver();
2923 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2925 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2926 object->callbackEvent( index );
2929 void RtApiAsio :: saveDeviceInfo( void )
2933 unsigned int nDevices = getDeviceCount();
2934 devices_.resize( nDevices );
2935 for ( unsigned int i=0; i<nDevices; i++ )
2936 devices_[i] = getDeviceInfo( i );
// Open / configure one direction (OUTPUT or INPUT) of an ASIO stream:
// load and initialize the driver (unless this is the input half of a
// duplex stream, which must reuse the output's driver), validate
// channels and sample rate, negotiate buffer size, create the ASIO
// buffers, allocate RtAudio's user/device conversion buffers, and
// record latencies.  On failure, the cleanup section at the end frees
// everything allocated so far (duplex-input failures are cleaned up by
// RtApi::openStream instead).
// NOTE(review): this copy of the file has many lines elided (the
// embedded numbering skips): error `goto`s, `return` statements,
// closing braces, MUTEX handling, etc.  Comments below describe only
// the visible logic.
2939 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2940 unsigned int firstChannel, unsigned int sampleRate,
2941 RtAudioFormat format, unsigned int *bufferSize,
2942 RtAudio::StreamOptions *options )
2943 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// True when we are opening the input half on top of an already-open
// output stream — the driver is already loaded in that case.
2945 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2947 // For ASIO, a duplex stream MUST use the same driver.
2948 if ( isDuplexInput && stream_.device[0] != device ) {
2949 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2953 char driverName[32];
2954 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2955 if ( result != ASE_OK ) {
2956 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2957 errorText_ = errorStream_.str();
2961 // Only load the driver once for duplex stream.
2962 if ( !isDuplexInput ) {
2963 // The getDeviceInfo() function will not work when a stream is open
2964 // because ASIO does not allow multiple devices to run at the same
2965 // time.  Thus, we'll probe the system before opening a stream and
2966 // save the results for use by getDeviceInfo().
2967 this->saveDeviceInfo();
2969 if ( !drivers.loadDriver( driverName ) ) {
2970 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2971 errorText_ = errorStream_.str();
2975 result = ASIOInit( &driverInfo );
2976 if ( result != ASE_OK ) {
2977 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2978 errorText_ = errorStream_.str();
2983 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2984 bool buffersAllocated = false;
2985 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2986 unsigned int nChannels;
2989 // Check the device channel count.
2990 long inputChannels, outputChannels;
2991 result = ASIOGetChannels( &inputChannels, &outputChannels );
2992 if ( result != ASE_OK ) {
2993 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2994 errorText_ = errorStream_.str();
// Requested channels plus the starting offset must fit in the device.
2998 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2999 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3000 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3001 errorText_ = errorStream_.str();
3004 stream_.nDeviceChannels[mode] = channels;
3005 stream_.nUserChannels[mode] = channels;
3006 stream_.channelOffset[mode] = firstChannel;
3008 // Verify the sample rate is supported.
3009 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3010 if ( result != ASE_OK ) {
3011 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3012 errorText_ = errorStream_.str();
3016 // Get the current sample rate
3017 ASIOSampleRate currentRate;
// NOTE(review): "¤tRate" below is a mojibake of "&currentRate" —
// the "&curren" prefix was swallowed as the HTML entity for '¤'.
// Restore the ampersand before compiling.
3018 result = ASIOGetSampleRate( ¤tRate );
3019 if ( result != ASE_OK ) {
3020 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3021 errorText_ = errorStream_.str();
3025 // Set the sample rate only if necessary
3026 if ( currentRate != sampleRate ) {
3027 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3028 if ( result != ASE_OK ) {
3029 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3030 errorText_ = errorStream_.str();
3035 // Determine the driver data type.
3036 ASIOChannelInfo channelInfo;
3037 channelInfo.channel = 0;
3038 if ( mode == OUTPUT ) channelInfo.isInput = false;
3039 else channelInfo.isInput = true;
3040 result = ASIOGetChannelInfo( &channelInfo );
3041 if ( result != ASE_OK ) {
3042 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3043 errorText_ = errorStream_.str();
3047 // Assuming WINDOWS host is always little-endian.
3048 stream_.doByteSwap[mode] = false;
3049 stream_.userFormat = format;
3050 stream_.deviceFormat[mode] = 0;
// Map the ASIO sample type to RtAudio's format flag; MSB variants
// additionally need a byte swap on (little-endian) Windows.
3051 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3052 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3053 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3055 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3056 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3057 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3059 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3060 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3061 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3063 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3064 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3065 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3067 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3068 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3069 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3072 if ( stream_.deviceFormat[mode] == 0 ) {
3073 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3074 errorText_ = errorStream_.str();
3078 // Set the buffer size.  For a duplex stream, this will end up
3079 // setting the buffer size based on the input constraints, which
3081 long minSize, maxSize, preferSize, granularity;
3082 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3083 if ( result != ASE_OK ) {
3084 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3085 errorText_ = errorStream_.str();
3089 if ( isDuplexInput ) {
3090 // When this is the duplex input (output was opened before), then we have to use the same
3091 // buffersize as the output, because it might use the preferred buffer size, which most
3092 // likely wasn't passed as input to this.  The buffer sizes have to be identically anyway,
3093 // So instead of throwing an error, make them equal.  The caller uses the reference
3094 // to the "bufferSize" param as usual to set up processing buffers.
3096 *bufferSize = stream_.bufferSize;
// Otherwise clamp the requested size into [minSize, maxSize], then
// honor the driver's granularity constraint.
3099 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3100 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3101 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3102 else if ( granularity == -1 ) {
3103 // Make sure bufferSize is a power of two.
// Find the exponents of the highest set bits of minSize and maxSize.
3104 int log2_of_min_size = 0;
3105 int log2_of_max_size = 0;
3107 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3108 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3109 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
// Choose the power of two in that range closest to the request.
3112 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3113 int min_delta_num = log2_of_min_size;
3115 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3116 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3117 if (current_delta < min_delta) {
3118 min_delta = current_delta;
3123 *bufferSize = ( (unsigned int)1 << min_delta_num );
3124 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3125 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3127 else if ( granularity != 0 ) {
3128 // Set to an even multiple of granularity, rounding up.
3129 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3134 // we don't use it anymore, see above!
3135 // Just left it here for the case...
3136 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3137 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3142 stream_.bufferSize = *bufferSize;
3143 stream_.nBuffers = 2;
3145 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3146 else stream_.userInterleaved = true;
3148 // ASIO always uses non-interleaved buffers.
3149 stream_.deviceInterleaved[mode] = false;
3151 // Allocate, if necessary, our AsioHandle structure for the stream.
3152 if ( handle == 0 ) {
3154 handle = new AsioHandle;
3156 catch ( std::bad_alloc& ) {
3157 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3160 handle->bufferInfos = 0;
3162 // Create a manual-reset event.
3163 handle->condition = CreateEvent( NULL, // no security
3164 TRUE, // manual-reset
3165 FALSE, // non-signaled initially
3167 stream_.apiHandle = (void *) handle;
3170 // Create the ASIO internal buffers.  Since RtAudio sets up input
3171 // and output separately, we'll have to dispose of previously
3172 // created output buffers for a duplex stream.
3173 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3174 ASIODisposeBuffers();
3175 if ( handle->bufferInfos ) free( handle->bufferInfos );
3178 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3180 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3181 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3182 if ( handle->bufferInfos == NULL ) {
3183 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3184 errorText_ = errorStream_.str();
// One ASIOBufferInfo per channel: outputs first [0], then inputs [1].
3188 ASIOBufferInfo *infos;
3189 infos = handle->bufferInfos;
3190 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3191 infos->isInput = ASIOFalse;
3192 infos->channelNum = i + stream_.channelOffset[0];
3193 infos->buffers[0] = infos->buffers[1] = 0;
3195 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3196 infos->isInput = ASIOTrue;
3197 infos->channelNum = i + stream_.channelOffset[1];
3198 infos->buffers[0] = infos->buffers[1] = 0;
3201 // prepare for callbacks
3202 stream_.sampleRate = sampleRate;
3203 stream_.device[mode] = device;
3204 stream_.mode = isDuplexInput ? DUPLEX : mode;
3206 // store this class instance before registering callbacks, that are going to use it
3207 asioCallbackInfo = &stream_.callbackInfo;
3208 stream_.callbackInfo.object = (void *) this;
3210 // Set up the ASIO callback structure and create the ASIO data buffers.
3211 asioCallbacks.bufferSwitch = &bufferSwitch;
3212 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3213 asioCallbacks.asioMessage = &asioMessages;
3214 asioCallbacks.bufferSwitchTimeInfo = NULL;
3215 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3216 if ( result != ASE_OK ) {
3217 // Standard method failed.  This can happen with strict/misbehaving drivers that return valid buffer size ranges
3218 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers.  eg. Creatives ASIO driver
3219 // in that case, let's be naïve and try that instead
3220 *bufferSize = preferSize;
3221 stream_.bufferSize = *bufferSize;
3222 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3225 if ( result != ASE_OK ) {
3226 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3227 errorText_ = errorStream_.str();
3230 buffersAllocated = true;
3231 stream_.state = STREAM_STOPPED;
3233 // Set flags for buffer conversion.
3234 stream_.doConvertBuffer[mode] = false;
3235 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3236 stream_.doConvertBuffer[mode] = true;
3237 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3238 stream_.nUserChannels[mode] > 1 )
3239 stream_.doConvertBuffer[mode] = true;
3241 // Allocate necessary internal buffers
3242 unsigned long bufferBytes;
3243 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3244 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3245 if ( stream_.userBuffer[mode] == NULL ) {
3246 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3250 if ( stream_.doConvertBuffer[mode] ) {
// Reuse the output half's device buffer for duplex input when it is
// already large enough; otherwise (re)allocate.
3252 bool makeBuffer = true;
3253 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3254 if ( isDuplexInput && stream_.deviceBuffer ) {
3255 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3256 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3260 bufferBytes *= *bufferSize;
3261 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3262 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3263 if ( stream_.deviceBuffer == NULL ) {
3264 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3270 // Determine device latencies
3271 long inputLatency, outputLatency;
3272 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3273 if ( result != ASE_OK ) {
3274 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3275 errorText_ = errorStream_.str();
3276 error( RtAudioError::WARNING); // warn but don't fail
3279 stream_.latency[0] = outputLatency;
3280 stream_.latency[1] = inputLatency;
3283 // Setup the buffer conversion information structure.  We don't use
3284 // buffers to do channel offsets, so we override that parameter
3286 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// ---- Error cleanup section (reached via elided goto's above) ----
3291 if ( !isDuplexInput ) {
3292 // the cleanup for error in the duplex input, is done by RtApi::openStream
3293 // So we clean up for single channel only
3295 if ( buffersAllocated )
3296 ASIODisposeBuffers();
3298 drivers.removeCurrentDriver();
3301 CloseHandle( handle->condition );
3302 if ( handle->bufferInfos )
3303 free( handle->bufferInfos );
3306 stream_.apiHandle = 0;
3310 if ( stream_.userBuffer[mode] ) {
3311 free( stream_.userBuffer[mode] );
3312 stream_.userBuffer[mode] = 0;
3315 if ( stream_.deviceBuffer ) {
3316 free( stream_.deviceBuffer );
3317 stream_.deviceBuffer = 0;
3322 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Tear down the open stream: stop it if running, release the ASIO
// buffers and driver, destroy the AsioHandle (condition event +
// bufferInfos), free the user/device buffers, and reset stream state.
// NOTE(review): some lines (ASIOStop() on the running path, closing
// braces) appear elided in this copy; comments describe visible logic.
3324 void RtApiAsio :: closeStream()
3326 if ( stream_.state == STREAM_CLOSED ) {
3327 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3328 error( RtAudioError::WARNING );
3332 if ( stream_.state == STREAM_RUNNING ) {
3333 stream_.state = STREAM_STOPPED;
// Release ASIO-side resources and unload the driver.
3336 ASIODisposeBuffers();
3337 drivers.removeCurrentDriver();
3339 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3341 CloseHandle( handle->condition );
3342 if ( handle->bufferInfos )
3343 free( handle->bufferInfos );
3345 stream_.apiHandle = 0;
// Free the per-direction user buffers ([0]=output, [1]=input).
3348 for ( int i=0; i<2; i++ ) {
3349 if ( stream_.userBuffer[i] ) {
3350 free( stream_.userBuffer[i] );
3351 stream_.userBuffer[i] = 0;
3355 if ( stream_.deviceBuffer ) {
3356 free( stream_.deviceBuffer );
3357 stream_.deviceBuffer = 0;
3360 stream_.mode = UNINITIALIZED;
3361 stream_.state = STREAM_CLOSED;
3364 bool stopThreadCalled = false;
// Start the open stream via ASIOStart(), reset the drain bookkeeping
// and the stop-signal event, and mark the stream RUNNING.  Reports a
// SYSTEM_ERROR if ASIOStart() failed.
3366 void RtApiAsio :: startStream()
3369 if ( stream_.state == STREAM_RUNNING ) {
3370 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3371 error( RtAudioError::WARNING );
3375 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3376 ASIOError result = ASIOStart();
3377 if ( result != ASE_OK ) {
3378 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3379 errorText_ = errorStream_.str();
// Reset drain state and the manual-reset event used by stopStream().
3383 handle->drainCounter = 0;
3384 handle->internalDrain = false;
3385 ResetEvent( handle->condition );
3386 stream_.state = STREAM_RUNNING;
3390 stopThreadCalled = false;
3392 if ( result == ASE_OK ) return;
3393 error( RtAudioError::SYSTEM_ERROR );
// Stop the running stream.  For output/duplex streams, first request a
// drain (drainCounter = 2) and block on the handle's condition event
// until callbackEvent() signals that output has been flushed, then
// call ASIOStop().  Reports a SYSTEM_ERROR if ASIOStop() failed.
3396 void RtApiAsio :: stopStream()
3399 if ( stream_.state == STREAM_STOPPED ) {
3400 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3401 error( RtAudioError::WARNING );
3405 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3406 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain was requested yet; request one and
// wait for callbackEvent() to SetEvent() the condition.
3407 if ( handle->drainCounter == 0 ) {
3408 handle->drainCounter = 2;
3409 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3413 stream_.state = STREAM_STOPPED;
3415 ASIOError result = ASIOStop();
3416 if ( result != ASE_OK ) {
3417 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3418 errorText_ = errorStream_.str();
3421 if ( result == ASE_OK ) return;
3422 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream.  Historically this skipped the output drain, but
// (per the comment below) that left residual sound in some drivers, so
// abort now behaves exactly like stop.  NOTE(review): the trailing
// stopStream() call appears elided in this copy of the file.
3425 void RtApiAsio :: abortStream()
3428 if ( stream_.state == STREAM_STOPPED ) {
3429 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3430 error( RtAudioError::WARNING );
3434 // The following lines were commented-out because some behavior was
3435 // noted where the device buffers need to be zeroed to avoid
3436 // continuing sound, even when the device buffers are completely
3437 // disposed.  So now, calling abort is the same as calling stop.
3438 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3439 // handle->drainCounter = 2;
3443 // This function will be called by a spawned thread when the user
3444 // callback function signals that the stream should be stopped or
3445 // aborted. It is necessary to handle it this way because the
3446 // callbackEvent() function must return before the ASIOStop()
3447 // function will return.
3448 static unsigned __stdcall asioStopStream( void *ptr )
3450 CallbackInfo *info = (CallbackInfo *) ptr;
3451 RtApiAsio *object = (RtApiAsio *) info->object;
3453 object->stopStream();
// Per-buffer processing, invoked from the driver's bufferSwitch()
// callback with the half-buffer index to use.  Handles drain
// completion signalling, runs the user callback, converts/byte-swaps
// and copies output data into the ASIO channel buffers, and copies/
// interleaves input data out of them.  Returns SUCCESS (bool).
// NOTE(review): some lines (returns, closing braces, the memcpy byte
// counts in the input section, ASIOOutputReady()) appear elided in
// this copy; comments describe the visible logic only.
3458 bool RtApiAsio :: callbackEvent( long bufferIndex )
3460 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3461 if ( stream_.state == STREAM_CLOSED ) {
3462 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3463 error( RtAudioError::WARNING );
3467 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3468 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3470 // Check if we were draining the stream and signal if finished.
3471 if ( handle->drainCounter > 3 ) {
3473 stream_.state = STREAM_STOPPING;
// External drain (stopStream() is blocked on the event): wake it.
// Internal drain (callback returned 1): stop from a helper thread,
// because stopStream() cannot be called from inside this callback.
3474 if ( handle->internalDrain == false )
3475 SetEvent( handle->condition );
3476 else { // spawn a thread to stop the stream
3478 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3479 &stream_.callbackInfo, 0, &threadId );
3484 // Invoke user callback to get fresh output data UNLESS we are
3486 if ( handle->drainCounter == 0 ) {
3487 RtAudioCallback callback = (RtAudioCallback) info->callback;
3488 double streamTime = getStreamTime();
3489 RtAudioStreamStatus status = 0;
// Report any ASIO xrun to the user callback via the status flags.
3490 if ( stream_.mode != INPUT && asioXRun == true ) {
3491 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3494 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3495 status |= RTAUDIO_INPUT_OVERFLOW;
3498 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3499 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort now (drain skipped); 1 = drain then stop.
3500 if ( cbReturnValue == 2 ) {
3501 stream_.state = STREAM_STOPPING;
3502 handle->drainCounter = 2;
3504 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3505 &stream_.callbackInfo, 0, &threadId );
3508 else if ( cbReturnValue == 1 ) {
3509 handle->drainCounter = 1;
3510 handle->internalDrain = true;
3514 unsigned int nChannels, bufferBytes, i, j;
3515 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
// ---- Output side: fill the driver's output channel buffers ----
3516 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3518 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3520 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3522 for ( i=0, j=0; i<nChannels; i++ ) {
3523 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3524 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
// Conversion path: user format/interleaving -> device, then de-
// interleave into the per-channel ASIO buffers.
3528 else if ( stream_.doConvertBuffer[0] ) {
3530 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3531 if ( stream_.doByteSwap[0] )
3532 byteSwapBuffer( stream_.deviceBuffer,
3533 stream_.bufferSize * stream_.nDeviceChannels[0],
3534 stream_.deviceFormat[0] );
3536 for ( i=0, j=0; i<nChannels; i++ ) {
3537 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3538 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3539 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// Direct path: user buffer already in device layout.
3545 if ( stream_.doByteSwap[0] )
3546 byteSwapBuffer( stream_.userBuffer[0],
3547 stream_.bufferSize * stream_.nUserChannels[0],
3548 stream_.userFormat );
3550 for ( i=0, j=0; i<nChannels; i++ ) {
3551 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3552 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3553 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3559 // Don't bother draining input
3560 if ( handle->drainCounter ) {
3561 handle->drainCounter++;
// ---- Input side: read the driver's input channel buffers ----
3565 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3567 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3569 if (stream_.doConvertBuffer[1]) {
3571 // Always interleave ASIO input data.
3572 for ( i=0, j=0; i<nChannels; i++ ) {
3573 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3574 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3575 handle->bufferInfos[i].buffers[bufferIndex],
3579 if ( stream_.doByteSwap[1] )
3580 byteSwapBuffer( stream_.deviceBuffer,
3581 stream_.bufferSize * stream_.nDeviceChannels[1],
3582 stream_.deviceFormat[1] );
3583 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// Direct path: copy channel-by-channel into the user buffer.
3587 for ( i=0, j=0; i<nChannels; i++ ) {
3588 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3589 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3590 handle->bufferInfos[i].buffers[bufferIndex],
3595 if ( stream_.doByteSwap[1] )
3596 byteSwapBuffer( stream_.userBuffer[1],
3597 stream_.bufferSize * stream_.nUserChannels[1],
3598 stream_.userFormat );
3603 // The following call was suggested by Malte Clasen.  While the API
3604 // documentation indicates it should not be required, some device
3605 // drivers apparently do not function correctly without it.
3608 RtApi::tickStreamTime();
// ASIO driver callback: the driver reports an (external) sample-rate
// change.  RtAudio responds by stopping the stream and informing the
// user via stderr; exceptions from stopStream() are caught and logged.
3612 static void sampleRateChanged( ASIOSampleRate sRate )
3614 // The ASIO documentation says that this usually only happens during
3615 // external sync.  Audio processing is not stopped by the driver,
3616 // actual sample rate might not have even changed, maybe only the
3617 // sample rate status of an AES/EBU or S/PDIF digital input at the
3620 RtApi *object = (RtApi *) asioCallbackInfo->object;
3622 object->stopStream();
3624 catch ( RtAudioError &exception ) {
3625 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3629 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO driver message callback: answers the driver's capability and
// notification queries (reset, resync, latency change, version, etc.).
// NOTE(review): the per-case return statements appear elided in this
// copy of the file; comments describe the visible logic only.
3632 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3636 switch( selector ) {
3637 case kAsioSelectorSupported:
3638 if ( value == kAsioResetRequest
3639 || value == kAsioEngineVersion
3640 || value == kAsioResyncRequest
3641 || value == kAsioLatenciesChanged
3642 // The following three were added for ASIO 2.0, you don't
3643 // necessarily have to support them.
3644 || value == kAsioSupportsTimeInfo
3645 || value == kAsioSupportsTimeCode
3646 || value == kAsioSupportsInputMonitor)
3649 case kAsioResetRequest:
3650 // Defer the task and perform the reset of the driver during the
3651 // next "safe" situation.  You cannot reset the driver right now,
3652 // as this code is called from the driver.  Resetting the driver is
3653 // done by completely destructing it, i.e. ASIOStop(),
3654 // ASIODisposeBuffers(), destruction.  Afterwards you initialize the
3656 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3659 case kAsioResyncRequest:
3660 // This informs the application that the driver encountered some
3661 // non-fatal data loss.  It is used for synchronization purposes
3662 // of different media.  Added mainly to work around the Win16Mutex
3663 // problems in Windows 95/98 with the Windows Multimedia system,
3664 // which could lose data because the Mutex was held too long by
3665 // another thread.  However a driver can issue it in other
3667 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3671 case kAsioLatenciesChanged:
3672 // This will inform the host application that the driver's
3673 // latencies changed.  Beware, this does not mean that the
3674 // buffer sizes have changed!  You might need to update internal
3676 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3679 case kAsioEngineVersion:
3680 // Return the supported ASIO version of the host application.  If
3681 // a host application does not implement this selector, ASIO 1.0
3682 // is assumed by the driver.
3685 case kAsioSupportsTimeInfo:
3686 // Informs the driver whether the
3687 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3688 // For compatibility with ASIO 1.0 drivers the host application
3689 // should always support the "old" bufferSwitch method, too.
3692 case kAsioSupportsTimeCode:
3693 // Informs the driver whether application is interested in time
3694 // code info.  If an application does not need to know about time
3695 // code, the driver has less work to do.
3702 static const char* getAsioErrorString( ASIOError result )
3710 static const Messages m[] =
3712 { ASE_NotPresent, "Hardware input or output is not present or available." },
3713 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3714 { ASE_InvalidParameter, "Invalid input parameter." },
3715 { ASE_InvalidMode, "Invalid mode." },
3716 { ASE_SPNotAdvancing, "Sample position not advancing." },
3717 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3718 { ASE_NoMemory, "Not enough memory to complete the request." }
3721 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3722 if ( m[i].value == result ) return m[i].message;
3724 return "Unknown error.";
3727 //******************** End of __WINDOWS_ASIO__ *********************//
3731 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3733 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3734 // - Introduces support for the Windows WASAPI API
3735 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3736 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3737 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3742 #include <audioclient.h>
3744 #include <mmdeviceapi.h>
3745 #include <functiondiscoverykeys_devpkey.h>
3748 //=============================================================================
3750 #define SAFE_RELEASE( objectPtr )\
3753 objectPtr->Release();\
3757 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3759 //-----------------------------------------------------------------------------
3761 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3762 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3763 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3764 // provide intermediate storage for read / write synchronization.
3778 // sets the length of the internal ring buffer
// (WasapiBuffer) Allocate the ring buffer: bufferSize elements of
// formatBytes bytes each, zero-initialized via calloc.
// NOTE(review): the free() of any previous buffer_ and the in/out
// index resets appear to be elided from this copy of the file —
// confirm against the canonical source before relying on this text.
3779 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3782 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3784 bufferSize_ = bufferSize;
3789 // attempt to push a buffer into the ring buffer at the current "in" index
// Copies bufferSize samples of the given format from `buffer` into the ring
// buffer, wrapping at bufferSize_ if necessary.  Returns false (rejecting the
// whole push) when the input is invalid or there is not enough free space
// between the "in" and "out" indices; otherwise advances inIndex_ modulo
// bufferSize_.  NOTE(review): the `switch ( format )` header, `break;`s and
// the final `return true;` are elided from this listing.
3790 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3792 if ( !buffer || // incoming buffer is NULL
3793 bufferSize == 0 || // incoming buffer has no data
3794 bufferSize > bufferSize_ ) // incoming buffer too large
// Normalize the "out" index into the same unwrapped coordinate space as the
// incoming [inIndex_, inIndexEnd) range so a single overlap test suffices.
3799 unsigned int relOutIndex = outIndex_;
3800 unsigned int inIndexEnd = inIndex_ + bufferSize;
3801 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3802 relOutIndex += bufferSize_;
3805 // "in" index can end on the "out" index but cannot begin at it
3806 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3807 return false; // not enough space between "in" index and "out" index
3810 // copy buffer from external to internal
// fromZeroSize = samples that wrap past the end of the ring buffer and land
// at index 0; fromInSize = samples that fit before the end.
3811 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3812 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3813 int fromInSize = bufferSize - fromZeroSize;
// Per-format copy: indices are in samples, so the cast chooses the stride.
3818 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3819 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3821 case RTAUDIO_SINT16:
3822 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3823 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3825 case RTAUDIO_SINT24:
3826 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3827 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3829 case RTAUDIO_SINT32:
3830 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3831 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3833 case RTAUDIO_FLOAT32:
3834 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3835 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3837 case RTAUDIO_FLOAT64:
3838 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3839 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3843 // update "in" index
3844 inIndex_ += bufferSize;
3845 inIndex_ %= bufferSize_;
3850 // attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror of pushBuffer: copies bufferSize samples of the given format out of
// the ring buffer into `buffer`, wrapping at bufferSize_, then advances
// outIndex_ modulo bufferSize_.  Returns false when the request is invalid or
// fewer than bufferSize samples are available.  NOTE(review): the
// `switch ( format )` header, `break;`s and final `return true;` are elided
// from this listing.
3851 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3853 if ( !buffer || // incoming buffer is NULL
3854 bufferSize == 0 || // incoming buffer has no data
3855 bufferSize > bufferSize_ ) // incoming buffer too large
// Normalize the "in" index into the same unwrapped coordinate space as the
// requested [outIndex_, outIndexEnd) range so a single overlap test suffices.
3860 unsigned int relInIndex = inIndex_;
3861 unsigned int outIndexEnd = outIndex_ + bufferSize;
3862 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3863 relInIndex += bufferSize_;
3866 // "out" index can begin at and end on the "in" index
3867 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3868 return false; // not enough space between "out" index and "in" index
3871 // copy buffer from internal to external
// fromZeroSize = samples read from the start of the ring after wrapping;
// fromOutSize = samples read before the end.
3872 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3873 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3874 int fromOutSize = bufferSize - fromZeroSize;
// Per-format copy: indices are in samples, so the cast chooses the stride.
3879 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3880 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3882 case RTAUDIO_SINT16:
3883 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3884 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3886 case RTAUDIO_SINT24:
3887 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3888 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3890 case RTAUDIO_SINT32:
3891 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3892 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3894 case RTAUDIO_FLOAT32:
3895 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3896 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3898 case RTAUDIO_FLOAT64:
3899 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3900 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3904 // update "out" index
3905 outIndex_ += bufferSize;
3906 outIndex_ %= bufferSize_;
// Ring-buffer state.  bufferSize_ is the element capacity passed to
// setBufferSize(); inIndex_/outIndex_ are element indices (not bytes) into
// buffer_, kept in [0, bufferSize_) by the modulo updates in push/pull.
3913 unsigned int bufferSize_;
3914 unsigned int inIndex_;
3915 unsigned int outIndex_;
3918 //-----------------------------------------------------------------------------
3920 // A structure to hold various information related to the WASAPI implementation.
// Per-stream COM interfaces and event handles, stored behind
// stream_.apiHandle.  All members default to NULL; they are populated lazily
// by probeDeviceOpen() / wasapiThread() and released in closeStream().
3923 IAudioClient* captureAudioClient;
3924 IAudioClient* renderAudioClient;
3925 IAudioCaptureClient* captureClient;
3926 IAudioRenderClient* renderClient;
3927 HANDLE captureEvent;
// Default constructor: zero every interface pointer and event handle.
3931 : captureAudioClient( NULL ),
3932 renderAudioClient( NULL ),
3933 captureClient( NULL ),
3934 renderClient( NULL ),
3935 captureEvent( NULL ),
3936 renderEvent( NULL ) {}
3939 //=============================================================================
// Constructor: initialize COM on this thread (recording whether we own the
// CoInitialize so the destructor can balance it) and create the MMDevice
// enumerator used by all subsequent device queries.
3941 RtApiWasapi::RtApiWasapi()
3942 : coInitialized_( false ), deviceEnumerator_( NULL )
3944 // WASAPI can run either apartment or multi-threaded
3945 HRESULT hr = CoInitialize( NULL );
3946 if ( !FAILED( hr ) )
3947 coInitialized_ = true;
3949 // Instantiate device enumerator
3950 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
3951 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
3952 ( void** ) &deviceEnumerator_ );
3954 if ( FAILED( hr ) ) {
// Non-fatal here: report through the RtAudio error mechanism and continue
// with deviceEnumerator_ == NULL (later calls will fail per-operation).
3955 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
3956 error( RtAudioError::DRIVER_ERROR );
3960 //-----------------------------------------------------------------------------
// Destructor: close any open stream, release the device enumerator, and
// balance the constructor's CoInitialize().  NOTE(review): the closeStream()
// call and CoUninitialize() call bodies are elided from this listing.
3962 RtApiWasapi::~RtApiWasapi()
3964 if ( stream_.state != STREAM_CLOSED )
3967 SAFE_RELEASE( deviceEnumerator_ );
3969 // If this object previously called CoInitialize()
3970 if ( coInitialized_ )
3974 //=============================================================================
// Returns the total number of active WASAPI endpoints: active capture
// devices + active render devices.  On any enumeration failure it reports a
// DRIVER_ERROR via error() instead of returning a count.  NOTE(review): the
// `goto Exit;`/`Exit:` control flow and the failure-path return value are
// elided from this listing.
3976 unsigned int RtApiWasapi::getDeviceCount( void )
3978 unsigned int captureDeviceCount = 0;
3979 unsigned int renderDeviceCount = 0;
3981 IMMDeviceCollection* captureDevices = NULL;
3982 IMMDeviceCollection* renderDevices = NULL;
3984 // Count capture devices
3986 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
3987 if ( FAILED( hr ) ) {
3988 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
3992 hr = captureDevices->GetCount( &captureDeviceCount );
3993 if ( FAILED( hr ) ) {
3994 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
3998 // Count render devices
3999 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4000 if ( FAILED( hr ) ) {
4001 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4005 hr = renderDevices->GetCount( &renderDeviceCount );
4006 if ( FAILED( hr ) ) {
4007 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4012 // release all references
4013 SAFE_RELEASE( captureDevices );
4014 SAFE_RELEASE( renderDevices );
4016 if ( errorText_.empty() )
4017 return captureDeviceCount + renderDeviceCount;
4019 error( RtAudioError::DRIVER_ERROR );
4023 //-----------------------------------------------------------------------------
// Probes a single device and fills an RtAudio::DeviceInfo.  Device indexing
// convention: indices [0, renderDeviceCount) are render devices; indices
// [renderDeviceCount, renderDeviceCount + captureDeviceCount) map onto the
// capture collection as (device - renderDeviceCount).  All COM references and
// PROPVARIANTs acquired here are released in the common cleanup block at the
// bottom.  NOTE(review): the `goto Exit;`/`Exit:` error-path control flow,
// closing braces and the final `return info;` are elided from this listing.
4025 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4027 RtAudio::DeviceInfo info;
4028 unsigned int captureDeviceCount = 0;
4029 unsigned int renderDeviceCount = 0;
4030 std::string defaultDeviceName;
4031 bool isCaptureDevice = false;
4033 PROPVARIANT deviceNameProp;
4034 PROPVARIANT defaultDeviceNameProp;
4036 IMMDeviceCollection* captureDevices = NULL;
4037 IMMDeviceCollection* renderDevices = NULL;
4038 IMMDevice* devicePtr = NULL;
4039 IMMDevice* defaultDevicePtr = NULL;
4040 IAudioClient* audioClient = NULL;
4041 IPropertyStore* devicePropStore = NULL;
4042 IPropertyStore* defaultDevicePropStore = NULL;
4044 WAVEFORMATEX* deviceFormat = NULL;
4045 WAVEFORMATEX* closestMatchFormat = NULL;
// Pessimistically mark the probe as failed until every query succeeds.
4048 info.probed = false;
4050 // Count capture devices
4052 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4053 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4054 if ( FAILED( hr ) ) {
4055 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4059 hr = captureDevices->GetCount( &captureDeviceCount );
4060 if ( FAILED( hr ) ) {
4061 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4065 // Count render devices
4066 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4067 if ( FAILED( hr ) ) {
4068 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4072 hr = renderDevices->GetCount( &renderDeviceCount );
4073 if ( FAILED( hr ) ) {
4074 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4078 // validate device index
4079 if ( device >= captureDeviceCount + renderDeviceCount ) {
4080 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4081 errorType = RtAudioError::INVALID_USE;
4085 // determine whether index falls within capture or render devices
4086 if ( device >= renderDeviceCount ) {
4087 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4088 if ( FAILED( hr ) ) {
4089 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4092 isCaptureDevice = true;
4095 hr = renderDevices->Item( device, &devicePtr );
4096 if ( FAILED( hr ) ) {
4097 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4100 isCaptureDevice = false;
4103 // get default device name
4104 if ( isCaptureDevice ) {
4105 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4106 if ( FAILED( hr ) ) {
4107 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4112 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4113 if ( FAILED( hr ) ) {
4114 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4119 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4120 if ( FAILED( hr ) ) {
4121 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4124 PropVariantInit( &defaultDeviceNameProp );
4126 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4127 if ( FAILED( hr ) ) {
4128 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4132 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4135 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4136 if ( FAILED( hr ) ) {
4137 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4141 PropVariantInit( &deviceNameProp );
4143 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4144 if ( FAILED( hr ) ) {
4145 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4149 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default-device flags are decided by friendly-name comparison against the
// eConsole default endpoint looked up above.
4152 if ( isCaptureDevice ) {
4153 info.isDefaultInput = info.name == defaultDeviceName;
4154 info.isDefaultOutput = false;
4157 info.isDefaultInput = false;
4158 info.isDefaultOutput = info.name == defaultDeviceName;
4162 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4163 if ( FAILED( hr ) ) {
4164 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4168 hr = audioClient->GetMixFormat( &deviceFormat );
4169 if ( FAILED( hr ) ) {
4170 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
// Channel counts come from the shared-mode mix format; a device is reported
// as pure input or pure output, never duplex.
4174 if ( isCaptureDevice ) {
4175 info.inputChannels = deviceFormat->nChannels;
4176 info.outputChannels = 0;
4177 info.duplexChannels = 0;
4180 info.inputChannels = 0;
4181 info.outputChannels = deviceFormat->nChannels;
4182 info.duplexChannels = 0;
4185 // sample rates (WASAPI only supports the one native sample rate)
4186 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4188 info.sampleRates.clear();
4189 info.sampleRates.push_back( deviceFormat->nSamplesPerSec );
// Map the mix format (plain tag or WAVE_FORMAT_EXTENSIBLE SubFormat) onto
// the single matching RtAudioFormat bit.
4192 info.nativeFormats = 0;
4194 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4195 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4196 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4198 if ( deviceFormat->wBitsPerSample == 32 ) {
4199 info.nativeFormats |= RTAUDIO_FLOAT32;
4201 else if ( deviceFormat->wBitsPerSample == 64 ) {
4202 info.nativeFormats |= RTAUDIO_FLOAT64;
4205 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4206 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4207 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4209 if ( deviceFormat->wBitsPerSample == 8 ) {
4210 info.nativeFormats |= RTAUDIO_SINT8;
4212 else if ( deviceFormat->wBitsPerSample == 16 ) {
4213 info.nativeFormats |= RTAUDIO_SINT16;
4215 else if ( deviceFormat->wBitsPerSample == 24 ) {
4216 info.nativeFormats |= RTAUDIO_SINT24;
4218 else if ( deviceFormat->wBitsPerSample == 32 ) {
4219 info.nativeFormats |= RTAUDIO_SINT32;
4227 // release all references
4228 PropVariantClear( &deviceNameProp );
4229 PropVariantClear( &defaultDeviceNameProp );
4231 SAFE_RELEASE( captureDevices );
4232 SAFE_RELEASE( renderDevices );
4233 SAFE_RELEASE( devicePtr );
4234 SAFE_RELEASE( defaultDevicePtr );
4235 SAFE_RELEASE( audioClient );
4236 SAFE_RELEASE( devicePropStore );
4237 SAFE_RELEASE( defaultDevicePropStore );
// Mix formats are CoTaskMemAlloc'd by GetMixFormat and must be freed with
// CoTaskMemFree (NULL is a safe argument).
4239 CoTaskMemFree( deviceFormat );
4240 CoTaskMemFree( closestMatchFormat );
4242 if ( !errorText_.empty() )
4247 //-----------------------------------------------------------------------------
// Linear scan for the device whose DeviceInfo reports isDefaultOutput.
// NOTE(review): the return statements (matched index / fallback 0) are
// elided from this listing.  Each iteration re-probes a device, so this is
// O(n) full device probes.
4249 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4251 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4252 if ( getDeviceInfo( i ).isDefaultOutput ) {
4260 //-----------------------------------------------------------------------------
// Linear scan for the device whose DeviceInfo reports isDefaultInput.
// NOTE(review): the return statements (matched index / fallback 0) are
// elided from this listing.
4262 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4264 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4265 if ( getDeviceInfo( i ).isDefaultInput ) {
4273 //-----------------------------------------------------------------------------
// Tears down the open stream: stops it if still running, releases every COM
// interface and event handle held in the WasapiHandle, frees the handle and
// the user/device buffers, and marks the stream CLOSED.  Warns (does not
// throw) when no stream is open.  NOTE(review): the stopStream() call after
// the state check is elided from this listing.
4275 void RtApiWasapi::closeStream( void )
4277 if ( stream_.state == STREAM_CLOSED ) {
4278 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4279 error( RtAudioError::WARNING );
4283 if ( stream_.state != STREAM_STOPPED )
4286 // clean up stream memory
4287 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4288 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4290 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4291 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
// Event handles are Win32 objects, not COM -- close them with CloseHandle.
4293 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4294 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4296 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4297 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4299 delete ( WasapiHandle* ) stream_.apiHandle;
4300 stream_.apiHandle = NULL;
// Free the per-direction user buffers (index 0/1 = OUTPUT/INPUT).
4302 for ( int i = 0; i < 2; i++ ) {
4303 if ( stream_.userBuffer[i] ) {
4304 free( stream_.userBuffer[i] );
4305 stream_.userBuffer[i] = 0;
4309 if ( stream_.deviceBuffer ) {
4310 free( stream_.deviceBuffer );
4311 stream_.deviceBuffer = 0;
4314 // update stream state
4315 stream_.state = STREAM_CLOSED;
4318 //-----------------------------------------------------------------------------
// Starts the stream by marking it RUNNING and spawning the dedicated WASAPI
// worker thread (runWasapiThread) at the priority chosen in
// probeDeviceOpen().  Warns and returns if already running.
4320 void RtApiWasapi::startStream( void )
4324 if ( stream_.state == STREAM_RUNNING ) {
4325 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4326 error( RtAudioError::WARNING );
4330 // update stream state
// State is set to RUNNING before the thread starts; the worker loops on this
// flag and stops when it sees STREAM_STOPPING.
4331 stream_.state = STREAM_RUNNING;
4333 // create WASAPI stream thread
// CREATE_SUSPENDED lets us set the priority before the thread executes.
4334 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4336 if ( !stream_.callbackInfo.thread ) {
4337 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4338 error( RtAudioError::THREAD_ERROR );
4341 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4342 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4346 //-----------------------------------------------------------------------------
// Gracefully stops the stream: signals the worker thread via STREAM_STOPPING,
// busy-waits for it to report STREAM_STOPPED, sleeps one buffer period so the
// final buffer can play out, then stops both audio clients and closes the
// worker thread handle.  Warns and returns if already stopped.
// NOTE(review): the busy-wait loop body (likely a Sleep) is elided from this
// listing.
4348 void RtApiWasapi::stopStream( void )
4352 if ( stream_.state == STREAM_STOPPED ) {
4353 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4354 error( RtAudioError::WARNING );
4358 // inform stream thread by setting stream state to STREAM_STOPPING
4359 stream_.state = STREAM_STOPPING;
4361 // wait until stream thread is stopped
4362 while( stream_.state != STREAM_STOPPED ) {
4366 // Wait for the last buffer to play before stopping.
4367 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4369 // stop capture client if applicable
4370 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4371 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4372 if ( FAILED( hr ) ) {
4373 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4374 error( RtAudioError::DRIVER_ERROR );
4379 // stop render client if applicable
4380 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4381 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4382 if ( FAILED( hr ) ) {
4383 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4384 error( RtAudioError::DRIVER_ERROR );
4389 // close thread handle
4390 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4391 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4392 error( RtAudioError::THREAD_ERROR );
4396 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4399 //-----------------------------------------------------------------------------
// Immediately stops the stream.  Identical to stopStream() except it does
// NOT sleep for the final buffer to drain -- pending audio is dropped.
// NOTE(review): the busy-wait loop body is elided from this listing.
4401 void RtApiWasapi::abortStream( void )
4405 if ( stream_.state == STREAM_STOPPED ) {
4406 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4407 error( RtAudioError::WARNING );
4411 // inform stream thread by setting stream state to STREAM_STOPPING
4412 stream_.state = STREAM_STOPPING;
4414 // wait until stream thread is stopped
4415 while ( stream_.state != STREAM_STOPPED ) {
4419 // stop capture client if applicable
4420 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4421 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4422 if ( FAILED( hr ) ) {
4423 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4424 error( RtAudioError::DRIVER_ERROR );
4429 // stop render client if applicable
4430 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4431 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4432 if ( FAILED( hr ) ) {
4433 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4434 error( RtAudioError::DRIVER_ERROR );
4439 // close thread handle
4440 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4441 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4442 error( RtAudioError::THREAD_ERROR );
4446 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4449 //-----------------------------------------------------------------------------
// Opens one direction (INPUT or OUTPUT) of a WASAPI stream: validates the
// device index and sample rate, activates an IAudioClient for the chosen
// endpoint (stored in the WasapiHandle), and fills in the stream_ bookkeeping
// (channels, format, conversion flags, user buffer).  The actual
// IAudioClient::Initialize happens later in wasapiThread().  Returns SUCCESS
// or FAILURE.  Uses the same device-index convention as getDeviceInfo():
// render devices first, then capture devices.  NOTE(review): `goto Exit;`
// error paths, `Exit:` label, closing braces and the failure-path
// closeStream()/error() calls are elided from this listing.
4451 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4452 unsigned int firstChannel, unsigned int sampleRate,
4453 RtAudioFormat format, unsigned int* bufferSize,
4454 RtAudio::StreamOptions* options )
4456 bool methodResult = FAILURE;
4457 unsigned int captureDeviceCount = 0;
4458 unsigned int renderDeviceCount = 0;
4460 IMMDeviceCollection* captureDevices = NULL;
4461 IMMDeviceCollection* renderDevices = NULL;
4462 IMMDevice* devicePtr = NULL;
4463 WAVEFORMATEX* deviceFormat = NULL;
4464 unsigned int bufferBytes;
4465 stream_.state = STREAM_STOPPED;
4466 RtAudio::DeviceInfo deviceInfo;
4468 // create API Handle if not already created
// One WasapiHandle is shared by both directions of a duplex stream.
4469 if ( !stream_.apiHandle )
4470 stream_.apiHandle = ( void* ) new WasapiHandle();
4472 // Count capture devices
4474 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4475 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4476 if ( FAILED( hr ) ) {
4477 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4481 hr = captureDevices->GetCount( &captureDeviceCount );
4482 if ( FAILED( hr ) ) {
4483 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4487 // Count render devices
4488 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4489 if ( FAILED( hr ) ) {
4490 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4494 hr = renderDevices->GetCount( &renderDeviceCount );
4495 if ( FAILED( hr ) ) {
4496 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4500 // validate device index
4501 if ( device >= captureDeviceCount + renderDeviceCount ) {
4502 errorType = RtAudioError::INVALID_USE;
4503 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4507 deviceInfo = getDeviceInfo( device );
4509 // validate sample rate
// This RtAudio version does no resampling: the requested rate must equal
// the device's shared-mode mix rate exactly.
4510 if ( sampleRate != deviceInfo.preferredSampleRate )
4512 errorType = RtAudioError::INVALID_USE;
4513 std::stringstream ss;
4514 ss << "RtApiWasapi::probeDeviceOpen: " << sampleRate
4515 << "Hz sample rate not supported. This device only supports "
4516 << deviceInfo.preferredSampleRate << "Hz.";
4517 errorText_ = ss.str();
4521 // determine whether index falls within capture or render devices
4522 if ( device >= renderDeviceCount ) {
4523 if ( mode != INPUT ) {
4524 errorType = RtAudioError::INVALID_USE;
4525 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4529 // retrieve captureAudioClient from devicePtr
// Reference into the WasapiHandle so Activate() writes straight into it.
4530 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4532 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4533 if ( FAILED( hr ) ) {
4534 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4538 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4539 NULL, ( void** ) &captureAudioClient );
4540 if ( FAILED( hr ) ) {
4541 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4545 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4546 if ( FAILED( hr ) ) {
4547 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4551 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
// Latency return value is deliberately ignored; failure leaves latency 0.
4552 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4555 if ( mode != OUTPUT ) {
4556 errorType = RtAudioError::INVALID_USE;
4557 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4561 // retrieve renderAudioClient from devicePtr
4562 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4564 hr = renderDevices->Item( device, &devicePtr );
4565 if ( FAILED( hr ) ) {
4566 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4570 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4571 NULL, ( void** ) &renderAudioClient );
4572 if ( FAILED( hr ) ) {
4573 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4577 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4578 if ( FAILED( hr ) ) {
4579 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4583 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4584 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// Opening the second direction of an already half-open stream makes it
// DUPLEX; otherwise record the single direction.
4588 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4589 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4590 stream_.mode = DUPLEX;
4593 stream_.mode = mode;
4596 stream_.device[mode] = device;
4597 stream_.doByteSwap[mode] = false;
4598 stream_.sampleRate = sampleRate;
4599 stream_.bufferSize = *bufferSize;
4600 stream_.nBuffers = 1;
4601 stream_.nUserChannels[mode] = channels;
4602 stream_.channelOffset[mode] = firstChannel;
4603 stream_.userFormat = format;
// deviceInfo.nativeFormats carries exactly one bit for WASAPI (set from the
// mix format in getDeviceInfo), so the bitmask doubles as the device format.
4604 stream_.deviceFormat[mode] = deviceInfo.nativeFormats;
4606 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4607 stream_.userInterleaved = false;
4609 stream_.userInterleaved = true;
4610 stream_.deviceInterleaved[mode] = true;
4612 // Set flags for buffer conversion.
4613 stream_.doConvertBuffer[mode] = false;
4614 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4615 stream_.nUserChannels != stream_.nDeviceChannels )
4616 stream_.doConvertBuffer[mode] = true;
4617 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4618 stream_.nUserChannels[mode] > 1 )
4619 stream_.doConvertBuffer[mode] = true;
4621 if ( stream_.doConvertBuffer[mode] )
4622 setConvertInfo( mode, 0 );
4624 // Allocate necessary internal buffers
4625 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4627 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4628 if ( !stream_.userBuffer[mode] ) {
4629 errorType = RtAudioError::MEMORY_ERROR;
4630 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
// Priority 15 corresponds to TIME_CRITICAL for the worker thread.
4634 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4635 stream_.callbackInfo.priority = 15;
4637 stream_.callbackInfo.priority = 0;
4639 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4640 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4642 methodResult = SUCCESS;
// Common cleanup: release enumeration references; the activated audio
// clients stay alive inside the WasapiHandle.
4646 SAFE_RELEASE( captureDevices );
4647 SAFE_RELEASE( renderDevices );
4648 SAFE_RELEASE( devicePtr );
4649 CoTaskMemFree( deviceFormat );
4651 // if method failed, close the stream
4652 if ( methodResult == FAILURE )
4655 if ( !errorText_.empty() )
4657 return methodResult;
4660 //=============================================================================
// Win32 thread entry point: trampoline from the CreateThread() in
// startStream() into the member function wasapiThread().
// NOTE(review): the `return 0;` is elided from this listing.
4662 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4665 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
// Win32 thread entry point used to stop a stream from a helper thread
// (so wasapiThread can request its own shutdown without self-joining).
// NOTE(review): the `return 0;` is elided from this listing.
4670 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4673 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
// Win32 thread entry point used to abort a stream from a helper thread;
// counterpart of stopWasapiThread() without the drain delay.
// NOTE(review): the `return 0;` is elided from this listing.
4678 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4681 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4686 //-----------------------------------------------------------------------------
// Dedicated stream worker thread.  Initializes the capture and/or render
// IAudioClients in shared event-driven mode, sets up WasapiBuffer ring
// buffers between the hardware and the user callback, then (beyond this
// listing) runs the event loop that shuttles audio until stream_.state
// leaves STREAM_RUNNING.  This block extends past the end of the visible
// SOURCE; only the setup portion is documented here.
4688 void RtApiWasapi::wasapiThread()
4690 // as this is a new thread, we must CoInitialize it
4691 CoInitialize( NULL );
// Pull the per-stream COM objects cached by probeDeviceOpen(); clients and
// events may still be NULL and are created below on first run.
4695 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4696 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4697 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4698 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4699 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4700 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4702 WAVEFORMATEX* captureFormat = NULL;
4703 WAVEFORMATEX* renderFormat = NULL;
// Ring buffers mediating between WASAPI packet sizes and the user's
// bufferSize (HwIn->UserIn and UserOut->HwOut).
4704 WasapiBuffer captureBuffer;
4705 WasapiBuffer renderBuffer;
4707 // declare local stream variables
4708 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4709 BYTE* streamBuffer = NULL;
4710 unsigned long captureFlags = 0;
4711 unsigned int bufferFrameCount = 0;
4712 unsigned int numFramesPadding = 0;
4713 bool callbackPushed = false;
4714 bool callbackPulled = false;
4715 bool callbackStopped = false;
4716 int callbackResult = 0;
4718 unsigned int deviceBuffSize = 0;
4721 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4723 // Attempt to assign "Pro Audio" characteristic to thread
// Loaded dynamically so the code still runs where avrt.dll is unavailable.
// NOTE(review): casting a narrow string literal to LPCTSTR is wrong under
// UNICODE builds (LoadLibrary expects a wide string there) -- confirm the
// build configuration; later RtAudio versions use TEXT("AVRT.dll").
4724 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4726 DWORD taskIndex = 0;
4727 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4728 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4729 FreeLibrary( AvrtDll );
4732 // start capture stream if applicable
4733 if ( captureAudioClient ) {
4734 hr = captureAudioClient->GetMixFormat( &captureFormat );
4735 if ( FAILED( hr ) ) {
4736 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4740 // initialize capture stream according to desired buffer size
// Convert stream_.bufferSize (frames) into a REFERENCE_TIME period
// (units of 100 ns): frames / rate * 10^7.
4741 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / captureFormat->nSamplesPerSec );
// Only initialize on the first pass; captureClient persists across
// stop/start cycles inside the WasapiHandle.
4743 if ( !captureClient ) {
4744 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4745 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4746 desiredBufferPeriod,
4747 desiredBufferPeriod,
4750 if ( FAILED( hr ) ) {
4751 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4755 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4756 ( void** ) &captureClient );
4757 if ( FAILED( hr ) ) {
4758 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4762 // configure captureEvent to trigger on every available capture buffer
4763 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4764 if ( !captureEvent ) {
4765 errorType = RtAudioError::SYSTEM_ERROR;
4766 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4770 hr = captureAudioClient->SetEventHandle( captureEvent );
4771 if ( FAILED( hr ) ) {
4772 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Persist the new client/event so closeStream() can release them.
4776 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4777 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4780 unsigned int inBufferSize = 0;
4781 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4782 if ( FAILED( hr ) ) {
4783 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4787 // scale outBufferSize according to stream->user sample rate ratio
// Sizes below are in samples (frames * channels), matching WasapiBuffer.
4788 unsigned int outBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT];
4789 inBufferSize *= stream_.nDeviceChannels[INPUT];
4791 // set captureBuffer size
// Ring holds one hardware period plus one user period so neither side
// starves the other.
4792 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4794 // reset the capture stream
4795 hr = captureAudioClient->Reset();
4796 if ( FAILED( hr ) ) {
4797 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4801 // start the capture stream
4802 hr = captureAudioClient->Start();
4803 if ( FAILED( hr ) ) {
4804 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4809 // start render stream if applicable
// Mirror of the capture setup above, for the render direction.
4810 if ( renderAudioClient ) {
4811 hr = renderAudioClient->GetMixFormat( &renderFormat );
4812 if ( FAILED( hr ) ) {
4813 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4817 // initialize render stream according to desired buffer size
4818 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / renderFormat->nSamplesPerSec );
4820 if ( !renderClient ) {
4821 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4822 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4823 desiredBufferPeriod,
4824 desiredBufferPeriod,
4827 if ( FAILED( hr ) ) {
4828 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4832 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4833 ( void** ) &renderClient );
4834 if ( FAILED( hr ) ) {
4835 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4839 // configure renderEvent to trigger on every available render buffer
4840 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4841 if ( !renderEvent ) {
4842 errorType = RtAudioError::SYSTEM_ERROR;
4843 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4847 hr = renderAudioClient->SetEventHandle( renderEvent );
4848 if ( FAILED( hr ) ) {
4849 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4853 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4854 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4857 unsigned int outBufferSize = 0;
4858 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4859 if ( FAILED( hr ) ) {
4860 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4864 // scale inBufferSize according to user->stream sample rate ratio
4865 unsigned int inBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[OUTPUT];
4866 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4868 // set renderBuffer size
4869 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4871 // reset the render stream
4872 hr = renderAudioClient->Reset();
4873 if ( FAILED( hr ) ) {
4874 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4878 // start the render stream
4879 hr = renderAudioClient->Start();
4880 if ( FAILED( hr ) ) {
4881 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
// Size of the device-side interleaved buffer handed to the user callback
// (input-only case shown; the remainder of this function lies beyond the
// visible portion of this listing).
4886 if ( stream_.mode == INPUT ) {
4887 using namespace std; // for roundf
4888 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4890 else if ( stream_.mode == OUTPUT ) {
4891 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4893 else if ( stream_.mode == DUPLEX ) {
4894 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4895 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4898 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4899 if ( !stream_.deviceBuffer ) {
4900 errorType = RtAudioError::MEMORY_ERROR;
4901 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
4905 // stream process loop
4906 while ( stream_.state != STREAM_STOPPING ) {
4907 if ( !callbackPulled ) {
4910 // 1. Pull callback buffer from inputBuffer
4911 // 2. If 1. was successful: Convert callback buffer to user format
4913 if ( captureAudioClient ) {
4914 // Pull callback buffer from inputBuffer
4915 callbackPulled = captureBuffer.pullBuffer( stream_.deviceBuffer,
4916 ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT],
4917 stream_.deviceFormat[INPUT] );
4919 if ( callbackPulled ) {
4920 if ( stream_.doConvertBuffer[INPUT] ) {
4921 // Convert callback buffer to user format
4922 convertBuffer( stream_.userBuffer[INPUT],
4923 stream_.deviceBuffer,
4924 stream_.convertInfo[INPUT] );
4927 // no further conversion, simple copy deviceBuffer to userBuffer
4928 memcpy( stream_.userBuffer[INPUT],
4929 stream_.deviceBuffer,
4930 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
4935 // if there is no capture stream, set callbackPulled flag
4936 callbackPulled = true;
4941 // 1. Execute user callback method
4942 // 2. Handle return value from callback
4944 // if callback has not requested the stream to stop
4945 if ( callbackPulled && !callbackStopped ) {
4946 // Execute user callback method
4947 callbackResult = callback( stream_.userBuffer[OUTPUT],
4948 stream_.userBuffer[INPUT],
4951 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
4952 stream_.callbackInfo.userData );
4954 // Handle return value from callback
4955 if ( callbackResult == 1 ) {
4956 // instantiate a thread to stop this thread
4957 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
4958 if ( !threadHandle ) {
4959 errorType = RtAudioError::THREAD_ERROR;
4960 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
4963 else if ( !CloseHandle( threadHandle ) ) {
4964 errorType = RtAudioError::THREAD_ERROR;
4965 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
4969 callbackStopped = true;
4971 else if ( callbackResult == 2 ) {
4972 // instantiate a thread to stop this thread
4973 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
4974 if ( !threadHandle ) {
4975 errorType = RtAudioError::THREAD_ERROR;
4976 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
4979 else if ( !CloseHandle( threadHandle ) ) {
4980 errorType = RtAudioError::THREAD_ERROR;
4981 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
4985 callbackStopped = true;
4992 // 1. Convert callback buffer to stream format
4993 // 2. Push callback buffer into outputBuffer
4995 if ( renderAudioClient && callbackPulled ) {
4996 if ( stream_.doConvertBuffer[OUTPUT] ) {
4997 // Convert callback buffer to stream format
4998 convertBuffer( stream_.deviceBuffer,
4999 stream_.userBuffer[OUTPUT],
5000 stream_.convertInfo[OUTPUT] );
5004 // Push callback buffer into outputBuffer
5005 callbackPushed = renderBuffer.pushBuffer( stream_.deviceBuffer,
5006 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT],
5007 stream_.deviceFormat[OUTPUT] );
5010 // if there is no render stream, set callbackPushed flag
5011 callbackPushed = true;
5016 // 1. Get capture buffer from stream
5017 // 2. Push capture buffer into inputBuffer
5018 // 3. If 2. was successful: Release capture buffer
5020 if ( captureAudioClient ) {
5021 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5022 if ( !callbackPulled ) {
5023 WaitForSingleObject( captureEvent, INFINITE );
5026 // Get capture buffer from stream
5027 hr = captureClient->GetBuffer( &streamBuffer,
5029 &captureFlags, NULL, NULL );
5030 if ( FAILED( hr ) ) {
5031 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5035 if ( bufferFrameCount != 0 ) {
5036 // Push capture buffer into inputBuffer
5037 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5038 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5039 stream_.deviceFormat[INPUT] ) )
5041 // Release capture buffer
5042 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5043 if ( FAILED( hr ) ) {
5044 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5050 // Inform WASAPI that capture was unsuccessful
5051 hr = captureClient->ReleaseBuffer( 0 );
5052 if ( FAILED( hr ) ) {
5053 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5060 // Inform WASAPI that capture was unsuccessful
5061 hr = captureClient->ReleaseBuffer( 0 );
5062 if ( FAILED( hr ) ) {
5063 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5071 // 1. Get render buffer from stream
5072 // 2. Pull next buffer from outputBuffer
5073 // 3. If 2. was successful: Fill render buffer with next buffer
5074 // Release render buffer
5076 if ( renderAudioClient ) {
5077 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5078 if ( callbackPulled && !callbackPushed ) {
5079 WaitForSingleObject( renderEvent, INFINITE );
5082 // Get render buffer from stream
5083 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5084 if ( FAILED( hr ) ) {
5085 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5089 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5090 if ( FAILED( hr ) ) {
5091 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5095 bufferFrameCount -= numFramesPadding;
5097 if ( bufferFrameCount != 0 ) {
5098 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5099 if ( FAILED( hr ) ) {
5100 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5104 // Pull next buffer from outputBuffer
5105 // Fill render buffer with next buffer
5106 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5107 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5108 stream_.deviceFormat[OUTPUT] ) )
5110 // Release render buffer
5111 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5112 if ( FAILED( hr ) ) {
5113 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5119 // Inform WASAPI that render was unsuccessful
5120 hr = renderClient->ReleaseBuffer( 0, 0 );
5121 if ( FAILED( hr ) ) {
5122 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5129 // Inform WASAPI that render was unsuccessful
5130 hr = renderClient->ReleaseBuffer( 0, 0 );
5131 if ( FAILED( hr ) ) {
5132 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5138 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5139 if ( callbackPushed ) {
5140 callbackPulled = false;
5142 RtApi::tickStreamTime();
5149 CoTaskMemFree( captureFormat );
5150 CoTaskMemFree( renderFormat );
5154 // update stream state
5155 stream_.state = STREAM_STOPPED;
5157 if ( errorText_.empty() )
5163 //******************** End of __WINDOWS_WASAPI__ *********************//
5167 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5169 // Modified by Robin Davies, October 2005
5170 // - Improvements to DirectX pointer chasing.
5171 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5172 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5173 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5174 // Changed device query structure for RtAudio 4.0.7, January 2010
5176 #include <windows.h>
5177 #include <process.h>
5178 #include <mmsystem.h>
5182 #include <algorithm>
5184 #if defined(__MINGW32__)
5185 // missing from latest mingw winapi
5186 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5187 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5188 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5189 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5192 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5194 #ifdef _MSC_VER // if Microsoft Visual C++
5195 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5198 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5200 if ( pointer > bufferSize ) pointer -= bufferSize;
5201 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5202 if ( pointer < earlierPointer ) pointer += bufferSize;
5203 return pointer >= earlierPointer && pointer < laterPointer;
5206 // A structure to hold various information related to the DirectSound
5207 // API implementation.
5209 unsigned int drainCounter; // Tracks callback counts when draining
5210 bool internalDrain; // Indicates if stop is initiated from callback or not.
5214 UINT bufferPointer[2];
5215 DWORD dsBufferSize[2];
5216 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5220 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5223 // Declarations for utility functions, callbacks, and structures
5224 // specific to the DirectSound implementation.
5225 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5226 LPCTSTR description,
5230 static const char* getErrorString( int code );
5232 static unsigned __stdcall callbackHandler( void *ptr );
5241 : found(false) { validId[0] = false; validId[1] = false; }
5244 struct DsProbeData {
5246 std::vector<struct DsDevice>* dsDevices;
5249 RtApiDs :: RtApiDs()
5251 // Dsound will run both-threaded. If CoInitialize fails, then just
5252 // accept whatever the mainline chose for a threading model.
5253 coInitialized_ = false;
5254 HRESULT hr = CoInitialize( NULL );
5255 if ( !FAILED( hr ) ) coInitialized_ = true;
5258 RtApiDs :: ~RtApiDs()
5260 if ( stream_.state != STREAM_CLOSED ) closeStream();
5261 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5264 // The DirectSound default output is always the first device.
5265 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5270 // The DirectSound default input is always the first input device,
5271 // which is the first capture device enumerated.
5272 unsigned int RtApiDs :: getDefaultInputDevice( void )
5277 unsigned int RtApiDs :: getDeviceCount( void )
5279 // Set query flag for previously found devices to false, so that we
5280 // can check for any devices that have disappeared.
5281 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5282 dsDevices[i].found = false;
5284 // Query DirectSound devices.
5285 struct DsProbeData probeInfo;
5286 probeInfo.isInput = false;
5287 probeInfo.dsDevices = &dsDevices;
5288 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5289 if ( FAILED( result ) ) {
5290 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5291 errorText_ = errorStream_.str();
5292 error( RtAudioError::WARNING );
5295 // Query DirectSoundCapture devices.
5296 probeInfo.isInput = true;
5297 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5298 if ( FAILED( result ) ) {
5299 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5300 errorText_ = errorStream_.str();
5301 error( RtAudioError::WARNING );
5304 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5305 for ( unsigned int i=0; i<dsDevices.size(); ) {
5306 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5310 return static_cast<unsigned int>(dsDevices.size());
5313 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5315 RtAudio::DeviceInfo info;
5316 info.probed = false;
5318 if ( dsDevices.size() == 0 ) {
5319 // Force a query of all devices
5321 if ( dsDevices.size() == 0 ) {
5322 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5323 error( RtAudioError::INVALID_USE );
5328 if ( device >= dsDevices.size() ) {
5329 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5330 error( RtAudioError::INVALID_USE );
5335 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5337 LPDIRECTSOUND output;
5339 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5340 if ( FAILED( result ) ) {
5341 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5342 errorText_ = errorStream_.str();
5343 error( RtAudioError::WARNING );
5347 outCaps.dwSize = sizeof( outCaps );
5348 result = output->GetCaps( &outCaps );
5349 if ( FAILED( result ) ) {
5351 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5352 errorText_ = errorStream_.str();
5353 error( RtAudioError::WARNING );
5357 // Get output channel information.
5358 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5360 // Get sample rate information.
5361 info.sampleRates.clear();
5362 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5363 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5364 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5365 info.sampleRates.push_back( SAMPLE_RATES[k] );
5367 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5368 info.preferredSampleRate = SAMPLE_RATES[k];
5372 // Get format information.
5373 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5374 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5378 if ( getDefaultOutputDevice() == device )
5379 info.isDefaultOutput = true;
5381 if ( dsDevices[ device ].validId[1] == false ) {
5382 info.name = dsDevices[ device ].name;
5389 LPDIRECTSOUNDCAPTURE input;
5390 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5391 if ( FAILED( result ) ) {
5392 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5393 errorText_ = errorStream_.str();
5394 error( RtAudioError::WARNING );
5399 inCaps.dwSize = sizeof( inCaps );
5400 result = input->GetCaps( &inCaps );
5401 if ( FAILED( result ) ) {
5403 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5404 errorText_ = errorStream_.str();
5405 error( RtAudioError::WARNING );
5409 // Get input channel information.
5410 info.inputChannels = inCaps.dwChannels;
5412 // Get sample rate and format information.
5413 std::vector<unsigned int> rates;
5414 if ( inCaps.dwChannels >= 2 ) {
5415 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5416 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5417 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5418 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5419 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5420 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5421 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5422 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5424 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5425 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5426 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5427 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5428 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5430 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5431 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5432 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5433 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5434 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5437 else if ( inCaps.dwChannels == 1 ) {
5438 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5439 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5440 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5441 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5442 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5443 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5444 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5445 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5447 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5448 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5449 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5450 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5451 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5453 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5454 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5455 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5456 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5457 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5460 else info.inputChannels = 0; // technically, this would be an error
5464 if ( info.inputChannels == 0 ) return info;
5466 // Copy the supported rates to the info structure but avoid duplication.
5468 for ( unsigned int i=0; i<rates.size(); i++ ) {
5470 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5471 if ( rates[i] == info.sampleRates[j] ) {
5476 if ( found == false ) info.sampleRates.push_back( rates[i] );
5478 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5480 // If device opens for both playback and capture, we determine the channels.
5481 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5482 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5484 if ( device == 0 ) info.isDefaultInput = true;
5486 // Copy name and return.
5487 info.name = dsDevices[ device ].name;
5492 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5493 unsigned int firstChannel, unsigned int sampleRate,
5494 RtAudioFormat format, unsigned int *bufferSize,
5495 RtAudio::StreamOptions *options )
5497 if ( channels + firstChannel > 2 ) {
5498 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5502 size_t nDevices = dsDevices.size();
5503 if ( nDevices == 0 ) {
5504 // This should not happen because a check is made before this function is called.
5505 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5509 if ( device >= nDevices ) {
5510 // This should not happen because a check is made before this function is called.
5511 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5515 if ( mode == OUTPUT ) {
5516 if ( dsDevices[ device ].validId[0] == false ) {
5517 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5518 errorText_ = errorStream_.str();
5522 else { // mode == INPUT
5523 if ( dsDevices[ device ].validId[1] == false ) {
5524 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5525 errorText_ = errorStream_.str();
5530 // According to a note in PortAudio, using GetDesktopWindow()
5531 // instead of GetForegroundWindow() is supposed to avoid problems
5532 // that occur when the application's window is not the foreground
5533 // window. Also, if the application window closes before the
5534 // DirectSound buffer, DirectSound can crash. In the past, I had
5535 // problems when using GetDesktopWindow() but it seems fine now
5536 // (January 2010). I'll leave it commented here.
5537 // HWND hWnd = GetForegroundWindow();
5538 HWND hWnd = GetDesktopWindow();
5540 // Check the numberOfBuffers parameter and limit the lowest value to
5541 // two. This is a judgement call and a value of two is probably too
5542 // low for capture, but it should work for playback.
5544 if ( options ) nBuffers = options->numberOfBuffers;
5545 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5546 if ( nBuffers < 2 ) nBuffers = 3;
5548 // Check the lower range of the user-specified buffer size and set
5549 // (arbitrarily) to a lower bound of 32.
5550 if ( *bufferSize < 32 ) *bufferSize = 32;
5552 // Create the wave format structure. The data format setting will
5553 // be determined later.
5554 WAVEFORMATEX waveFormat;
5555 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5556 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5557 waveFormat.nChannels = channels + firstChannel;
5558 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5560 // Determine the device buffer size. By default, we'll use the value
5561 // defined above (32K), but we will grow it to make allowances for
5562 // very large software buffer sizes.
5563 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5564 DWORD dsPointerLeadTime = 0;
5566 void *ohandle = 0, *bhandle = 0;
5568 if ( mode == OUTPUT ) {
5570 LPDIRECTSOUND output;
5571 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5572 if ( FAILED( result ) ) {
5573 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5574 errorText_ = errorStream_.str();
5579 outCaps.dwSize = sizeof( outCaps );
5580 result = output->GetCaps( &outCaps );
5581 if ( FAILED( result ) ) {
5583 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5584 errorText_ = errorStream_.str();
5588 // Check channel information.
5589 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5590 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5591 errorText_ = errorStream_.str();
5595 // Check format information. Use 16-bit format unless not
5596 // supported or user requests 8-bit.
5597 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5598 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5599 waveFormat.wBitsPerSample = 16;
5600 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5603 waveFormat.wBitsPerSample = 8;
5604 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5606 stream_.userFormat = format;
5608 // Update wave format structure and buffer information.
5609 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5610 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5611 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5613 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5614 while ( dsPointerLeadTime * 2U > dsBufferSize )
5617 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5618 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5619 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5620 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5621 if ( FAILED( result ) ) {
5623 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5624 errorText_ = errorStream_.str();
5628 // Even though we will write to the secondary buffer, we need to
5629 // access the primary buffer to set the correct output format
5630 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5631 // buffer description.
5632 DSBUFFERDESC bufferDescription;
5633 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5634 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5635 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5637 // Obtain the primary buffer
5638 LPDIRECTSOUNDBUFFER buffer;
5639 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5640 if ( FAILED( result ) ) {
5642 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5643 errorText_ = errorStream_.str();
5647 // Set the primary DS buffer sound format.
5648 result = buffer->SetFormat( &waveFormat );
5649 if ( FAILED( result ) ) {
5651 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5652 errorText_ = errorStream_.str();
5656 // Setup the secondary DS buffer description.
5657 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5658 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5659 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5660 DSBCAPS_GLOBALFOCUS |
5661 DSBCAPS_GETCURRENTPOSITION2 |
5662 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5663 bufferDescription.dwBufferBytes = dsBufferSize;
5664 bufferDescription.lpwfxFormat = &waveFormat;
5666 // Try to create the secondary DS buffer. If that doesn't work,
5667 // try to use software mixing. Otherwise, there's a problem.
5668 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5669 if ( FAILED( result ) ) {
5670 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5671 DSBCAPS_GLOBALFOCUS |
5672 DSBCAPS_GETCURRENTPOSITION2 |
5673 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5674 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5675 if ( FAILED( result ) ) {
5677 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5678 errorText_ = errorStream_.str();
5683 // Get the buffer size ... might be different from what we specified.
5685 dsbcaps.dwSize = sizeof( DSBCAPS );
5686 result = buffer->GetCaps( &dsbcaps );
5687 if ( FAILED( result ) ) {
5690 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5691 errorText_ = errorStream_.str();
5695 dsBufferSize = dsbcaps.dwBufferBytes;
5697 // Lock the DS buffer
5700 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5701 if ( FAILED( result ) ) {
5704 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5705 errorText_ = errorStream_.str();
5709 // Zero the DS buffer
5710 ZeroMemory( audioPtr, dataLen );
5712 // Unlock the DS buffer
5713 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5714 if ( FAILED( result ) ) {
5717 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5718 errorText_ = errorStream_.str();
5722 ohandle = (void *) output;
5723 bhandle = (void *) buffer;
5726 if ( mode == INPUT ) {
5728 LPDIRECTSOUNDCAPTURE input;
5729 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5730 if ( FAILED( result ) ) {
5731 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5732 errorText_ = errorStream_.str();
5737 inCaps.dwSize = sizeof( inCaps );
5738 result = input->GetCaps( &inCaps );
5739 if ( FAILED( result ) ) {
5741 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5742 errorText_ = errorStream_.str();
5746 // Check channel information.
5747 if ( inCaps.dwChannels < channels + firstChannel ) {
5748 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5752 // Check format information. Use 16-bit format unless user
5754 DWORD deviceFormats;
5755 if ( channels + firstChannel == 2 ) {
5756 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5757 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5758 waveFormat.wBitsPerSample = 8;
5759 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5761 else { // assume 16-bit is supported
5762 waveFormat.wBitsPerSample = 16;
5763 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5766 else { // channel == 1
5767 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5768 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5769 waveFormat.wBitsPerSample = 8;
5770 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5772 else { // assume 16-bit is supported
5773 waveFormat.wBitsPerSample = 16;
5774 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5777 stream_.userFormat = format;
5779 // Update wave format structure and buffer information.
5780 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5781 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5782 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5784 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5785 while ( dsPointerLeadTime * 2U > dsBufferSize )
5788 // Setup the secondary DS buffer description.
5789 DSCBUFFERDESC bufferDescription;
5790 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5791 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5792 bufferDescription.dwFlags = 0;
5793 bufferDescription.dwReserved = 0;
5794 bufferDescription.dwBufferBytes = dsBufferSize;
5795 bufferDescription.lpwfxFormat = &waveFormat;
5797 // Create the capture buffer.
5798 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5799 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5800 if ( FAILED( result ) ) {
5802 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5803 errorText_ = errorStream_.str();
5807 // Get the buffer size ... might be different from what we specified.
5809 dscbcaps.dwSize = sizeof( DSCBCAPS );
5810 result = buffer->GetCaps( &dscbcaps );
5811 if ( FAILED( result ) ) {
5814 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5815 errorText_ = errorStream_.str();
5819 dsBufferSize = dscbcaps.dwBufferBytes;
5821 // NOTE: We could have a problem here if this is a duplex stream
5822 // and the play and capture hardware buffer sizes are different
5823 // (I'm actually not sure if that is a problem or not).
5824 // Currently, we are not verifying that.
5826 // Lock the capture buffer
5829 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5830 if ( FAILED( result ) ) {
5833 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5834 errorText_ = errorStream_.str();
5839 ZeroMemory( audioPtr, dataLen );
5841 // Unlock the buffer
5842 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5843 if ( FAILED( result ) ) {
5846 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5847 errorText_ = errorStream_.str();
5851 ohandle = (void *) input;
5852 bhandle = (void *) buffer;
5855 // Set various stream parameters
5856 DsHandle *handle = 0;
5857 stream_.nDeviceChannels[mode] = channels + firstChannel;
5858 stream_.nUserChannels[mode] = channels;
5859 stream_.bufferSize = *bufferSize;
5860 stream_.channelOffset[mode] = firstChannel;
5861 stream_.deviceInterleaved[mode] = true;
5862 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5863 else stream_.userInterleaved = true;
5865 // Set flag for buffer conversion
5866 stream_.doConvertBuffer[mode] = false;
5867 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5868 stream_.doConvertBuffer[mode] = true;
5869 if (stream_.userFormat != stream_.deviceFormat[mode])
5870 stream_.doConvertBuffer[mode] = true;
5871 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5872 stream_.nUserChannels[mode] > 1 )
5873 stream_.doConvertBuffer[mode] = true;
5875 // Allocate necessary internal buffers
5876 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5877 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5878 if ( stream_.userBuffer[mode] == NULL ) {
5879 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
5883 if ( stream_.doConvertBuffer[mode] ) {
5885 bool makeBuffer = true;
5886 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
5887 if ( mode == INPUT ) {
5888 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
5889 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
5890 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
5895 bufferBytes *= *bufferSize;
5896 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
5897 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
5898 if ( stream_.deviceBuffer == NULL ) {
5899 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
5905 // Allocate our DsHandle structures for the stream.
5906 if ( stream_.apiHandle == 0 ) {
5908 handle = new DsHandle;
5910 catch ( std::bad_alloc& ) {
5911 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
5915 // Create a manual-reset event.
5916 handle->condition = CreateEvent( NULL, // no security
5917 TRUE, // manual-reset
5918 FALSE, // non-signaled initially
5920 stream_.apiHandle = (void *) handle;
5923 handle = (DsHandle *) stream_.apiHandle;
5924 handle->id[mode] = ohandle;
5925 handle->buffer[mode] = bhandle;
5926 handle->dsBufferSize[mode] = dsBufferSize;
5927 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
5929 stream_.device[mode] = device;
5930 stream_.state = STREAM_STOPPED;
5931 if ( stream_.mode == OUTPUT && mode == INPUT )
5932 // We had already set up an output stream.
5933 stream_.mode = DUPLEX;
5935 stream_.mode = mode;
5936 stream_.nBuffers = nBuffers;
5937 stream_.sampleRate = sampleRate;
5939 // Setup the buffer conversion information structure.
5940 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
5942 // Setup the callback thread.
5943 if ( stream_.callbackInfo.isRunning == false ) {
5945 stream_.callbackInfo.isRunning = true;
5946 stream_.callbackInfo.object = (void *) this;
5947 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
5948 &stream_.callbackInfo, 0, &threadId );
5949 if ( stream_.callbackInfo.thread == 0 ) {
5950 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
5954 // Boost DS thread priority
5955 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
5961 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5962 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5963 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5964 if ( buffer ) buffer->Release();
5967 if ( handle->buffer[1] ) {
5968 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5969 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
5970 if ( buffer ) buffer->Release();
5973 CloseHandle( handle->condition );
5975 stream_.apiHandle = 0;
5978 for ( int i=0; i<2; i++ ) {
5979 if ( stream_.userBuffer[i] ) {
5980 free( stream_.userBuffer[i] );
5981 stream_.userBuffer[i] = 0;
5985 if ( stream_.deviceBuffer ) {
5986 free( stream_.deviceBuffer );
5987 stream_.deviceBuffer = 0;
5990 stream_.state = STREAM_CLOSED;
// Close an open DirectSound stream: shut down the callback thread,
// release the playback/capture buffer and device COM objects, free the
// internal user/device buffers, and mark the stream CLOSED.
void RtApiDs :: closeStream()
  // Closing an unopened stream is only a warning, not a fatal error.
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiDs::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
  // Stop the callback thread: clear the run flag, then join and close it.
  stream_.callbackInfo.isRunning = false;
  WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
  CloseHandle( (HANDLE) stream_.callbackInfo.thread );
  DsHandle *handle = (DsHandle *) stream_.apiHandle;
    // Release the playback buffer and its owning IDirectSound object.
    if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
      LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
      LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
    // Release the capture buffer and its owning IDirectSoundCapture object.
    if ( handle->buffer[1] ) {
      LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
      LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
    // Release the drain-signal event created in probeDeviceOpen.
    CloseHandle( handle->condition );
    stream_.apiHandle = 0;
  // Free the per-mode (playback/capture) user buffers.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
  // Free the optional format/channel conversion buffer.
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
// Start an open stream: begin looping playback and/or capture on the
// DirectSound buffers and mark the stream RUNNING.
void RtApiDs :: startStream()
  // Starting an already-running stream is only a warning.
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiDs::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
  DsHandle *handle = (DsHandle *) stream_.apiHandle;
  // Increase scheduler frequency on lesser windows (a side-effect of
  // increasing timer accuracy).  On greater windows (Win2K or later),
  // this is already in effect.
  timeBeginPeriod( 1 );
  // Reset the startup-synchronization state used by callbackEvent().
  buffersRolling = false;
  duplexPrerollBytes = 0;
  if ( stream_.mode == DUPLEX ) {
    // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
    duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
  // Start looping playback on the output buffer.
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
    result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
      errorText_ = errorStream_.str();
  // Start looping capture on the input buffer.
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
    result = buffer->Start( DSCBSTART_LOOPING );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
      errorText_ = errorStream_.str();
  // Reset drain state and the stop-signal event before running.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  ResetEvent( handle->condition );
  stream_.state = STREAM_RUNNING;
  if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream: drain queued output (if any), stop the
// DirectSound buffers, and zero them so a later restart does not replay
// stale audio.
void RtApiDs :: stopStream()
  // Stopping an already-stopped stream is only a warning.
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
  DsHandle *handle = (DsHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    // Ask the callback thread to drain, then wait for it to signal completion.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
    stream_.state = STREAM_STOPPED;
    MUTEX_LOCK( &stream_.mutex );
    // Stop the buffer and clear memory
    LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
      errorText_ = errorStream_.str();
    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
      errorText_ = errorStream_.str();
    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );
    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
      errorText_ = errorStream_.str();
    // If we start playing again, we must begin at beginning of buffer.
    handle->bufferPointer[0] = 0;
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
    stream_.state = STREAM_STOPPED;
    // In DUPLEX mode the mutex was already taken in the output branch above.
    if ( stream_.mode != DUPLEX )
      MUTEX_LOCK( &stream_.mutex );
    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
      errorText_ = errorStream_.str();
    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
      errorText_ = errorStream_.str();
    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );
    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
      errorText_ = errorStream_.str();
    // If we start recording again, we must begin at beginning of buffer.
    handle->bufferPointer[1] = 0;
  timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
  MUTEX_UNLOCK( &stream_.mutex );
  if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop the stream as quickly as possible: setting drainCounter to 2
// makes the callback thread substitute silence for output instead of
// waiting for queued audio to play out (see callbackEvent).
void RtApiDs :: abortStream()
  // Aborting an already-stopped stream is only a warning.
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
  DsHandle *handle = (DsHandle *) stream_.apiHandle;
  handle->drainCounter = 2;
6223 void RtApiDs :: callbackEvent()
6225 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6226 Sleep( 50 ); // sleep 50 milliseconds
6230 if ( stream_.state == STREAM_CLOSED ) {
6231 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6232 error( RtAudioError::WARNING );
6236 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6237 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6239 // Check if we were draining the stream and signal is finished.
6240 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6242 stream_.state = STREAM_STOPPING;
6243 if ( handle->internalDrain == false )
6244 SetEvent( handle->condition );
6250 // Invoke user callback to get fresh output data UNLESS we are
6252 if ( handle->drainCounter == 0 ) {
6253 RtAudioCallback callback = (RtAudioCallback) info->callback;
6254 double streamTime = getStreamTime();
6255 RtAudioStreamStatus status = 0;
6256 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6257 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6258 handle->xrun[0] = false;
6260 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6261 status |= RTAUDIO_INPUT_OVERFLOW;
6262 handle->xrun[1] = false;
6264 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6265 stream_.bufferSize, streamTime, status, info->userData );
6266 if ( cbReturnValue == 2 ) {
6267 stream_.state = STREAM_STOPPING;
6268 handle->drainCounter = 2;
6272 else if ( cbReturnValue == 1 ) {
6273 handle->drainCounter = 1;
6274 handle->internalDrain = true;
6279 DWORD currentWritePointer, safeWritePointer;
6280 DWORD currentReadPointer, safeReadPointer;
6281 UINT nextWritePointer;
6283 LPVOID buffer1 = NULL;
6284 LPVOID buffer2 = NULL;
6285 DWORD bufferSize1 = 0;
6286 DWORD bufferSize2 = 0;
6291 MUTEX_LOCK( &stream_.mutex );
6292 if ( stream_.state == STREAM_STOPPED ) {
6293 MUTEX_UNLOCK( &stream_.mutex );
6297 if ( buffersRolling == false ) {
6298 if ( stream_.mode == DUPLEX ) {
6299 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6301 // It takes a while for the devices to get rolling. As a result,
6302 // there's no guarantee that the capture and write device pointers
6303 // will move in lockstep. Wait here for both devices to start
6304 // rolling, and then set our buffer pointers accordingly.
6305 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6306 // bytes later than the write buffer.
6308 // Stub: a serious risk of having a pre-emptive scheduling round
6309 // take place between the two GetCurrentPosition calls... but I'm
6310 // really not sure how to solve the problem. Temporarily boost to
6311 // Realtime priority, maybe; but I'm not sure what priority the
6312 // DirectSound service threads run at. We *should* be roughly
6313 // within a ms or so of correct.
6315 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6316 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6318 DWORD startSafeWritePointer, startSafeReadPointer;
6320 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6321 if ( FAILED( result ) ) {
6322 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6323 errorText_ = errorStream_.str();
6324 MUTEX_UNLOCK( &stream_.mutex );
6325 error( RtAudioError::SYSTEM_ERROR );
6328 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6329 if ( FAILED( result ) ) {
6330 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6331 errorText_ = errorStream_.str();
6332 MUTEX_UNLOCK( &stream_.mutex );
6333 error( RtAudioError::SYSTEM_ERROR );
6337 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6338 if ( FAILED( result ) ) {
6339 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6340 errorText_ = errorStream_.str();
6341 MUTEX_UNLOCK( &stream_.mutex );
6342 error( RtAudioError::SYSTEM_ERROR );
6345 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6346 if ( FAILED( result ) ) {
6347 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6348 errorText_ = errorStream_.str();
6349 MUTEX_UNLOCK( &stream_.mutex );
6350 error( RtAudioError::SYSTEM_ERROR );
6353 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6357 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6359 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6360 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6361 handle->bufferPointer[1] = safeReadPointer;
6363 else if ( stream_.mode == OUTPUT ) {
6365 // Set the proper nextWritePosition after initial startup.
6366 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6367 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6368 if ( FAILED( result ) ) {
6369 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6370 errorText_ = errorStream_.str();
6371 MUTEX_UNLOCK( &stream_.mutex );
6372 error( RtAudioError::SYSTEM_ERROR );
6375 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6376 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6379 buffersRolling = true;
6382 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6384 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6386 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6387 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6388 bufferBytes *= formatBytes( stream_.userFormat );
6389 memset( stream_.userBuffer[0], 0, bufferBytes );
6392 // Setup parameters and do buffer conversion if necessary.
6393 if ( stream_.doConvertBuffer[0] ) {
6394 buffer = stream_.deviceBuffer;
6395 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6396 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6397 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6400 buffer = stream_.userBuffer[0];
6401 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6402 bufferBytes *= formatBytes( stream_.userFormat );
6405 // No byte swapping necessary in DirectSound implementation.
6407 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6408 // unsigned. So, we need to convert our signed 8-bit data here to
6410 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6411 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6413 DWORD dsBufferSize = handle->dsBufferSize[0];
6414 nextWritePointer = handle->bufferPointer[0];
6416 DWORD endWrite, leadPointer;
6418 // Find out where the read and "safe write" pointers are.
6419 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6420 if ( FAILED( result ) ) {
6421 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6422 errorText_ = errorStream_.str();
6423 MUTEX_UNLOCK( &stream_.mutex );
6424 error( RtAudioError::SYSTEM_ERROR );
6428 // We will copy our output buffer into the region between
6429 // safeWritePointer and leadPointer. If leadPointer is not
6430 // beyond the next endWrite position, wait until it is.
6431 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6432 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6433 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6434 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6435 endWrite = nextWritePointer + bufferBytes;
6437 // Check whether the entire write region is behind the play pointer.
6438 if ( leadPointer >= endWrite ) break;
6440 // If we are here, then we must wait until the leadPointer advances
6441 // beyond the end of our next write region. We use the
6442 // Sleep() function to suspend operation until that happens.
6443 double millis = ( endWrite - leadPointer ) * 1000.0;
6444 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6445 if ( millis < 1.0 ) millis = 1.0;
6446 Sleep( (DWORD) millis );
6449 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6450 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6451 // We've strayed into the forbidden zone ... resync the read pointer.
6452 handle->xrun[0] = true;
6453 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6454 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6455 handle->bufferPointer[0] = nextWritePointer;
6456 endWrite = nextWritePointer + bufferBytes;
6459 // Lock free space in the buffer
6460 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6461 &bufferSize1, &buffer2, &bufferSize2, 0 );
6462 if ( FAILED( result ) ) {
6463 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6464 errorText_ = errorStream_.str();
6465 MUTEX_UNLOCK( &stream_.mutex );
6466 error( RtAudioError::SYSTEM_ERROR );
6470 // Copy our buffer into the DS buffer
6471 CopyMemory( buffer1, buffer, bufferSize1 );
6472 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6474 // Update our buffer offset and unlock sound buffer
6475 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6476 if ( FAILED( result ) ) {
6477 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6478 errorText_ = errorStream_.str();
6479 MUTEX_UNLOCK( &stream_.mutex );
6480 error( RtAudioError::SYSTEM_ERROR );
6483 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6484 handle->bufferPointer[0] = nextWritePointer;
6487 // Don't bother draining input
6488 if ( handle->drainCounter ) {
6489 handle->drainCounter++;
6493 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6495 // Setup parameters.
6496 if ( stream_.doConvertBuffer[1] ) {
6497 buffer = stream_.deviceBuffer;
6498 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6499 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6502 buffer = stream_.userBuffer[1];
6503 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6504 bufferBytes *= formatBytes( stream_.userFormat );
6507 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6508 long nextReadPointer = handle->bufferPointer[1];
6509 DWORD dsBufferSize = handle->dsBufferSize[1];
6511 // Find out where the write and "safe read" pointers are.
6512 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6513 if ( FAILED( result ) ) {
6514 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6515 errorText_ = errorStream_.str();
6516 MUTEX_UNLOCK( &stream_.mutex );
6517 error( RtAudioError::SYSTEM_ERROR );
6521 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6522 DWORD endRead = nextReadPointer + bufferBytes;
6524 // Handling depends on whether we are INPUT or DUPLEX.
6525 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6526 // then a wait here will drag the write pointers into the forbidden zone.
6528 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6529 // it's in a safe position. This causes dropouts, but it seems to be the only
6530 // practical way to sync up the read and write pointers reliably, given the
6531 // the very complex relationship between phase and increment of the read and write
6534 // In order to minimize audible dropouts in DUPLEX mode, we will
6535 // provide a pre-roll period of 0.5 seconds in which we return
6536 // zeros from the read buffer while the pointers sync up.
6538 if ( stream_.mode == DUPLEX ) {
6539 if ( safeReadPointer < endRead ) {
6540 if ( duplexPrerollBytes <= 0 ) {
6541 // Pre-roll time over. Be more agressive.
6542 int adjustment = endRead-safeReadPointer;
6544 handle->xrun[1] = true;
6546 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6547 // and perform fine adjustments later.
6548 // - small adjustments: back off by twice as much.
6549 if ( adjustment >= 2*bufferBytes )
6550 nextReadPointer = safeReadPointer-2*bufferBytes;
6552 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6554 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6558 // In pre=roll time. Just do it.
6559 nextReadPointer = safeReadPointer - bufferBytes;
6560 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6562 endRead = nextReadPointer + bufferBytes;
6565 else { // mode == INPUT
6566 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6567 // See comments for playback.
6568 double millis = (endRead - safeReadPointer) * 1000.0;
6569 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6570 if ( millis < 1.0 ) millis = 1.0;
6571 Sleep( (DWORD) millis );
6573 // Wake up and find out where we are now.
6574 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6575 if ( FAILED( result ) ) {
6576 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6577 errorText_ = errorStream_.str();
6578 MUTEX_UNLOCK( &stream_.mutex );
6579 error( RtAudioError::SYSTEM_ERROR );
6583 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6587 // Lock free space in the buffer
6588 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6589 &bufferSize1, &buffer2, &bufferSize2, 0 );
6590 if ( FAILED( result ) ) {
6591 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6592 errorText_ = errorStream_.str();
6593 MUTEX_UNLOCK( &stream_.mutex );
6594 error( RtAudioError::SYSTEM_ERROR );
6598 if ( duplexPrerollBytes <= 0 ) {
6599 // Copy our buffer into the DS buffer
6600 CopyMemory( buffer, buffer1, bufferSize1 );
6601 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6604 memset( buffer, 0, bufferSize1 );
6605 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6606 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6609 // Update our buffer offset and unlock sound buffer
6610 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6611 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6612 if ( FAILED( result ) ) {
6613 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6614 errorText_ = errorStream_.str();
6615 MUTEX_UNLOCK( &stream_.mutex );
6616 error( RtAudioError::SYSTEM_ERROR );
6619 handle->bufferPointer[1] = nextReadPointer;
6621 // No byte swapping necessary in DirectSound implementation.
6623 // If necessary, convert 8-bit data from unsigned to signed.
6624 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6625 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6627 // Do buffer conversion if necessary.
6628 if ( stream_.doConvertBuffer[1] )
6629 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6633 MUTEX_UNLOCK( &stream_.mutex );
6634 RtApi::tickStreamTime();
6637 // Definitions for utility functions and callbacks
6638 // specific to the DirectSound implementation.
// Entry point for the stream's callback thread (created with
// _beginthreadex in probeDeviceOpen).  Repeatedly drives
// RtApiDs::callbackEvent() until closeStream() clears info->isRunning.
static unsigned __stdcall callbackHandler( void *ptr )
  CallbackInfo *info = (CallbackInfo *) ptr;
  RtApiDs *object = (RtApiDs *) info->object;
  // Re-read the flag each iteration; closeStream() flips it from another thread.
  bool* isRunning = &info->isRunning;
  while ( *isRunning == true ) {
    object->callbackEvent();
// Device-enumeration callback handed to the DirectSound enumeration
// APIs.  Validates each reported device (it must open and report usable
// capabilities) and records its name and GUID in the shared dsDevices
// vector referenced through lpContext.  Returning TRUE continues
// enumeration.
static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
                                          LPCTSTR description,
  struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
  std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
  bool validDevice = false;
  if ( probeInfo.isInput == true ) {
    // Probe as a capture device.
    LPDIRECTSOUNDCAPTURE object;
    hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
    if ( hr != DS_OK ) return TRUE;
    caps.dwSize = sizeof(caps);
    hr = object->GetCaps( &caps );
    if ( hr == DS_OK ) {
      // Usable only if it actually reports channels and formats.
      if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
    // Probe as an output (playback) device.
    LPDIRECTSOUND object;
    hr = DirectSoundCreate(  lpguid, &object, NULL );
    if ( hr != DS_OK ) return TRUE;
    caps.dwSize = sizeof(caps);
    hr = object->GetCaps( &caps );
    if ( hr == DS_OK ) {
      // Usable only if a primary mono or stereo buffer is supported.
      if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
  // If good device, then save its name and guid.
  std::string name = convertCharPointerToStdString( description );
  //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
  if ( lpguid == NULL )
    name = "Default Device";
  if ( validDevice ) {
    // Update an existing entry if a device with this name was seen before
    // (the same physical device is enumerated for both input and output) ...
    for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
      if ( dsDevices[i].name == name ) {
        dsDevices[i].found = true;
        if ( probeInfo.isInput ) {
          // id/validId slot 1 holds the capture GUID.
          dsDevices[i].id[1] = lpguid;
          dsDevices[i].validId[1] = true;
          // id/validId slot 0 holds the playback GUID.
          dsDevices[i].id[0] = lpguid;
          dsDevices[i].validId[0] = true;
    // ... otherwise append a brand-new device record.
    device.found = true;
    if ( probeInfo.isInput ) {
      device.id[1] = lpguid;
      device.validId[1] = true;
      device.id[0] = lpguid;
      device.validId[0] = true;
    dsDevices.push_back( device );
// Map a DirectSound HRESULT error code to a short human-readable string
// for the errorStream_ messages built throughout this implementation.
static const char* getErrorString( int code )
  case DSERR_ALLOCATED:
    return "Already allocated";
  case DSERR_CONTROLUNAVAIL:
    return "Control unavailable";
  case DSERR_INVALIDPARAM:
    return "Invalid parameter";
  case DSERR_INVALIDCALL:
    return "Invalid call";
    return "Generic error";
  case DSERR_PRIOLEVELNEEDED:
    return "Priority level needed";
  case DSERR_OUTOFMEMORY:
    return "Out of memory";
  case DSERR_BADFORMAT:
    return "The sample rate or the channel format is not supported";
  case DSERR_UNSUPPORTED:
    return "Not supported";
  case DSERR_NODRIVER:
  case DSERR_ALREADYINITIALIZED:
    return "Already initialized";
  case DSERR_NOAGGREGATION:
    return "No aggregation";
  case DSERR_BUFFERLOST:
    return "Buffer lost";
  case DSERR_OTHERAPPHASPRIO:
    return "Another application already has priority";
  case DSERR_UNINITIALIZED:
    return "Uninitialized";
    // Fallback for any code not listed above.
    return "DirectSound unknown error";
6785 //******************** End of __WINDOWS_DS__ *********************//
6789 #if defined(__LINUX_ALSA__)
6791 #include <alsa/asoundlib.h>
// Fragment of the AlsaHandle per-stream data. NOTE(review): the
// "struct AlsaHandle {" opener, the synchronized/runnable/xrun member
// declarations and the closing brace are missing from this corrupted
// excerpt, and each line starts with a pasted-in upstream line number.
6794 // A structure to hold various information related to the ALSA API
// handles[0] is the playback pcm, handles[1] the capture pcm
// (see the apiInfo->handles[mode] assignment in probeDeviceOpen).
6797 snd_pcm_t *handles[2];
// Condition variable used with stream_.mutex to park/wake the callback
// thread while the stream is stopped.
6800 pthread_cond_t runnable_cv;
// Constructor initializer list: stream starts unlinked and not runnable,
// with no xrun recorded for either direction.
6804 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
// Forward declaration of the callback-thread entry point passed to
// pthread_create in probeDeviceOpen.
6807 static void *alsaCallbackHandler( void * ptr );
// Default constructor: all setup happens lazily in probeDeviceOpen().
// (Braces are missing from this corrupted excerpt.)
6809 RtApiAlsa :: RtApiAlsa()
6811 // Nothing to do here.
// Destructor: ensure any open stream is shut down and its resources
// released before the API object goes away.
6814 RtApiAlsa :: ~RtApiAlsa()
6816 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count available ALSA pcm devices by walking every sound card and its
// subdevices via the control interface, then additionally checking for a
// "default" device. Control-open/next-device failures are reported as
// warnings and the scan continues.
// NOTE(review): corrupted excerpt -- handle/name declarations, the
// error-branch braces, the per-card device loop body and the final return
// are missing, and each line starts with a pasted-in upstream line number.
6819 unsigned int RtApiAlsa :: getDeviceCount( void )
6821 unsigned nDevices = 0;
6822 int result, subdevice, card;
6826 // Count cards and devices
// Iterate over cards; snd_card_next(-1) yields the first card.
6828 snd_card_next( &card );
6829 while ( card >= 0 ) {
6830 sprintf( name, "hw:%d", card );
6831 result = snd_ctl_open( &handle, name, 0 );
// Card control node could not be opened: warn and move on.
6833 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6834 errorText_ = errorStream_.str();
6835 error( RtAudioError::WARNING );
// Enumerate pcm devices on this card.
6840 result = snd_ctl_pcm_next_device( handle, &subdevice );
6842 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6843 errorText_ = errorStream_.str();
6844 error( RtAudioError::WARNING );
// subdevice < 0 means no more pcm devices on this card.
6847 if ( subdevice < 0 )
6852 snd_ctl_close( handle );
6853 snd_card_next( &card );
// Also count the "default" device if it can be opened.
6856 result = snd_ctl_open( &handle, "default", 0 );
6859 snd_ctl_close( handle );
// Probe one ALSA device (by enumeration index) and fill an
// RtAudio::DeviceInfo: output/input/duplex channel counts, supported
// sample rates, preferred rate, native formats and name. If a stream is
// already open on the device, returns the cached result from
// saveDeviceInfo() instead of re-probing.
// NOTE(review): corrupted excerpt -- many upstream lines are missing
// (chandle/phandle/name/value declarations, brace closers, the
// probeParameters/probeFormats labels, and the final info.probed/name
// assignments and return), and each line starts with a pasted-in upstream
// line number. The goto-based cleanup flow makes a restyle unsafe here;
// comments only.
6865 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6867 RtAudio::DeviceInfo info;
6868 info.probed = false;
6870 unsigned nDevices = 0;
6871 int result, subdevice, card;
6875 // Count cards and devices
// Walk the cards to locate the card/subdevice pair matching 'device',
// leaving its "hw:card,subdevice" spec in 'name'.
6878 snd_card_next( &card );
6879 while ( card >= 0 ) {
6880 sprintf( name, "hw:%d", card );
6881 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
6883 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6884 errorText_ = errorStream_.str();
6885 error( RtAudioError::WARNING );
6890 result = snd_ctl_pcm_next_device( chandle, &subdevice );
6892 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6893 errorText_ = errorStream_.str();
6894 error( RtAudioError::WARNING );
6897 if ( subdevice < 0 ) break;
6898 if ( nDevices == device ) {
6899 sprintf( name, "hw:%d,%d", card, subdevice );
6905 snd_ctl_close( chandle );
6906 snd_card_next( &card );
// The "default" device is enumerated last.
6909 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
6910 if ( result == 0 ) {
6911 if ( nDevices == device ) {
6912 strcpy( name, "default" );
// Validate the requested index.
6918 if ( nDevices == 0 ) {
6919 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
6920 error( RtAudioError::INVALID_USE );
6924 if ( device >= nDevices ) {
6925 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
6926 error( RtAudioError::INVALID_USE );
6932 // If a stream is already open, we cannot probe the stream devices.
6933 // Thus, use the saved results.
6934 if ( stream_.state != STREAM_CLOSED &&
6935 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
6936 snd_ctl_close( chandle );
6937 if ( device >= devices_.size() ) {
6938 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
6939 error( RtAudioError::WARNING );
6942 return devices_[ device ];
// Stack-allocate the pcm info/params structures used for probing.
6945 int openMode = SND_PCM_ASYNC;
6946 snd_pcm_stream_t stream;
6947 snd_pcm_info_t *pcminfo;
6948 snd_pcm_info_alloca( &pcminfo );
6950 snd_pcm_hw_params_t *params;
// NOTE(review): '¶ms' is mojibake for '&params' -- confirm vs upstream.
6951 snd_pcm_hw_params_alloca( ¶ms );
6953 // First try for playback unless default device (which has subdev -1)
6954 stream = SND_PCM_STREAM_PLAYBACK;
6955 snd_pcm_info_set_stream( pcminfo, stream );
6956 if ( subdevice != -1 ) {
6957 snd_pcm_info_set_device( pcminfo, subdevice );
6958 snd_pcm_info_set_subdevice( pcminfo, 0 );
6960 result = snd_ctl_pcm_info( chandle, pcminfo );
6962 // Device probably doesn't support playback.
// Open the playback side non-blocking so a busy device doesn't hang us.
6967 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
6969 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
6970 errorText_ = errorStream_.str();
6971 error( RtAudioError::WARNING );
6975 // The device is open ... fill the parameter structure.
6976 result = snd_pcm_hw_params_any( phandle, params );
6978 snd_pcm_close( phandle );
6979 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
6980 errorText_ = errorStream_.str();
6981 error( RtAudioError::WARNING );
6985 // Get output channel information.
6987 result = snd_pcm_hw_params_get_channels_max( params, &value );
6989 snd_pcm_close( phandle );
6990 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
6991 errorText_ = errorStream_.str();
6992 error( RtAudioError::WARNING );
6995 info.outputChannels = value;
6996 snd_pcm_close( phandle );
// Repeat the probe for the capture direction.
6999 stream = SND_PCM_STREAM_CAPTURE;
7000 snd_pcm_info_set_stream( pcminfo, stream );
7002 // Now try for capture unless default device (with subdev = -1)
7003 if ( subdevice != -1 ) {
7004 result = snd_ctl_pcm_info( chandle, pcminfo );
7005 snd_ctl_close( chandle );
7007 // Device probably doesn't support capture.
7008 if ( info.outputChannels == 0 ) return info;
7009 goto probeParameters;
7013 snd_ctl_close( chandle );
7015 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7017 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7018 errorText_ = errorStream_.str();
7019 error( RtAudioError::WARNING );
7020 if ( info.outputChannels == 0 ) return info;
7021 goto probeParameters;
7024 // The device is open ... fill the parameter structure.
7025 result = snd_pcm_hw_params_any( phandle, params );
7027 snd_pcm_close( phandle );
7028 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7029 errorText_ = errorStream_.str();
7030 error( RtAudioError::WARNING );
7031 if ( info.outputChannels == 0 ) return info;
7032 goto probeParameters;
7035 result = snd_pcm_hw_params_get_channels_max( params, &value );
7037 snd_pcm_close( phandle );
7038 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7039 errorText_ = errorStream_.str();
7040 error( RtAudioError::WARNING );
7041 if ( info.outputChannels == 0 ) return info;
7042 goto probeParameters;
7044 info.inputChannels = value;
7045 snd_pcm_close( phandle );
7047 // If device opens for both playback and capture, we determine the channels.
7048 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7049 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7051 // ALSA doesn't provide default devices so we'll use the first available one.
7052 if ( device == 0 && info.outputChannels > 0 )
7053 info.isDefaultOutput = true;
7054 if ( device == 0 && info.inputChannels > 0 )
7055 info.isDefaultInput = true;
7058 // At this point, we just need to figure out the supported data
7059 // formats and sample rates. We'll proceed by opening the device in
7060 // the direction with the maximum number of channels, or playback if
7061 // they are equal. This might limit our sample rate options, but so
7064 if ( info.outputChannels >= info.inputChannels )
7065 stream = SND_PCM_STREAM_PLAYBACK;
7067 stream = SND_PCM_STREAM_CAPTURE;
7068 snd_pcm_info_set_stream( pcminfo, stream );
7070 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7072 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7073 errorText_ = errorStream_.str();
7074 error( RtAudioError::WARNING );
7078 // The device is open ... fill the parameter structure.
7079 result = snd_pcm_hw_params_any( phandle, params );
7081 snd_pcm_close( phandle );
7082 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7083 errorText_ = errorStream_.str();
7084 error( RtAudioError::WARNING );
7088 // Test our discrete set of sample rate values.
7089 info.sampleRates.clear();
7090 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7091 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7092 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate <= 48000 Hz.
7094 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7095 info.preferredSampleRate = SAMPLE_RATES[i];
7098 if ( info.sampleRates.size() == 0 ) {
7099 snd_pcm_close( phandle );
7100 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7101 errorText_ = errorStream_.str();
7102 error( RtAudioError::WARNING );
7106 // Probe the supported data formats ... we don't care about endian-ness just yet
7107 snd_pcm_format_t format;
7108 info.nativeFormats = 0;
7109 format = SND_PCM_FORMAT_S8;
7110 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7111 info.nativeFormats |= RTAUDIO_SINT8;
7112 format = SND_PCM_FORMAT_S16;
7113 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7114 info.nativeFormats |= RTAUDIO_SINT16;
7115 format = SND_PCM_FORMAT_S24;
7116 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7117 info.nativeFormats |= RTAUDIO_SINT24;
7118 format = SND_PCM_FORMAT_S32;
7119 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7120 info.nativeFormats |= RTAUDIO_SINT32;
7121 format = SND_PCM_FORMAT_FLOAT;
7122 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7123 info.nativeFormats |= RTAUDIO_FLOAT32;
7124 format = SND_PCM_FORMAT_FLOAT64;
7125 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7126 info.nativeFormats |= RTAUDIO_FLOAT64;
7128 // Check that we have at least one supported format
7129 if ( info.nativeFormats == 0 ) {
7130 snd_pcm_close( phandle );
7131 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7132 errorText_ = errorStream_.str();
7133 error( RtAudioError::WARNING );
7137 // Get the device name
7139 result = snd_card_get_name( card, &cardname );
7140 if ( result >= 0 ) {
7141 sprintf( name, "hw:%s,%d", cardname, subdevice );
7146 // That's all ... close the device and return
7147 snd_pcm_close( phandle );
// Cache a DeviceInfo snapshot for every device in devices_. Called from
// probeDeviceOpen() because getDeviceInfo() cannot probe a device that is
// already open. (Braces are missing from this corrupted excerpt.)
7152 void RtApiAlsa :: saveDeviceInfo( void )
7156 unsigned int nDevices = getDeviceCount();
7157 devices_.resize( nDevices );
7158 for ( unsigned int i=0; i<nDevices; i++ )
7159 devices_[i] = getDeviceInfo( i );
// Open one direction (OUTPUT or INPUT) of an ALSA stream and configure it:
// locate the "hw:card,dev" name, open the pcm, negotiate access mode,
// sample format, rate, channels, period size/count, install hw/sw params,
// allocate user/device buffers, link duplex handles if possible, and spawn
// the callback thread (with optional SCHED_RR realtime priority).
// Returns true (SUCCESS) on success; error paths close the pcm, set
// errorText_ and fall through to the FAILURE-cleanup block at the end.
// NOTE(review): corrupted excerpt -- many upstream lines are missing
// (declarations of phandle/chandle/name/value/dir, foundDevice flow,
// if(result<0) guards, else-branches, try/goto labels, return statements),
// and each line starts with a pasted-in upstream line number. The exact
// statement ordering and goto-style cleanup make a restyle unsafe;
// comments only.
7162 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7163 unsigned int firstChannel, unsigned int sampleRate,
7164 RtAudioFormat format, unsigned int *bufferSize,
7165 RtAudio::StreamOptions *options )
7168 #if defined(__RTAUDIO_DEBUG__)
7170 snd_output_stdio_attach(&out, stderr, 0);
7173 // I'm not using the "plug" interface ... too much inconsistent behavior.
7175 unsigned nDevices = 0;
7176 int result, subdevice, card;
// User may force the ALSA "default" device via a stream-option flag.
7180 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7181 snprintf(name, sizeof(name), "%s", "default");
7183 // Count cards and devices
// Resolve the enumeration index 'device' to its "hw:card,dev" name.
7185 snd_card_next( &card );
7186 while ( card >= 0 ) {
7187 sprintf( name, "hw:%d", card );
7188 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7190 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7191 errorText_ = errorStream_.str();
7196 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7197 if ( result < 0 ) break;
7198 if ( subdevice < 0 ) break;
7199 if ( nDevices == device ) {
7200 sprintf( name, "hw:%d,%d", card, subdevice );
7201 snd_ctl_close( chandle );
7206 snd_ctl_close( chandle );
7207 snd_card_next( &card );
7210 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7211 if ( result == 0 ) {
7212 if ( nDevices == device ) {
7213 strcpy( name, "default" );
7219 if ( nDevices == 0 ) {
7220 // This should not happen because a check is made before this function is called.
7221 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7225 if ( device >= nDevices ) {
7226 // This should not happen because a check is made before this function is called.
7227 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7234 // The getDeviceInfo() function will not work for a device that is
7235 // already open. Thus, we'll probe the system before opening a
7236 // stream and save the results for use by getDeviceInfo().
7237 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7238 this->saveDeviceInfo();
7240 snd_pcm_stream_t stream;
7241 if ( mode == OUTPUT )
7242 stream = SND_PCM_STREAM_PLAYBACK;
7244 stream = SND_PCM_STREAM_CAPTURE;
// Open blocking here (no SND_PCM_NONBLOCK) -- this is the real stream handle.
7247 int openMode = SND_PCM_ASYNC;
7248 result = snd_pcm_open( &phandle, name, stream, openMode );
7250 if ( mode == OUTPUT )
7251 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7253 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7254 errorText_ = errorStream_.str();
7258 // Fill the parameter structure.
7259 snd_pcm_hw_params_t *hw_params;
7260 snd_pcm_hw_params_alloca( &hw_params );
7261 result = snd_pcm_hw_params_any( phandle, hw_params );
7263 snd_pcm_close( phandle );
7264 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7265 errorText_ = errorStream_.str();
7269 #if defined(__RTAUDIO_DEBUG__)
7270 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7271 snd_pcm_hw_params_dump( hw_params, out );
7274 // Set access ... check user preference.
// Try the requested interleaving first; fall back to the other layout and
// record deviceInterleaved[mode] so the conversion code can compensate.
7275 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7276 stream_.userInterleaved = false;
7277 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7279 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7280 stream_.deviceInterleaved[mode] = true;
7283 stream_.deviceInterleaved[mode] = false;
7286 stream_.userInterleaved = true;
7287 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7289 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7290 stream_.deviceInterleaved[mode] = false;
7293 stream_.deviceInterleaved[mode] = true;
7297 snd_pcm_close( phandle );
7298 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7299 errorText_ = errorStream_.str();
7303 // Determine how to set the device format.
7304 stream_.userFormat = format;
7305 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7307 if ( format == RTAUDIO_SINT8 )
7308 deviceFormat = SND_PCM_FORMAT_S8;
7309 else if ( format == RTAUDIO_SINT16 )
7310 deviceFormat = SND_PCM_FORMAT_S16;
7311 else if ( format == RTAUDIO_SINT24 )
7312 deviceFormat = SND_PCM_FORMAT_S24;
7313 else if ( format == RTAUDIO_SINT32 )
7314 deviceFormat = SND_PCM_FORMAT_S32;
7315 else if ( format == RTAUDIO_FLOAT32 )
7316 deviceFormat = SND_PCM_FORMAT_FLOAT;
7317 else if ( format == RTAUDIO_FLOAT64 )
7318 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7320 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7321 stream_.deviceFormat[mode] = format;
7325 // The user requested format is not natively supported by the device.
// Fall back through candidate formats, widest first (conversion handles
// the user<->device format mismatch).
7326 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7327 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7328 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7332 deviceFormat = SND_PCM_FORMAT_FLOAT;
7333 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7334 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7338 deviceFormat = SND_PCM_FORMAT_S32;
7339 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7340 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7344 deviceFormat = SND_PCM_FORMAT_S24;
7345 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7346 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7350 deviceFormat = SND_PCM_FORMAT_S16;
7351 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7352 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7356 deviceFormat = SND_PCM_FORMAT_S8;
7357 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7358 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7362 // If we get here, no supported format was found.
7363 snd_pcm_close( phandle );
7364 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7365 errorText_ = errorStream_.str();
7369 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7371 snd_pcm_close( phandle );
7372 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7373 errorText_ = errorStream_.str();
7377 // Determine whether byte-swapping is necessary.
7378 stream_.doByteSwap[mode] = false;
7379 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7380 result = snd_pcm_format_cpu_endian( deviceFormat );
7382 stream_.doByteSwap[mode] = true;
7383 else if (result < 0) {
7384 snd_pcm_close( phandle );
7385 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7386 errorText_ = errorStream_.str();
7391 // Set the sample rate.
7392 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7394 snd_pcm_close( phandle );
7395 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7396 errorText_ = errorStream_.str();
7400 // Determine the number of channels for this device. We support a possible
7401 // minimum device channel number > than the value requested by the user.
7402 stream_.nUserChannels[mode] = channels;
7404 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7405 unsigned int deviceChannels = value;
7406 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7407 snd_pcm_close( phandle );
7408 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7409 errorText_ = errorStream_.str();
7413 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7415 snd_pcm_close( phandle );
7416 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7417 errorText_ = errorStream_.str();
7420 deviceChannels = value;
7421 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7422 stream_.nDeviceChannels[mode] = deviceChannels;
7424 // Set the device channels.
7425 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7427 snd_pcm_close( phandle );
7428 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7429 errorText_ = errorStream_.str();
7433 // Set the buffer (or period) size.
7435 snd_pcm_uframes_t periodSize = *bufferSize;
7436 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7438 snd_pcm_close( phandle );
7439 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7440 errorText_ = errorStream_.str();
// Report back the period size the device actually granted.
7443 *bufferSize = periodSize;
7445 // Set the buffer number, which in ALSA is referred to as the "period".
7446 unsigned int periods = 0;
7447 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7448 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7449 if ( periods < 2 ) periods = 4; // a fairly safe default value
7450 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7452 snd_pcm_close( phandle );
7453 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7454 errorText_ = errorStream_.str();
7458 // If attempting to setup a duplex stream, the bufferSize parameter
7459 // MUST be the same in both directions!
7460 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7461 snd_pcm_close( phandle );
7462 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7463 errorText_ = errorStream_.str();
7467 stream_.bufferSize = *bufferSize;
7469 // Install the hardware configuration
7470 result = snd_pcm_hw_params( phandle, hw_params );
7472 snd_pcm_close( phandle );
7473 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7474 errorText_ = errorStream_.str();
7478 #if defined(__RTAUDIO_DEBUG__)
7479 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7480 snd_pcm_hw_params_dump( hw_params, out );
7483 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7484 snd_pcm_sw_params_t *sw_params = NULL;
7485 snd_pcm_sw_params_alloca( &sw_params );
7486 snd_pcm_sw_params_current( phandle, sw_params );
7487 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7488 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7489 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7491 // The following two settings were suggested by Theo Veenker
7492 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7493 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7495 // here are two options for a fix
7496 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
// Silence the whole ring buffer (up to the boundary) on underrun.
7497 snd_pcm_uframes_t val;
7498 snd_pcm_sw_params_get_boundary( sw_params, &val );
7499 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7501 result = snd_pcm_sw_params( phandle, sw_params );
7503 snd_pcm_close( phandle );
7504 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7505 errorText_ = errorStream_.str();
7509 #if defined(__RTAUDIO_DEBUG__)
7510 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7511 snd_pcm_sw_params_dump( sw_params, out );
7514 // Set flags for buffer conversion
// Conversion is needed if format, channel count, or interleaving differ
// between the user-facing and device-facing buffers.
7515 stream_.doConvertBuffer[mode] = false;
7516 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7517 stream_.doConvertBuffer[mode] = true;
7518 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7519 stream_.doConvertBuffer[mode] = true;
7520 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7521 stream_.nUserChannels[mode] > 1 )
7522 stream_.doConvertBuffer[mode] = true;
7524 // Allocate the ApiHandle if necessary and then save.
7525 AlsaHandle *apiInfo = 0;
7526 if ( stream_.apiHandle == 0 ) {
7528 apiInfo = (AlsaHandle *) new AlsaHandle;
7530 catch ( std::bad_alloc& ) {
7531 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7535 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7536 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7540 stream_.apiHandle = (void *) apiInfo;
7541 apiInfo->handles[0] = 0;
7542 apiInfo->handles[1] = 0;
7545 apiInfo = (AlsaHandle *) stream_.apiHandle;
7547 apiInfo->handles[mode] = phandle;
7550 // Allocate necessary internal buffers.
7551 unsigned long bufferBytes;
7552 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7553 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7554 if ( stream_.userBuffer[mode] == NULL ) {
7555 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7559 if ( stream_.doConvertBuffer[mode] ) {
// Reuse the output-side device buffer for input if it is big enough.
7561 bool makeBuffer = true;
7562 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7563 if ( mode == INPUT ) {
7564 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7565 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7566 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7571 bufferBytes *= *bufferSize;
7572 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7573 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7574 if ( stream_.deviceBuffer == NULL ) {
7575 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
// Record the final stream parameters.
7581 stream_.sampleRate = sampleRate;
7582 stream_.nBuffers = periods;
7583 stream_.device[mode] = device;
7584 stream_.state = STREAM_STOPPED;
7586 // Setup the buffer conversion information structure.
7587 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7589 // Setup thread if necessary.
7590 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7591 // We had already set up an output stream.
7592 stream_.mode = DUPLEX;
7593 // Link the streams if possible.
7594 apiInfo->synchronized = false;
7595 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7596 apiInfo->synchronized = true;
7598 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7599 error( RtAudioError::WARNING );
7603 stream_.mode = mode;
7605 // Setup callback thread.
7606 stream_.callbackInfo.object = (void *) this;
7608 // Set the thread attributes for joinable and realtime scheduling
7609 // priority (optional). The higher priority will only take effect
7610 // if the program is run as root or suid. Note, under Linux
7611 // processes with CAP_SYS_NICE privilege, a user can change
7612 // scheduling policy and priority (thus need not be root). See
7613 // POSIX "capabilities".
7614 pthread_attr_t attr;
7615 pthread_attr_init( &attr );
7616 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7617 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7618 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7619 stream_.callbackInfo.doRealtime = true;
7620 struct sched_param param;
// Clamp the requested priority into the valid SCHED_RR range.
7621 int priority = options->priority;
7622 int min = sched_get_priority_min( SCHED_RR );
7623 int max = sched_get_priority_max( SCHED_RR );
7624 if ( priority < min ) priority = min;
7625 else if ( priority > max ) priority = max;
7626 param.sched_priority = priority;
7628 // Set the policy BEFORE the priority. Otherwise it fails.
7629 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7630 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7631 // This is definitely required. Otherwise it fails.
7632 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): '¶m' is mojibake for '&param' -- confirm vs upstream.
7633 pthread_attr_setschedparam(&attr, ¶m);
7636 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7638 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7641 stream_.callbackInfo.isRunning = true;
7642 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7643 pthread_attr_destroy( &attr );
7645 // Failed. Try instead with default attributes.
7646 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7648 stream_.callbackInfo.isRunning = false;
7649 errorText_ = "RtApiAlsa::error creating callback thread!";
// FAILURE cleanup: release the condition variable, handles, API struct
// and all buffers, then mark the stream closed.
7659 pthread_cond_destroy( &apiInfo->runnable_cv );
7660 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7661 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7663 stream_.apiHandle = 0;
7666 if ( phandle) snd_pcm_close( phandle );
7668 for ( int i=0; i<2; i++ ) {
7669 if ( stream_.userBuffer[i] ) {
7670 free( stream_.userBuffer[i] );
7671 stream_.userBuffer[i] = 0;
7675 if ( stream_.deviceBuffer ) {
7676 free( stream_.deviceBuffer );
7677 stream_.deviceBuffer = 0;
7680 stream_.state = STREAM_CLOSED;
// Shut down an open stream: stop the callback thread (waking it first if
// the stream is stopped, since it blocks on runnable_cv), drop any pending
// pcm data, then release the condition variable, pcm handles, AlsaHandle
// and all buffers. Warns (and returns) if no stream is open.
// NOTE(review): corrupted excerpt -- several upstream lines (returns,
// brace closers, 'delete apiInfo') are missing, and each line starts with
// a pasted-in upstream line number.
7684 void RtApiAlsa :: closeStream()
7686 if ( stream_.state == STREAM_CLOSED ) {
7687 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7688 error( RtAudioError::WARNING );
7692 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Tell the callback thread to exit, waking it if it is parked.
7693 stream_.callbackInfo.isRunning = false;
7694 MUTEX_LOCK( &stream_.mutex );
7695 if ( stream_.state == STREAM_STOPPED ) {
7696 apiInfo->runnable = true;
7697 pthread_cond_signal( &apiInfo->runnable_cv );
7699 MUTEX_UNLOCK( &stream_.mutex );
7700 pthread_join( stream_.callbackInfo.thread, NULL );
// Abort any in-flight audio before closing the handles.
7702 if ( stream_.state == STREAM_RUNNING ) {
7703 stream_.state = STREAM_STOPPED;
7704 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7705 snd_pcm_drop( apiInfo->handles[0] );
7706 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7707 snd_pcm_drop( apiInfo->handles[1] );
// Release synchronization and pcm resources.
7711 pthread_cond_destroy( &apiInfo->runnable_cv );
7712 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7713 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7715 stream_.apiHandle = 0;
// Free user and device buffers.
7718 for ( int i=0; i<2; i++ ) {
7719 if ( stream_.userBuffer[i] ) {
7720 free( stream_.userBuffer[i] );
7721 stream_.userBuffer[i] = 0;
7725 if ( stream_.deviceBuffer ) {
7726 free( stream_.deviceBuffer );
7727 stream_.deviceBuffer = 0;
7730 stream_.mode = UNINITIALIZED;
7731 stream_.state = STREAM_CLOSED;
// Start a stopped stream: prepare the playback and/or capture pcm (only
// when not already prepared), drop stale capture data, mark the stream
// running and wake the parked callback thread via runnable_cv.
// NOTE(review): corrupted excerpt -- 'int result = 0', the unlock/goto
// error paths and brace closers are missing, and each line starts with a
// pasted-in upstream line number.
7734 void RtApiAlsa :: startStream()
7736 // This method calls snd_pcm_prepare if the device isn't already in that state.
7739 if ( stream_.state == STREAM_RUNNING ) {
7740 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7741 error( RtAudioError::WARNING );
7745 MUTEX_LOCK( &stream_.mutex );
7748 snd_pcm_state_t state;
7749 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7750 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback side if needed.
7751 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7752 state = snd_pcm_state( handle[0] );
7753 if ( state != SND_PCM_STATE_PREPARED ) {
7754 result = snd_pcm_prepare( handle[0] );
7756 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7757 errorText_ = errorStream_.str();
// Prepare the capture side (skipped when linked to the output handle).
7763 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7764 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7765 state = snd_pcm_state( handle[1] );
7766 if ( state != SND_PCM_STATE_PREPARED ) {
7767 result = snd_pcm_prepare( handle[1] );
7769 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7770 errorText_ = errorStream_.str();
7776 stream_.state = STREAM_RUNNING;
// Wake the callback thread parked on runnable_cv.
7779 apiInfo->runnable = true;
7780 pthread_cond_signal( &apiInfo->runnable_cv );
7781 MUTEX_UNLOCK( &stream_.mutex );
7783 if ( result >= 0 ) return;
7784 error( RtAudioError::SYSTEM_ERROR );
7787 void RtApiAlsa :: stopStream()
7790 if ( stream_.state == STREAM_STOPPED ) {
7791 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
7792 error( RtAudioError::WARNING );
7796 stream_.state = STREAM_STOPPED;
7797 MUTEX_LOCK( &stream_.mutex );
7800 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7801 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7802 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7803 if ( apiInfo->synchronized )
7804 result = snd_pcm_drop( handle[0] );
7806 result = snd_pcm_drain( handle[0] );
7808 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
7809 errorText_ = errorStream_.str();
7814 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7815 result = snd_pcm_drop( handle[1] );
7817 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
7818 errorText_ = errorStream_.str();
7824 apiInfo->runnable = false; // fixes high CPU usage when stopped
7825 MUTEX_UNLOCK( &stream_.mutex );
7827 if ( result >= 0 ) return;
7828 error( RtAudioError::SYSTEM_ERROR );
7831 void RtApiAlsa :: abortStream()
7834 if ( stream_.state == STREAM_STOPPED ) {
7835 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
7836 error( RtAudioError::WARNING );
7840 stream_.state = STREAM_STOPPED;
7841 MUTEX_LOCK( &stream_.mutex );
7844 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7845 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7846 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7847 result = snd_pcm_drop( handle[0] );
7849 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
7850 errorText_ = errorStream_.str();
7855 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7856 result = snd_pcm_drop( handle[1] );
7858 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
7859 errorText_ = errorStream_.str();
7865 apiInfo->runnable = false; // fixes high CPU usage when stopped
7866 MUTEX_UNLOCK( &stream_.mutex );
7868 if ( result >= 0 ) return;
7869 error( RtAudioError::SYSTEM_ERROR );
7872 void RtApiAlsa :: callbackEvent()
7874 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7875 if ( stream_.state == STREAM_STOPPED ) {
7876 MUTEX_LOCK( &stream_.mutex );
7877 while ( !apiInfo->runnable )
7878 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
7880 if ( stream_.state != STREAM_RUNNING ) {
7881 MUTEX_UNLOCK( &stream_.mutex );
7884 MUTEX_UNLOCK( &stream_.mutex );
7887 if ( stream_.state == STREAM_CLOSED ) {
7888 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
7889 error( RtAudioError::WARNING );
7893 int doStopStream = 0;
7894 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
7895 double streamTime = getStreamTime();
7896 RtAudioStreamStatus status = 0;
7897 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
7898 status |= RTAUDIO_OUTPUT_UNDERFLOW;
7899 apiInfo->xrun[0] = false;
7901 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
7902 status |= RTAUDIO_INPUT_OVERFLOW;
7903 apiInfo->xrun[1] = false;
7905 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
7906 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
7908 if ( doStopStream == 2 ) {
7913 MUTEX_LOCK( &stream_.mutex );
7915 // The state might change while waiting on a mutex.
7916 if ( stream_.state == STREAM_STOPPED ) goto unlock;
7922 snd_pcm_sframes_t frames;
7923 RtAudioFormat format;
7924 handle = (snd_pcm_t **) apiInfo->handles;
7926 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
7928 // Setup parameters.
7929 if ( stream_.doConvertBuffer[1] ) {
7930 buffer = stream_.deviceBuffer;
7931 channels = stream_.nDeviceChannels[1];
7932 format = stream_.deviceFormat[1];
7935 buffer = stream_.userBuffer[1];
7936 channels = stream_.nUserChannels[1];
7937 format = stream_.userFormat;
7940 // Read samples from device in interleaved/non-interleaved format.
7941 if ( stream_.deviceInterleaved[1] )
7942 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
7944 void *bufs[channels];
7945 size_t offset = stream_.bufferSize * formatBytes( format );
7946 for ( int i=0; i<channels; i++ )
7947 bufs[i] = (void *) (buffer + (i * offset));
7948 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
7951 if ( result < (int) stream_.bufferSize ) {
7952 // Either an error or overrun occured.
7953 if ( result == -EPIPE ) {
7954 snd_pcm_state_t state = snd_pcm_state( handle[1] );
7955 if ( state == SND_PCM_STATE_XRUN ) {
7956 apiInfo->xrun[1] = true;
7957 result = snd_pcm_prepare( handle[1] );
7959 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
7960 errorText_ = errorStream_.str();
7964 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
7965 errorText_ = errorStream_.str();
7969 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
7970 errorText_ = errorStream_.str();
7972 error( RtAudioError::WARNING );
7976 // Do byte swapping if necessary.
7977 if ( stream_.doByteSwap[1] )
7978 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
7980 // Do buffer conversion if necessary.
7981 if ( stream_.doConvertBuffer[1] )
7982 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
7984 // Check stream latency
7985 result = snd_pcm_delay( handle[1], &frames );
7986 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
7991 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7993 // Setup parameters and do buffer conversion if necessary.
7994 if ( stream_.doConvertBuffer[0] ) {
7995 buffer = stream_.deviceBuffer;
7996 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
7997 channels = stream_.nDeviceChannels[0];
7998 format = stream_.deviceFormat[0];
8001 buffer = stream_.userBuffer[0];
8002 channels = stream_.nUserChannels[0];
8003 format = stream_.userFormat;
8006 // Do byte swapping if necessary.
8007 if ( stream_.doByteSwap[0] )
8008 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8010 // Write samples to device in interleaved/non-interleaved format.
8011 if ( stream_.deviceInterleaved[0] )
8012 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8014 void *bufs[channels];
8015 size_t offset = stream_.bufferSize * formatBytes( format );
8016 for ( int i=0; i<channels; i++ )
8017 bufs[i] = (void *) (buffer + (i * offset));
8018 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8021 if ( result < (int) stream_.bufferSize ) {
8022 // Either an error or underrun occured.
8023 if ( result == -EPIPE ) {
8024 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8025 if ( state == SND_PCM_STATE_XRUN ) {
8026 apiInfo->xrun[0] = true;
8027 result = snd_pcm_prepare( handle[0] );
8029 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8030 errorText_ = errorStream_.str();
8033 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8036 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8037 errorText_ = errorStream_.str();
8041 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8042 errorText_ = errorStream_.str();
8044 error( RtAudioError::WARNING );
8048 // Check stream latency
8049 result = snd_pcm_delay( handle[0], &frames );
8050 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8054 MUTEX_UNLOCK( &stream_.mutex );
8056 RtApi::tickStreamTime();
8057 if ( doStopStream == 1 ) this->stopStream();
8060 static void *alsaCallbackHandler( void *ptr )
8062 CallbackInfo *info = (CallbackInfo *) ptr;
8063 RtApiAlsa *object = (RtApiAlsa *) info->object;
8064 bool *isRunning = &info->isRunning;
8066 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8067 if ( info->doRealtime ) {
8068 std::cerr << "RtAudio alsa: " <<
8069 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8070 "running realtime scheduling" << std::endl;
8074 while ( *isRunning == true ) {
8075 pthread_testcancel();
8076 object->callbackEvent();
8079 pthread_exit( NULL );
8082 //******************** End of __LINUX_ALSA__ *********************//
8085 #if defined(__LINUX_PULSE__)
8087 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8088 // and Tristan Matthews.
8090 #include <pulse/error.h>
8091 #include <pulse/simple.h>
// Sample rates advertised for the single (virtual) PulseAudio device.
// The array is zero-terminated so callers can iterate without a count.
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};
8097 struct rtaudio_pa_format_mapping_t {
8098 RtAudioFormat rtaudio_format;
8099 pa_sample_format_t pa_format;
8102 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8103 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8104 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8105 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8106 {0, PA_SAMPLE_INVALID}};
8108 struct PulseAudioHandle {
8112 pthread_cond_t runnable_cv;
8114 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
8117 RtApiPulse::~RtApiPulse()
8119 if ( stream_.state != STREAM_CLOSED )
8123 unsigned int RtApiPulse::getDeviceCount( void )
8128 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8130 RtAudio::DeviceInfo info;
8132 info.name = "PulseAudio";
8133 info.outputChannels = 2;
8134 info.inputChannels = 2;
8135 info.duplexChannels = 2;
8136 info.isDefaultOutput = true;
8137 info.isDefaultInput = true;
8139 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8140 info.sampleRates.push_back( *sr );
8142 info.preferredSampleRate = 48000;
8143 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8148 static void *pulseaudio_callback( void * user )
8150 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8151 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8152 volatile bool *isRunning = &cbi->isRunning;
8154 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8155 if (cbi->doRealtime) {
8156 std::cerr << "RtAudio pulse: " <<
8157 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8158 "running realtime scheduling" << std::endl;
8162 while ( *isRunning ) {
8163 pthread_testcancel();
8164 context->callbackEvent();
8167 pthread_exit( NULL );
8170 void RtApiPulse::closeStream( void )
8172 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8174 stream_.callbackInfo.isRunning = false;
8176 MUTEX_LOCK( &stream_.mutex );
8177 if ( stream_.state == STREAM_STOPPED ) {
8178 pah->runnable = true;
8179 pthread_cond_signal( &pah->runnable_cv );
8181 MUTEX_UNLOCK( &stream_.mutex );
8183 pthread_join( pah->thread, 0 );
8184 if ( pah->s_play ) {
8185 pa_simple_flush( pah->s_play, NULL );
8186 pa_simple_free( pah->s_play );
8189 pa_simple_free( pah->s_rec );
8191 pthread_cond_destroy( &pah->runnable_cv );
8193 stream_.apiHandle = 0;
8196 if ( stream_.userBuffer[0] ) {
8197 free( stream_.userBuffer[0] );
8198 stream_.userBuffer[0] = 0;
8200 if ( stream_.userBuffer[1] ) {
8201 free( stream_.userBuffer[1] );
8202 stream_.userBuffer[1] = 0;
8205 stream_.state = STREAM_CLOSED;
8206 stream_.mode = UNINITIALIZED;
8209 void RtApiPulse::callbackEvent( void )
8211 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8213 if ( stream_.state == STREAM_STOPPED ) {
8214 MUTEX_LOCK( &stream_.mutex );
8215 while ( !pah->runnable )
8216 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8218 if ( stream_.state != STREAM_RUNNING ) {
8219 MUTEX_UNLOCK( &stream_.mutex );
8222 MUTEX_UNLOCK( &stream_.mutex );
8225 if ( stream_.state == STREAM_CLOSED ) {
8226 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8227 "this shouldn't happen!";
8228 error( RtAudioError::WARNING );
8232 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8233 double streamTime = getStreamTime();
8234 RtAudioStreamStatus status = 0;
8235 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8236 stream_.bufferSize, streamTime, status,
8237 stream_.callbackInfo.userData );
8239 if ( doStopStream == 2 ) {
8244 MUTEX_LOCK( &stream_.mutex );
8245 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8246 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8248 if ( stream_.state != STREAM_RUNNING )
8253 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8254 if ( stream_.doConvertBuffer[OUTPUT] ) {
8255 convertBuffer( stream_.deviceBuffer,
8256 stream_.userBuffer[OUTPUT],
8257 stream_.convertInfo[OUTPUT] );
8258 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8259 formatBytes( stream_.deviceFormat[OUTPUT] );
8261 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8262 formatBytes( stream_.userFormat );
8264 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8265 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8266 pa_strerror( pa_error ) << ".";
8267 errorText_ = errorStream_.str();
8268 error( RtAudioError::WARNING );
8272 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8273 if ( stream_.doConvertBuffer[INPUT] )
8274 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8275 formatBytes( stream_.deviceFormat[INPUT] );
8277 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8278 formatBytes( stream_.userFormat );
8280 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8281 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8282 pa_strerror( pa_error ) << ".";
8283 errorText_ = errorStream_.str();
8284 error( RtAudioError::WARNING );
8286 if ( stream_.doConvertBuffer[INPUT] ) {
8287 convertBuffer( stream_.userBuffer[INPUT],
8288 stream_.deviceBuffer,
8289 stream_.convertInfo[INPUT] );
8294 MUTEX_UNLOCK( &stream_.mutex );
8295 RtApi::tickStreamTime();
8297 if ( doStopStream == 1 )
8301 void RtApiPulse::startStream( void )
8303 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8305 if ( stream_.state == STREAM_CLOSED ) {
8306 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8307 error( RtAudioError::INVALID_USE );
8310 if ( stream_.state == STREAM_RUNNING ) {
8311 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8312 error( RtAudioError::WARNING );
8316 MUTEX_LOCK( &stream_.mutex );
8318 stream_.state = STREAM_RUNNING;
8320 pah->runnable = true;
8321 pthread_cond_signal( &pah->runnable_cv );
8322 MUTEX_UNLOCK( &stream_.mutex );
8325 void RtApiPulse::stopStream( void )
8327 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8329 if ( stream_.state == STREAM_CLOSED ) {
8330 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8331 error( RtAudioError::INVALID_USE );
8334 if ( stream_.state == STREAM_STOPPED ) {
8335 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8336 error( RtAudioError::WARNING );
8340 stream_.state = STREAM_STOPPED;
8341 MUTEX_LOCK( &stream_.mutex );
8343 if ( pah && pah->s_play ) {
8345 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8346 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8347 pa_strerror( pa_error ) << ".";
8348 errorText_ = errorStream_.str();
8349 MUTEX_UNLOCK( &stream_.mutex );
8350 error( RtAudioError::SYSTEM_ERROR );
8355 stream_.state = STREAM_STOPPED;
8356 MUTEX_UNLOCK( &stream_.mutex );
8359 void RtApiPulse::abortStream( void )
8361 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8363 if ( stream_.state == STREAM_CLOSED ) {
8364 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8365 error( RtAudioError::INVALID_USE );
8368 if ( stream_.state == STREAM_STOPPED ) {
8369 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8370 error( RtAudioError::WARNING );
8374 stream_.state = STREAM_STOPPED;
8375 MUTEX_LOCK( &stream_.mutex );
8377 if ( pah && pah->s_play ) {
8379 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8380 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8381 pa_strerror( pa_error ) << ".";
8382 errorText_ = errorStream_.str();
8383 MUTEX_UNLOCK( &stream_.mutex );
8384 error( RtAudioError::SYSTEM_ERROR );
8389 stream_.state = STREAM_STOPPED;
8390 MUTEX_UNLOCK( &stream_.mutex );
// Open the (single, virtual) PulseAudio device for INPUT or OUTPUT mode,
// allocating buffers, spawning the callback thread, and connecting via the
// simple API. Returns false on failure.
// NOTE(review): this copy of the function appears to have lines elided
// (unbalanced braces; declarations such as the pa_sample_spec `ss`, the
// `sf_found` flag, the `error` cleanup label and several `return`/`goto`
// statements are not visible). Restore from the canonical source before
// compiling.
bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
                                  unsigned int channels, unsigned int firstChannel,
                                  unsigned int sampleRate, RtAudioFormat format,
                                  unsigned int *bufferSize, RtAudio::StreamOptions *options )
  PulseAudioHandle *pah = 0;
  unsigned long bufferBytes = 0;
  // Only device 0 exists, only plain INPUT/OUTPUT are accepted, and the
  // simple API supports mono or stereo only.
  if ( device != 0 ) return false;
  if ( mode != INPUT && mode != OUTPUT ) return false;
  if ( channels != 1 && channels != 2 ) {
    errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
  ss.channels = channels;
  if ( firstChannel != 0 ) return false;
  // The requested rate must be one of the advertised SUPPORTED_SAMPLERATES.
  bool sr_found = false;
  for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
    if ( sampleRate == *sr ) {
      stream_.sampleRate = sampleRate;
      ss.rate = sampleRate;
    errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
  // Pick a native PulseAudio sample format if the requested RtAudio format
  // maps to one directly.
  for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
        sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
    if ( format == sf->rtaudio_format ) {
      stream_.userFormat = sf->rtaudio_format;
      stream_.deviceFormat[mode] = stream_.userFormat;
      ss.format = sf->pa_format;
  if ( !sf_found ) { // Use internal data format conversion.
    stream_.userFormat = format;
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
    ss.format = PA_SAMPLE_FLOAT32LE;
  // Set other stream parameters.
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true;
  stream_.nBuffers = 1;
  stream_.doByteSwap[mode] = false;
  stream_.nUserChannels[mode] = channels;
  stream_.nDeviceChannels[mode] = channels + firstChannel;
  stream_.channelOffset[mode] = 0;
  std::string streamName = "RtAudio";
  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
    stream_.doConvertBuffer[mode] = true;
  // Allocate necessary internal buffers.
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
  stream_.bufferSize = *bufferSize;
  if ( stream_.doConvertBuffer[mode] ) {
    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( mode == INPUT ) {
      // Reuse an existing (output-mode) device buffer if it is big enough.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
        if ( bufferBytes <= bytesOut ) makeBuffer = false;
      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
  stream_.device[mode] = device;
  // Setup the buffer conversion information structure.
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
  // Create the per-stream handle (shared between INPUT and OUTPUT opens).
  if ( !stream_.apiHandle ) {
    PulseAudioHandle *pah = new PulseAudioHandle;
      errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
    stream_.apiHandle = pah;
    if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
  pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
  if ( options && !options->streamName.empty() ) streamName = options->streamName;
    // Limit record fragment size so input latency tracks the buffer size.
    pa_buffer_attr buffer_attr;
    buffer_attr.fragsize = bufferBytes;
    buffer_attr.maxlength = -1;
    pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
    if ( !pah->s_rec ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
    pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
    if ( !pah->s_play ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
  if ( stream_.mode == UNINITIALIZED )
    stream_.mode = mode;
  else if ( stream_.mode == mode )
    stream_.mode = DUPLEX;
  if ( !stream_.callbackInfo.isRunning ) {
    stream_.callbackInfo.object = this;
    stream_.state = STREAM_STOPPED;
    // Set the thread attributes for joinable and realtime scheduling
    // priority (optional). The higher priority will only take affect
    // if the program is run as root or suid. Note, under Linux
    // processes with CAP_SYS_NICE privilege, a user can change
    // scheduling policy and priority (thus need not be root). See
    // POSIX "capabilities".
    pthread_attr_t attr;
    pthread_attr_init( &attr );
    pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
#ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
    if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
      stream_.callbackInfo.doRealtime = true;
      struct sched_param param;
      int priority = options->priority;
      int min = sched_get_priority_min( SCHED_RR );
      int max = sched_get_priority_max( SCHED_RR );
      if ( priority < min ) priority = min;
      else if ( priority > max ) priority = max;
      param.sched_priority = priority;
      // Set the policy BEFORE the priority. Otherwise it fails.
      pthread_attr_setschedpolicy(&attr, SCHED_RR);
      pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
      // This is definitely required. Otherwise it fails.
      pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
      // NOTE(review): "¶m" below looks like a mis-encoded "&param" — confirm
      // against the canonical source.
      pthread_attr_setschedparam(&attr, ¶m);
      pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
    pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
    stream_.callbackInfo.isRunning = true;
    int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
    pthread_attr_destroy(&attr);
      // Failed. Try instead with default attributes.
      result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
        stream_.callbackInfo.isRunning = false;
        errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
  // Failure cleanup: tear down the handle, free buffers, mark stream closed.
  if ( pah && stream_.callbackInfo.isRunning ) {
    pthread_cond_destroy( &pah->runnable_cv );
    stream_.apiHandle = 0;
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  stream_.state = STREAM_CLOSED;
8620 //******************** End of __LINUX_PULSE__ *********************//
8623 #if defined(__LINUX_OSS__)
8626 #include <sys/ioctl.h>
8629 #include <sys/soundcard.h>
8633 static void *ossCallbackHandler(void * ptr);
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];             // device ids
  bool xrun[2];          // over/underflow flags per direction
  bool triggered;        // whether the device has been triggered to start
  pthread_cond_t runnable; // signaled when the stream may run

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
8647 RtApiOss :: RtApiOss()
8649 // Nothing to do here.
8652 RtApiOss :: ~RtApiOss()
8654 if ( stream_.state != STREAM_CLOSED ) closeStream();
8657 unsigned int RtApiOss :: getDeviceCount( void )
8659 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8660 if ( mixerfd == -1 ) {
8661 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8662 error( RtAudioError::WARNING );
8666 oss_sysinfo sysinfo;
8667 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8669 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8670 error( RtAudioError::WARNING );
8675 return sysinfo.numaudios;
// Probe one OSS device via the mixer's SNDCTL_AUDIOINFO ioctl and fill an
// RtAudio::DeviceInfo (channels, native formats, sample rates).
// NOTE(review): this copy appears to have lines elided (unbalanced braces;
// the close(mixerfd)/return paths, ainfo.dev assignment, `info.probed = true;`
// and the final `return info;` are not visible). Restore from the canonical
// source before compiling.
RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = false;
  int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
  if ( mixerfd == -1 ) {
    errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
    error( RtAudioError::WARNING );
  oss_sysinfo sysinfo;
  int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
  if ( result == -1 ) {
    errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
    error( RtAudioError::WARNING );
  unsigned nDevices = sysinfo.numaudios;
  if ( nDevices == 0 ) {
    errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
  if ( device >= nDevices ) {
    errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
  oss_audioinfo ainfo;
  result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
  if ( result == -1 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // Channel capabilities.
  if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
  if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
  if ( ainfo.caps & PCM_CAP_DUPLEX ) {
    if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
      info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
  // Probe data formats ... do for input
  unsigned long mask = ainfo.iformats;
  if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
    info.nativeFormats |= RTAUDIO_SINT16;
  if ( mask & AFMT_S8 )
    info.nativeFormats |= RTAUDIO_SINT8;
  if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
    info.nativeFormats |= RTAUDIO_SINT32;
  if ( mask & AFMT_FLOAT )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
    info.nativeFormats |= RTAUDIO_SINT24;
  // Check that we have at least one supported format
  if ( info.nativeFormats == 0 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // Probe the supported sample rates.
  info.sampleRates.clear();
  if ( ainfo.nrates ) {
    // The device reports an explicit list of rates; keep those that also
    // appear in RtApi's SAMPLE_RATES table.
    for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
      for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
        if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
          info.sampleRates.push_back( SAMPLE_RATES[k] );
          // Prefer the highest rate not exceeding 48 kHz.
          if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
            info.preferredSampleRate = SAMPLE_RATES[k];
  // Check min and max rate values;
  for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
    if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
      info.sampleRates.push_back( SAMPLE_RATES[k] );
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[k];
  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  info.name = ainfo.name;
8798 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8799 unsigned int firstChannel, unsigned int sampleRate,
8800 RtAudioFormat format, unsigned int *bufferSize,
8801 RtAudio::StreamOptions *options )
8803 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8804 if ( mixerfd == -1 ) {
8805 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
8809 oss_sysinfo sysinfo;
8810 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8811 if ( result == -1 ) {
8813 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8817 unsigned nDevices = sysinfo.numaudios;
8818 if ( nDevices == 0 ) {
8819 // This should not happen because a check is made before this function is called.
8821 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8825 if ( device >= nDevices ) {
8826 // This should not happen because a check is made before this function is called.
8828 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8832 oss_audioinfo ainfo;
8834 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8836 if ( result == -1 ) {
8837 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8838 errorText_ = errorStream_.str();
8842 // Check if device supports input or output
8843 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8844 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8845 if ( mode == OUTPUT )
8846 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8848 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8849 errorText_ = errorStream_.str();
8854 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8855 if ( mode == OUTPUT )
8857 else { // mode == INPUT
8858 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8859 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8860 close( handle->id[0] );
8862 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8863 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8864 errorText_ = errorStream_.str();
8867 // Check that the number previously set channels is the same.
8868 if ( stream_.nUserChannels[0] != channels ) {
8869 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8870 errorText_ = errorStream_.str();
8879 // Set exclusive access if specified.
8880 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8882 // Try to open the device.
8884 fd = open( ainfo.devnode, flags, 0 );
8886 if ( errno == EBUSY )
8887 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8889 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8890 errorText_ = errorStream_.str();
8894 // For duplex operation, specifically set this mode (this doesn't seem to work).
8896 if ( flags | O_RDWR ) {
8897 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8898 if ( result == -1) {
8899 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8900 errorText_ = errorStream_.str();
8906 // Check the device channel support.
8907 stream_.nUserChannels[mode] = channels;
8908 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8910 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
8911 errorText_ = errorStream_.str();
8915 // Set the number of channels.
8916 int deviceChannels = channels + firstChannel;
8917 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
8918 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
8920 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
8921 errorText_ = errorStream_.str();
8924 stream_.nDeviceChannels[mode] = deviceChannels;
8926 // Get the data format mask
8928 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
8929 if ( result == -1 ) {
8931 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
8932 errorText_ = errorStream_.str();
8936 // Determine how to set the device format.
8937 stream_.userFormat = format;
8938 int deviceFormat = -1;
8939 stream_.doByteSwap[mode] = false;
8940 if ( format == RTAUDIO_SINT8 ) {
8941 if ( mask & AFMT_S8 ) {
8942 deviceFormat = AFMT_S8;
8943 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8946 else if ( format == RTAUDIO_SINT16 ) {
8947 if ( mask & AFMT_S16_NE ) {
8948 deviceFormat = AFMT_S16_NE;
8949 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8951 else if ( mask & AFMT_S16_OE ) {
8952 deviceFormat = AFMT_S16_OE;
8953 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8954 stream_.doByteSwap[mode] = true;
8957 else if ( format == RTAUDIO_SINT24 ) {
8958 if ( mask & AFMT_S24_NE ) {
8959 deviceFormat = AFMT_S24_NE;
8960 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8962 else if ( mask & AFMT_S24_OE ) {
8963 deviceFormat = AFMT_S24_OE;
8964 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8965 stream_.doByteSwap[mode] = true;
8968 else if ( format == RTAUDIO_SINT32 ) {
8969 if ( mask & AFMT_S32_NE ) {
8970 deviceFormat = AFMT_S32_NE;
8971 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8973 else if ( mask & AFMT_S32_OE ) {
8974 deviceFormat = AFMT_S32_OE;
8975 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8976 stream_.doByteSwap[mode] = true;
8980 if ( deviceFormat == -1 ) {
8981 // The user requested format is not natively supported by the device.
8982 if ( mask & AFMT_S16_NE ) {
8983 deviceFormat = AFMT_S16_NE;
8984 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8986 else if ( mask & AFMT_S32_NE ) {
8987 deviceFormat = AFMT_S32_NE;
8988 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8990 else if ( mask & AFMT_S24_NE ) {
8991 deviceFormat = AFMT_S24_NE;
8992 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8994 else if ( mask & AFMT_S16_OE ) {
8995 deviceFormat = AFMT_S16_OE;
8996 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8997 stream_.doByteSwap[mode] = true;
8999 else if ( mask & AFMT_S32_OE ) {
9000 deviceFormat = AFMT_S32_OE;
9001 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9002 stream_.doByteSwap[mode] = true;
9004 else if ( mask & AFMT_S24_OE ) {
9005 deviceFormat = AFMT_S24_OE;
9006 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9007 stream_.doByteSwap[mode] = true;
9009 else if ( mask & AFMT_S8) {
9010 deviceFormat = AFMT_S8;
9011 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9015 if ( stream_.deviceFormat[mode] == 0 ) {
9016 // This really shouldn't happen ...
9018 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9019 errorText_ = errorStream_.str();
9023 // Set the data format.
9024 int temp = deviceFormat;
9025 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9026 if ( result == -1 || deviceFormat != temp ) {
9028 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9029 errorText_ = errorStream_.str();
9033 // Attempt to set the buffer size. According to OSS, the minimum
9034 // number of buffers is two. The supposed minimum buffer size is 16
9035 // bytes, so that will be our lower bound. The argument to this
9036 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9037 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9038 // We'll check the actual value used near the end of the setup
9040 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9041 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9043 if ( options ) buffers = options->numberOfBuffers;
9044 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9045 if ( buffers < 2 ) buffers = 3;
9046 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9047 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9048 if ( result == -1 ) {
9050 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9051 errorText_ = errorStream_.str();
9054 stream_.nBuffers = buffers;
9056 // Save buffer size (in sample frames).
9057 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9058 stream_.bufferSize = *bufferSize;
9060 // Set the sample rate.
9061 int srate = sampleRate;
9062 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9063 if ( result == -1 ) {
9065 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9066 errorText_ = errorStream_.str();
9070 // Verify the sample rate setup worked.
9071 if ( abs( srate - (int)sampleRate ) > 100 ) {
9073 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9074 errorText_ = errorStream_.str();
9077 stream_.sampleRate = sampleRate;
9079 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9080 // We're doing duplex setup here.
9081 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9082 stream_.nDeviceChannels[0] = deviceChannels;
9085 // Set interleaving parameters.
9086 stream_.userInterleaved = true;
9087 stream_.deviceInterleaved[mode] = true;
9088 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9089 stream_.userInterleaved = false;
9091 // Set flags for buffer conversion
9092 stream_.doConvertBuffer[mode] = false;
9093 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9094 stream_.doConvertBuffer[mode] = true;
9095 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9096 stream_.doConvertBuffer[mode] = true;
9097 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9098 stream_.nUserChannels[mode] > 1 )
9099 stream_.doConvertBuffer[mode] = true;
9101 // Allocate the stream handles if necessary and then save.
9102 if ( stream_.apiHandle == 0 ) {
9104 handle = new OssHandle;
9106 catch ( std::bad_alloc& ) {
9107 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9111 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9112 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9116 stream_.apiHandle = (void *) handle;
9119 handle = (OssHandle *) stream_.apiHandle;
9121 handle->id[mode] = fd;
9123 // Allocate necessary internal buffers.
9124 unsigned long bufferBytes;
9125 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9126 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9127 if ( stream_.userBuffer[mode] == NULL ) {
9128 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9132 if ( stream_.doConvertBuffer[mode] ) {
9134 bool makeBuffer = true;
9135 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9136 if ( mode == INPUT ) {
9137 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9138 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9139 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9144 bufferBytes *= *bufferSize;
9145 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9146 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9147 if ( stream_.deviceBuffer == NULL ) {
9148 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9154 stream_.device[mode] = device;
9155 stream_.state = STREAM_STOPPED;
9157 // Setup the buffer conversion information structure.
9158 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9160 // Setup thread if necessary.
9161 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9162 // We had already set up an output stream.
9163 stream_.mode = DUPLEX;
9164 if ( stream_.device[0] == device ) handle->id[0] = fd;
9167 stream_.mode = mode;
9169 // Setup callback thread.
9170 stream_.callbackInfo.object = (void *) this;
9172 // Set the thread attributes for joinable and realtime scheduling
9173 // priority. The higher priority will only take affect if the
9174 // program is run as root or suid.
9175 pthread_attr_t attr;
9176 pthread_attr_init( &attr );
9177 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9178 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9179 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9180 stream_.callbackInfo.doRealtime = true;
9181 struct sched_param param;
9182 int priority = options->priority;
9183 int min = sched_get_priority_min( SCHED_RR );
9184 int max = sched_get_priority_max( SCHED_RR );
9185 if ( priority < min ) priority = min;
9186 else if ( priority > max ) priority = max;
9187 param.sched_priority = priority;
9189 // Set the policy BEFORE the priority. Otherwise it fails.
9190 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9191 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9192 // This is definitely required. Otherwise it fails.
9193 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9194 pthread_attr_setschedparam(&attr, ¶m);
9197 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9199 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9202 stream_.callbackInfo.isRunning = true;
9203 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9204 pthread_attr_destroy( &attr );
9206 // Failed. Try instead with default attributes.
9207 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9209 stream_.callbackInfo.isRunning = false;
9210 errorText_ = "RtApiOss::error creating callback thread!";
9220 pthread_cond_destroy( &handle->runnable );
9221 if ( handle->id[0] ) close( handle->id[0] );
9222 if ( handle->id[1] ) close( handle->id[1] );
9224 stream_.apiHandle = 0;
9227 for ( int i=0; i<2; i++ ) {
9228 if ( stream_.userBuffer[i] ) {
9229 free( stream_.userBuffer[i] );
9230 stream_.userBuffer[i] = 0;
9234 if ( stream_.deviceBuffer ) {
9235 free( stream_.deviceBuffer );
9236 stream_.deviceBuffer = 0;
9239 stream_.state = STREAM_CLOSED;
// Close an open OSS stream: shut down the callback thread, halt and close
// the device descriptor(s), free all stream buffers, and reset the stream
// state to CLOSED. Emits only a WARNING if no stream is open.
9243 void RtApiOss :: closeStream()
9245 if ( stream_.state == STREAM_CLOSED ) {
9246 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9247 error( RtAudioError::WARNING );
// Ask the callback thread to exit; if it is parked on the "runnable"
// condition (stream stopped), wake it so it can observe isRunning == false.
9251 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9252 stream_.callbackInfo.isRunning = false;
9253 MUTEX_LOCK( &stream_.mutex );
9254 if ( stream_.state == STREAM_STOPPED )
9255 pthread_cond_signal( &handle->runnable );
9256 MUTEX_UNLOCK( &stream_.mutex );
9257 pthread_join( stream_.callbackInfo.thread, NULL );
// If the stream is still running, halt playback and/or capture via
// SNDCTL_DSP_HALT before tearing anything down.
9259 if ( stream_.state == STREAM_RUNNING ) {
9260 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9261 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9263 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9264 stream_.state = STREAM_STOPPED;
// Destroy the condition variable, close any open file descriptors, and
// release the handle (apiHandle is zeroed so it is not reused).
9268 pthread_cond_destroy( &handle->runnable );
9269 if ( handle->id[0] ) close( handle->id[0] );
9270 if ( handle->id[1] ) close( handle->id[1] );
9272 stream_.apiHandle = 0;
// Free the per-direction user buffers ([0] = output, [1] = input) and the
// shared device buffer.
9275 for ( int i=0; i<2; i++ ) {
9276 if ( stream_.userBuffer[i] ) {
9277 free( stream_.userBuffer[i] );
9278 stream_.userBuffer[i] = 0;
9282 if ( stream_.deviceBuffer ) {
9283 free( stream_.deviceBuffer );
9284 stream_.deviceBuffer = 0;
9287 stream_.mode = UNINITIALIZED;
9288 stream_.state = STREAM_CLOSED;
// Start a stopped stream. OSS needs no explicit "start" ioctl — playback
// begins as soon as samples are written — so this only flips the state to
// RUNNING and wakes the callback thread waiting on the condition variable.
9291 void RtApiOss :: startStream()
9294 if ( stream_.state == STREAM_RUNNING ) {
9295 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9296 error( RtAudioError::WARNING );
// State change is made under the stream mutex so the callback thread sees
// a consistent value.
9300 MUTEX_LOCK( &stream_.mutex );
9302 stream_.state = STREAM_RUNNING;
9304 // No need to do anything else here ... OSS automatically starts
9305 // when fed samples.
9307 MUTEX_UNLOCK( &stream_.mutex );
// Wake the callback thread, which blocks on handle->runnable while the
// stream is stopped (see callbackEvent()).
9309 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9310 pthread_cond_signal( &handle->runnable );
// Gracefully stop a running stream: flush the output with silence so queued
// audio drains, then halt the device(s) with SNDCTL_DSP_HALT. Compare
// abortStream(), which halts immediately without the flush.
9313 void RtApiOss :: stopStream()
9316 if ( stream_.state == STREAM_STOPPED ) {
9317 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9318 error( RtAudioError::WARNING );
9322 MUTEX_LOCK( &stream_.mutex );
9324 // The state might change while waiting on a mutex.
9325 if ( stream_.state == STREAM_STOPPED ) {
9326 MUTEX_UNLOCK( &stream_.mutex );
9331 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9332 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9334 // Flush the output with zeros a few times.
9337 RtAudioFormat format;
// Pick whichever buffer actually feeds the device: the converted device
// buffer when format/channel conversion is active, else the user buffer.
9339 if ( stream_.doConvertBuffer[0] ) {
9340 buffer = stream_.deviceBuffer;
9341 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9342 format = stream_.deviceFormat[0];
9345 buffer = stream_.userBuffer[0];
9346 samples = stream_.bufferSize * stream_.nUserChannels[0];
9347 format = stream_.userFormat;
// Write nBuffers+1 buffers of silence so every queued fragment is zeroed
// before the halt (avoids a stale-audio burst on restart).
9350 memset( buffer, 0, samples * formatBytes(format) );
9351 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9352 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9353 if ( result == -1 ) {
9354 errorText_ = "RtApiOss::stopStream: audio write error.";
9355 error( RtAudioError::WARNING );
9359 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9360 if ( result == -1 ) {
9361 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9362 errorText_ = errorStream_.str();
// Duplex trigger must be re-armed on the next start (see callbackEvent()).
9365 handle->triggered = false;
// Halt the input side too, unless duplex on a single shared descriptor
// (in that case the halt above already covered it).
9368 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9369 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9370 if ( result == -1 ) {
9371 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9372 errorText_ = errorStream_.str();
9378 stream_.state = STREAM_STOPPED;
9379 MUTEX_UNLOCK( &stream_.mutex );
// Only raise SYSTEM_ERROR after the mutex is released, based on the last
// ioctl result.
9381 if ( result != -1 ) return;
9382 error( RtAudioError::SYSTEM_ERROR );
// Immediately stop a running stream via SNDCTL_DSP_HALT, discarding any
// pending audio. Unlike stopStream(), no silence is flushed first.
9385 void RtApiOss :: abortStream()
9388 if ( stream_.state == STREAM_STOPPED ) {
9389 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9390 error( RtAudioError::WARNING );
9394 MUTEX_LOCK( &stream_.mutex );
9396 // The state might change while waiting on a mutex.
9397 if ( stream_.state == STREAM_STOPPED ) {
9398 MUTEX_UNLOCK( &stream_.mutex );
9403 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Halt playback descriptor for output/duplex streams.
9404 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9405 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9406 if ( result == -1 ) {
9407 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9408 errorText_ = errorStream_.str();
// Duplex trigger must be re-armed on the next start (see callbackEvent()).
9411 handle->triggered = false;
// Halt the input descriptor too, unless duplex shares one descriptor.
9414 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9415 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9416 if ( result == -1 ) {
9417 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9418 errorText_ = errorStream_.str();
9424 stream_.state = STREAM_STOPPED;
9425 MUTEX_UNLOCK( &stream_.mutex );
// Raise SYSTEM_ERROR only after unlocking, based on the last ioctl result.
9427 if ( result != -1 ) return;
9428 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the audio callback loop (run on the thread created in
// probeDeviceOpen). Blocks while the stream is stopped, invokes the user
// callback, then writes output to / reads input from the OSS descriptor(s),
// performing byte-swap and format/channel conversion as configured.
9431 void RtApiOss :: callbackEvent()
9433 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// While stopped, park on the condition variable until startStream() or
// closeStream() signals; bail out if we were not restarted.
9434 if ( stream_.state == STREAM_STOPPED ) {
9435 MUTEX_LOCK( &stream_.mutex );
9436 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9437 if ( stream_.state != STREAM_RUNNING ) {
9438 MUTEX_UNLOCK( &stream_.mutex );
9441 MUTEX_UNLOCK( &stream_.mutex );
9444 if ( stream_.state == STREAM_CLOSED ) {
9445 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9446 error( RtAudioError::WARNING );
9450 // Invoke user callback to get fresh output data.
9451 int doStopStream = 0;
9452 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9453 double streamTime = getStreamTime();
9454 RtAudioStreamStatus status = 0;
// Report (and clear) any under/overflow flags recorded by earlier I/O
// failures so the user callback sees them exactly once.
9455 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9456 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9457 handle->xrun[0] = false;
9459 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9460 status |= RTAUDIO_INPUT_OVERFLOW;
9461 handle->xrun[1] = false;
9463 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9464 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Return value 2 requests an immediate abort (1 requests a drain-stop,
// handled at the bottom of this function).
9465 if ( doStopStream == 2 ) {
9466 this->abortStream();
9470 MUTEX_LOCK( &stream_.mutex );
9472 // The state might change while waiting on a mutex.
9473 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9478 RtAudioFormat format;
// ---- Output (playback) side ----
9480 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9482 // Setup parameters and do buffer conversion if necessary.
9483 if ( stream_.doConvertBuffer[0] ) {
9484 buffer = stream_.deviceBuffer;
9485 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9486 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9487 format = stream_.deviceFormat[0];
9490 buffer = stream_.userBuffer[0];
9491 samples = stream_.bufferSize * stream_.nUserChannels[0];
9492 format = stream_.userFormat;
9495 // Do byte swapping if necessary.
9496 if ( stream_.doByteSwap[0] )
9497 byteSwapBuffer( buffer, samples, format );
// First duplex pass: prime the device with one write while triggers are
// off, then enable input+output triggers together so the two directions
// start in sync (SNDCTL_DSP_SETTRIGGER).
9499 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9501 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9502 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9503 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9504 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9505 handle->triggered = true;
9508 // Write samples to device.
9509 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9511 if ( result == -1 ) {
9512 // We'll assume this is an underrun, though there isn't a
9513 // specific means for determining that.
9514 handle->xrun[0] = true;
9515 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9516 error( RtAudioError::WARNING );
9517 // Continue on to input section.
// ---- Input (capture) side ----
9521 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9523 // Setup parameters.
9524 if ( stream_.doConvertBuffer[1] ) {
9525 buffer = stream_.deviceBuffer;
9526 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9527 format = stream_.deviceFormat[1];
9530 buffer = stream_.userBuffer[1];
9531 samples = stream_.bufferSize * stream_.nUserChannels[1];
9532 format = stream_.userFormat;
9535 // Read samples from device.
9536 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9538 if ( result == -1 ) {
9539 // We'll assume this is an overrun, though there isn't a
9540 // specific means for determining that.
9541 handle->xrun[1] = true;
9542 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9543 error( RtAudioError::WARNING );
9547 // Do byte swapping if necessary.
9548 if ( stream_.doByteSwap[1] )
9549 byteSwapBuffer( buffer, samples, format );
9551 // Do buffer conversion if necessary.
9552 if ( stream_.doConvertBuffer[1] )
9553 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9557 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; honor a deferred (drain) stop request.
9559 RtApi::tickStreamTime();
9560 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the OSS callback thread (created in
// probeDeviceOpen). Repeatedly drives callbackEvent() until
// CallbackInfo::isRunning is cleared (by closeStream() or error()).
9563 static void *ossCallbackHandler( void *ptr )
9565 CallbackInfo *info = (CallbackInfo *) ptr;
9566 RtApiOss *object = (RtApiOss *) info->object;
9567 bool *isRunning = &info->isRunning;
// Diagnostic only: report whether the realtime (SCHED_RR) scheduling
// requested via RTAUDIO_SCHEDULE_REALTIME actually took effect.
9569 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9570 if (info->doRealtime) {
9571 std::cerr << "RtAudio oss: " <<
9572 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9573 "running realtime scheduling" << std::endl;
// pthread_testcancel() provides a cancellation point each iteration.
9577 while ( *isRunning == true ) {
9578 pthread_testcancel();
9579 object->callbackEvent();
9582 pthread_exit( NULL );
9585 //******************** End of __LINUX_OSS__ *********************//
9589 // *************************************************** //
9591 // Protected common (OS-independent) RtAudio methods.
9593 // *************************************************** //
9595 // This method can be modified to control the behavior of error
9596 // message printing.
// Central error dispatch for all APIs: routes errorText_ either to the
// user-registered error callback or, by default, prints warnings to stderr
// and throws RtAudioError for anything stronger.
9597 void RtApi :: error( RtAudioError::Type type )
9599 errorStream_.str(""); // clear the ostringstream
9601 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9602 if ( errorCallback ) {
9603 // abortStream() can generate new error messages. Ignore them. Just keep original one.
// firstErrorOccurred_ guards against re-entrant error() calls (e.g. from
// abortStream() below) replacing or re-reporting the original message.
9605 if ( firstErrorOccurred_ )
9608 firstErrorOccurred_ = true;
// Copy errorText_ before it can be overwritten by nested errors.
9609 const std::string errorMessage = errorText_;
// A non-warning error on an active stream also shuts down the callback
// thread by clearing its run flag.
9611 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9612 stream_.callbackInfo.isRunning = false; // exit from the thread
9616 errorCallback( type, errorMessage );
9617 firstErrorOccurred_ = false;
// No callback registered: warnings go to stderr (if enabled), everything
// else is thrown as an exception the caller must handle.
9621 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9622 std::cerr << '\n' << errorText_ << "\n\n";
9623 else if ( type != RtAudioError::WARNING )
9624 throw( RtAudioError( errorText_, type ) );
// Guard helper: raise INVALID_USE (via error(), which throws when no error
// callback is set) if no stream is currently open.
9627 void RtApi :: verifyStream()
9629 if ( stream_.state == STREAM_CLOSED ) {
9630 errorText_ = "RtApi:: a stream is not open!";
9631 error( RtAudioError::INVALID_USE );
// Reset every field of the shared stream_ structure to its pristine state.
// Called so a subsequent openStream() starts from a known-clean baseline.
// Note: this only clears bookkeeping; it does not free buffers or close
// handles — that is the API-specific closeStream()'s job.
9635 void RtApi :: clearStreamInfo()
9637 stream_.mode = UNINITIALIZED;
9638 stream_.state = STREAM_CLOSED;
9639 stream_.sampleRate = 0;
9640 stream_.bufferSize = 0;
9641 stream_.nBuffers = 0;
9642 stream_.userFormat = 0;
9643 stream_.userInterleaved = true;
9644 stream_.streamTime = 0.0;
9645 stream_.apiHandle = 0;
9646 stream_.deviceBuffer = 0;
9647 stream_.callbackInfo.callback = 0;
9648 stream_.callbackInfo.userData = 0;
9649 stream_.callbackInfo.isRunning = false;
9650 stream_.callbackInfo.errorCallback = 0;
// Per-direction fields: index 0 = output, index 1 = input.
9651 for ( int i=0; i<2; i++ ) {
// 11111 is a sentinel meaning "no device selected".
9652 stream_.device[i] = 11111;
9653 stream_.doConvertBuffer[i] = false;
9654 stream_.deviceInterleaved[i] = true;
9655 stream_.doByteSwap[i] = false;
9656 stream_.nUserChannels[i] = 0;
9657 stream_.nDeviceChannels[i] = 0;
9658 stream_.channelOffset[i] = 0;
9659 stream_.deviceFormat[i] = 0;
9660 stream_.latency[i] = 0;
9661 stream_.userBuffer[i] = 0;
9662 stream_.convertInfo[i].channels = 0;
9663 stream_.convertInfo[i].inJump = 0;
9664 stream_.convertInfo[i].outJump = 0;
9665 stream_.convertInfo[i].inFormat = 0;
9666 stream_.convertInfo[i].outFormat = 0;
9667 stream_.convertInfo[i].inOffset.clear();
9668 stream_.convertInfo[i].outOffset.clear();
// Return the size in bytes of one sample of the given RtAudioFormat
// (e.g. SINT16 -> 2, SINT32/FLOAT32 -> 4, FLOAT64 -> 8, SINT24 -> 3,
// SINT8 -> 1 — NOTE(review): return values are on elided lines; confirm
// against the full source). An unrecognized format only warns.
9672 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9674 if ( format == RTAUDIO_SINT16 )
9676 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9678 else if ( format == RTAUDIO_FLOAT64 )
9680 else if ( format == RTAUDIO_SINT24 )
9682 else if ( format == RTAUDIO_SINT8 )
// Fall-through: unknown format is a programming error; report as WARNING.
9685 errorText_ = "RtApi::formatBytes: undefined format.";
9686 error( RtAudioError::WARNING );
// Populate stream_.convertInfo[mode] for convertBuffer(): source/target
// formats, per-frame jumps, channel count, and the per-channel in/out
// offset tables that implement interleaving, deinterleaving, and the
// firstChannel offset.
9691 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
// Direction of conversion depends on mode: INPUT converts device->user,
// OUTPUT converts user->device.
9693 if ( mode == INPUT ) { // convert device to user buffer
9694 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9695 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9696 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9697 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9699 else { // convert user to device buffer
9700 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9701 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9702 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9703 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
// Only as many channels as both sides have can be converted.
9706 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9707 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9709 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9711 // Set up the interleave/deinterleave offsets.
// Non-interleaved data lays channels out as contiguous planes of
// bufferSize frames; interleaved data strides by the channel count.
// A jump of 1 marks the non-interleaved (planar) side.
9712 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
9713 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9714 ( mode == INPUT && stream_.userInterleaved ) ) {
9715 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9716 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9717 stream_.convertInfo[mode].outOffset.push_back( k );
9718 stream_.convertInfo[mode].inJump = 1;
9722 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9723 stream_.convertInfo[mode].inOffset.push_back( k );
9724 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9725 stream_.convertInfo[mode].outJump = 1;
9729 else { // no (de)interleaving
9730 if ( stream_.userInterleaved ) {
9731 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9732 stream_.convertInfo[mode].inOffset.push_back( k );
9733 stream_.convertInfo[mode].outOffset.push_back( k );
9737 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9738 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9739 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9740 stream_.convertInfo[mode].inJump = 1;
9741 stream_.convertInfo[mode].outJump = 1;
9746 // Add channel offset.
// Shift the device-side offsets by firstChannel: by whole channels when
// the device side is interleaved, by whole planes when it is planar.
9747 if ( firstChannel > 0 ) {
9748 if ( stream_.deviceInterleaved[mode] ) {
9749 if ( mode == OUTPUT ) {
9750 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9751 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9754 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9755 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9759 if ( mode == OUTPUT ) {
9760 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9761 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9764 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9765 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
9771 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9773 // This function does format conversion, input/output channel compensation, and
9774 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9775 // the lower three bytes of a 32-bit integer.
9777 // Clear our device buffer when in/out duplex device channels are different
9778 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9779 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9780 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9783 if (info.outFormat == RTAUDIO_FLOAT64) {
9785 Float64 *out = (Float64 *)outBuffer;
9787 if (info.inFormat == RTAUDIO_SINT8) {
9788 signed char *in = (signed char *)inBuffer;
9789 scale = 1.0 / 127.5;
9790 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9791 for (j=0; j<info.channels; j++) {
9792 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9793 out[info.outOffset[j]] += 0.5;
9794 out[info.outOffset[j]] *= scale;
9797 out += info.outJump;
9800 else if (info.inFormat == RTAUDIO_SINT16) {
9801 Int16 *in = (Int16 *)inBuffer;
9802 scale = 1.0 / 32767.5;
9803 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9804 for (j=0; j<info.channels; j++) {
9805 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9806 out[info.outOffset[j]] += 0.5;
9807 out[info.outOffset[j]] *= scale;
9810 out += info.outJump;
9813 else if (info.inFormat == RTAUDIO_SINT24) {
9814 Int24 *in = (Int24 *)inBuffer;
9815 scale = 1.0 / 8388607.5;
9816 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9817 for (j=0; j<info.channels; j++) {
9818 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9819 out[info.outOffset[j]] += 0.5;
9820 out[info.outOffset[j]] *= scale;
9823 out += info.outJump;
9826 else if (info.inFormat == RTAUDIO_SINT32) {
9827 Int32 *in = (Int32 *)inBuffer;
9828 scale = 1.0 / 2147483647.5;
9829 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9830 for (j=0; j<info.channels; j++) {
9831 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9832 out[info.outOffset[j]] += 0.5;
9833 out[info.outOffset[j]] *= scale;
9836 out += info.outJump;
9839 else if (info.inFormat == RTAUDIO_FLOAT32) {
9840 Float32 *in = (Float32 *)inBuffer;
9841 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9842 for (j=0; j<info.channels; j++) {
9843 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9846 out += info.outJump;
9849 else if (info.inFormat == RTAUDIO_FLOAT64) {
9850 // Channel compensation and/or (de)interleaving only.
9851 Float64 *in = (Float64 *)inBuffer;
9852 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9853 for (j=0; j<info.channels; j++) {
9854 out[info.outOffset[j]] = in[info.inOffset[j]];
9857 out += info.outJump;
9861 else if (info.outFormat == RTAUDIO_FLOAT32) {
9863 Float32 *out = (Float32 *)outBuffer;
9865 if (info.inFormat == RTAUDIO_SINT8) {
9866 signed char *in = (signed char *)inBuffer;
9867 scale = (Float32) ( 1.0 / 127.5 );
9868 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9869 for (j=0; j<info.channels; j++) {
9870 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9871 out[info.outOffset[j]] += 0.5;
9872 out[info.outOffset[j]] *= scale;
9875 out += info.outJump;
9878 else if (info.inFormat == RTAUDIO_SINT16) {
9879 Int16 *in = (Int16 *)inBuffer;
9880 scale = (Float32) ( 1.0 / 32767.5 );
9881 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9882 for (j=0; j<info.channels; j++) {
9883 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9884 out[info.outOffset[j]] += 0.5;
9885 out[info.outOffset[j]] *= scale;
9888 out += info.outJump;
9891 else if (info.inFormat == RTAUDIO_SINT24) {
9892 Int24 *in = (Int24 *)inBuffer;
9893 scale = (Float32) ( 1.0 / 8388607.5 );
9894 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9895 for (j=0; j<info.channels; j++) {
9896 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
9897 out[info.outOffset[j]] += 0.5;
9898 out[info.outOffset[j]] *= scale;
9901 out += info.outJump;
9904 else if (info.inFormat == RTAUDIO_SINT32) {
9905 Int32 *in = (Int32 *)inBuffer;
9906 scale = (Float32) ( 1.0 / 2147483647.5 );
9907 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9908 for (j=0; j<info.channels; j++) {
9909 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9910 out[info.outOffset[j]] += 0.5;
9911 out[info.outOffset[j]] *= scale;
9914 out += info.outJump;
9917 else if (info.inFormat == RTAUDIO_FLOAT32) {
9918 // Channel compensation and/or (de)interleaving only.
9919 Float32 *in = (Float32 *)inBuffer;
9920 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9921 for (j=0; j<info.channels; j++) {
9922 out[info.outOffset[j]] = in[info.inOffset[j]];
9925 out += info.outJump;
9928 else if (info.inFormat == RTAUDIO_FLOAT64) {
9929 Float64 *in = (Float64 *)inBuffer;
9930 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9931 for (j=0; j<info.channels; j++) {
9932 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9935 out += info.outJump;
9939 else if (info.outFormat == RTAUDIO_SINT32) {
9940 Int32 *out = (Int32 *)outBuffer;
9941 if (info.inFormat == RTAUDIO_SINT8) {
9942 signed char *in = (signed char *)inBuffer;
9943 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9944 for (j=0; j<info.channels; j++) {
9945 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9946 out[info.outOffset[j]] <<= 24;
9949 out += info.outJump;
9952 else if (info.inFormat == RTAUDIO_SINT16) {
9953 Int16 *in = (Int16 *)inBuffer;
9954 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9955 for (j=0; j<info.channels; j++) {
9956 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9957 out[info.outOffset[j]] <<= 16;
9960 out += info.outJump;
9963 else if (info.inFormat == RTAUDIO_SINT24) {
9964 Int24 *in = (Int24 *)inBuffer;
9965 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9966 for (j=0; j<info.channels; j++) {
9967 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
9968 out[info.outOffset[j]] <<= 8;
9971 out += info.outJump;
9974 else if (info.inFormat == RTAUDIO_SINT32) {
9975 // Channel compensation and/or (de)interleaving only.
9976 Int32 *in = (Int32 *)inBuffer;
9977 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9978 for (j=0; j<info.channels; j++) {
9979 out[info.outOffset[j]] = in[info.inOffset[j]];
9982 out += info.outJump;
9985 else if (info.inFormat == RTAUDIO_FLOAT32) {
9986 Float32 *in = (Float32 *)inBuffer;
9987 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9988 for (j=0; j<info.channels; j++) {
9989 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
9992 out += info.outJump;
9995 else if (info.inFormat == RTAUDIO_FLOAT64) {
9996 Float64 *in = (Float64 *)inBuffer;
9997 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9998 for (j=0; j<info.channels; j++) {
9999 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10002 out += info.outJump;
10006 else if (info.outFormat == RTAUDIO_SINT24) {
10007 Int24 *out = (Int24 *)outBuffer;
10008 if (info.inFormat == RTAUDIO_SINT8) {
10009 signed char *in = (signed char *)inBuffer;
10010 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10011 for (j=0; j<info.channels; j++) {
10012 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10013 //out[info.outOffset[j]] <<= 16;
10016 out += info.outJump;
10019 else if (info.inFormat == RTAUDIO_SINT16) {
10020 Int16 *in = (Int16 *)inBuffer;
10021 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10022 for (j=0; j<info.channels; j++) {
10023 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10024 //out[info.outOffset[j]] <<= 8;
10027 out += info.outJump;
10030 else if (info.inFormat == RTAUDIO_SINT24) {
10031 // Channel compensation and/or (de)interleaving only.
10032 Int24 *in = (Int24 *)inBuffer;
10033 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10034 for (j=0; j<info.channels; j++) {
10035 out[info.outOffset[j]] = in[info.inOffset[j]];
10038 out += info.outJump;
10041 else if (info.inFormat == RTAUDIO_SINT32) {
10042 Int32 *in = (Int32 *)inBuffer;
10043 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10044 for (j=0; j<info.channels; j++) {
10045 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10046 //out[info.outOffset[j]] >>= 8;
10049 out += info.outJump;
10052 else if (info.inFormat == RTAUDIO_FLOAT32) {
10053 Float32 *in = (Float32 *)inBuffer;
10054 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10055 for (j=0; j<info.channels; j++) {
10056 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10059 out += info.outJump;
10062 else if (info.inFormat == RTAUDIO_FLOAT64) {
10063 Float64 *in = (Float64 *)inBuffer;
10064 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10065 for (j=0; j<info.channels; j++) {
10066 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10069 out += info.outJump;
10073 else if (info.outFormat == RTAUDIO_SINT16) {
10074 Int16 *out = (Int16 *)outBuffer;
10075 if (info.inFormat == RTAUDIO_SINT8) {
10076 signed char *in = (signed char *)inBuffer;
10077 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10078 for (j=0; j<info.channels; j++) {
10079 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10080 out[info.outOffset[j]] <<= 8;
10083 out += info.outJump;
10086 else if (info.inFormat == RTAUDIO_SINT16) {
10087 // Channel compensation and/or (de)interleaving only.
10088 Int16 *in = (Int16 *)inBuffer;
10089 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10090 for (j=0; j<info.channels; j++) {
10091 out[info.outOffset[j]] = in[info.inOffset[j]];
10094 out += info.outJump;
10097 else if (info.inFormat == RTAUDIO_SINT24) {
10098 Int24 *in = (Int24 *)inBuffer;
10099 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10100 for (j=0; j<info.channels; j++) {
10101 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10104 out += info.outJump;
10107 else if (info.inFormat == RTAUDIO_SINT32) {
10108 Int32 *in = (Int32 *)inBuffer;
10109 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10110 for (j=0; j<info.channels; j++) {
10111 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10114 out += info.outJump;
10117 else if (info.inFormat == RTAUDIO_FLOAT32) {
10118 Float32 *in = (Float32 *)inBuffer;
10119 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10120 for (j=0; j<info.channels; j++) {
10121 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10124 out += info.outJump;
10127 else if (info.inFormat == RTAUDIO_FLOAT64) {
10128 Float64 *in = (Float64 *)inBuffer;
10129 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10130 for (j=0; j<info.channels; j++) {
10131 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10134 out += info.outJump;
10138 else if (info.outFormat == RTAUDIO_SINT8) {
10139 signed char *out = (signed char *)outBuffer;
10140 if (info.inFormat == RTAUDIO_SINT8) {
10141 // Channel compensation and/or (de)interleaving only.
10142 signed char *in = (signed char *)inBuffer;
10143 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10144 for (j=0; j<info.channels; j++) {
10145 out[info.outOffset[j]] = in[info.inOffset[j]];
10148 out += info.outJump;
10151 if (info.inFormat == RTAUDIO_SINT16) {
10152 Int16 *in = (Int16 *)inBuffer;
10153 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10154 for (j=0; j<info.channels; j++) {
10155 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10158 out += info.outJump;
10161 else if (info.inFormat == RTAUDIO_SINT24) {
10162 Int24 *in = (Int24 *)inBuffer;
10163 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10164 for (j=0; j<info.channels; j++) {
10165 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10168 out += info.outJump;
10171 else if (info.inFormat == RTAUDIO_SINT32) {
10172 Int32 *in = (Int32 *)inBuffer;
10173 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10174 for (j=0; j<info.channels; j++) {
10175 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10178 out += info.outJump;
10181 else if (info.inFormat == RTAUDIO_FLOAT32) {
10182 Float32 *in = (Float32 *)inBuffer;
10183 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10184 for (j=0; j<info.channels; j++) {
10185 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10188 out += info.outJump;
10191 else if (info.inFormat == RTAUDIO_FLOAT64) {
10192 Float64 *in = (Float64 *)inBuffer;
10193 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10194 for (j=0; j<info.channels; j++) {
10195 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10198 out += info.outJump;
10204 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10205 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10206 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
// Reverses the byte order of every sample in `buffer`, in place, converting
// between little-endian and big-endian sample representations.  `samples` is
// a count of samples (not bytes); the per-sample byte width is derived from
// `format` (2 for SINT16, 3 for SINT24, 4 for SINT32/FLOAT32, 8 for FLOAT64).
// NOTE(review): the executable swap/increment statements are elided from this
// listing (the embedded line numbers jump over them, e.g. 10216 -> 10221);
// only the control structure and the original comments are visible here, so
// the per-branch behavior below is described from those comments — confirm
// against the canonical RtAudio source before relying on it.
10208 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
// 16-bit samples: one pairwise swap (bytes 1<->2) per sample.
10214 if ( format == RTAUDIO_SINT16 ) {
10215 for ( unsigned int i=0; i<samples; i++ ) {
10216 // Swap 1st and 2nd bytes.
10221 // Increment 2 bytes.
// 32-bit samples (int or float share the same width): reverse the 4 bytes
// via two pairwise swaps (1<->4, then 2<->3).
10225 else if ( format == RTAUDIO_SINT32 ||
10226 format == RTAUDIO_FLOAT32 ) {
10227 for ( unsigned int i=0; i<samples; i++ ) {
10228 // Swap 1st and 4th bytes.
10233 // Swap 2nd and 3rd bytes.
10239 // Increment 3 more bytes.
// 24-bit samples: swap the outer bytes (1<->3); the middle byte is its own
// mirror and stays in place.
10243 else if ( format == RTAUDIO_SINT24 ) {
10244 for ( unsigned int i=0; i<samples; i++ ) {
10245 // Swap 1st and 3rd bytes.
10250 // Increment 2 more bytes.
// 64-bit float samples: reverse the 8 bytes via four pairwise swaps
// (1<->8, 2<->7, 3<->6, 4<->5).
10254 else if ( format == RTAUDIO_FLOAT64 ) {
10255 for ( unsigned int i=0; i<samples; i++ ) {
10256 // Swap 1st and 8th bytes
10261 // Swap 2nd and 7th bytes
10267 // Swap 3rd and 6th bytes
10273 // Swap 4th and 5th bytes
10279 // Increment 5 more bytes.
10285 // Indentation settings for Vim and Emacs
10287 // Local Variables:
10288 // c-basic-offset: 2
10289 // indent-tabs-mode: nil
10292 // vim: et sts=2 sw=2