1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio GitHub site: https://github.com/thestk/rtaudio
11 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
13 RtAudio: realtime audio i/o C++ classes
14 Copyright (c) 2001-2019 Gary P. Scavone
16 Permission is hereby granted, free of charge, to any person
17 obtaining a copy of this software and associated documentation files
18 (the "Software"), to deal in the Software without restriction,
19 including without limitation the rights to use, copy, modify, merge,
20 publish, distribute, sublicense, and/or sell copies of the Software,
21 and to permit persons to whom the Software is furnished to do so,
22 subject to the following conditions:
24 The above copyright notice and this permission notice shall be
25 included in all copies or substantial portions of the Software.
27 Any person wishing to distribute modifications to the Software is
28 asked to send the modifications to the original developer so that
29 they can be incorporated into the canonical version. This is,
30 however, not a binding provision of this license.
32 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
33 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
34 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
35 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
36 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
37 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
38 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 /************************************************************************/
42 // RtAudio: Version 6.0.0beta1
52 // Static variable definitions.
53 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
54 const unsigned int RtApi::SAMPLE_RATES[] = {
55 4000, 5512, 8000, 9600, 11025, 16000, 22050,
56 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform abstraction for the stream mutex and (on Windows) string helpers.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  // Windows builds: the stream mutex is a Win32 CRITICAL_SECTION.
#define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
#define MUTEX_DESTROY(A) DeleteCriticalSection(A)
#define MUTEX_LOCK(A) EnterCriticalSection(A)
#define MUTEX_UNLOCK(A) LeaveCriticalSection(A)

// Identity overload: the text is already narrow, just wrap it.
// NOTE(review): the opening/closing braces of these two helpers appear to be
// missing from this view of the file (truncated source) — confirm upstream.
static std::string convertCharPointerToStdString(const char *text)
return std::string(text);

// Convert a wide (UTF-16) string to a UTF-8 std::string.
static std::string convertCharPointerToStdString(const wchar_t *text)
// First call with a NULL buffer returns the required size, including the
// terminating NUL; the std::string is sized to length-1 to exclude it.
int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
std::string s( length-1, '\0' );
WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // POSIX builds: pthread mutexes.
#define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
#define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
#define MUTEX_LOCK(A) pthread_mutex_lock(A)
#define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
// No-threading fallback (presumably under an #else lost to truncation): the
// dummy definitions just evaluate the argument so call sites stay valid.
#define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
#define MUTEX_DESTROY(A) abs(*A) // dummy definitions
91 // *************************************************** //
93 // RtAudio definitions.
95 // *************************************************** //
97 std::string RtAudio :: getVersion( void )
99 return RTAUDIO_VERSION;
102 // Define API names and display names.
103 // Must be in same order as API enum.
// API identifier / display-name table. Must be in the same order as the
// RtAudio::Api enum (index by enum value); rows for alsa, jack and asio
// restored here to match the enum positions implied by the surrounding rows.
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "alsa"        , "ALSA" },
  { "pulse"       , "Pulse" },
  { "oss"         , "OpenSoundSystem" },
  { "jack"        , "Jack" },
  { "core"        , "CoreAudio" },
  { "wasapi"      , "WASAPI" },
  { "asio"        , "ASIO" },
  { "ds"          , "DirectSound" },
  { "dummy"       , "Dummy" },
};

// Number of rows above; checked against RtAudio::NUM_APIS at compile time.
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
121 // The order here will control the order of RtAudio's API search in
123 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
124 #if defined(__UNIX_JACK__)
127 #if defined(__LINUX_PULSE__)
128 RtAudio::LINUX_PULSE,
130 #if defined(__LINUX_ALSA__)
133 #if defined(__LINUX_OSS__)
136 #if defined(__WINDOWS_ASIO__)
137 RtAudio::WINDOWS_ASIO,
139 #if defined(__WINDOWS_WASAPI__)
140 RtAudio::WINDOWS_WASAPI,
142 #if defined(__WINDOWS_DS__)
145 #if defined(__MACOSX_CORE__)
146 RtAudio::MACOSX_CORE,
148 #if defined(__RTAUDIO_DUMMY__)
149 RtAudio::RTAUDIO_DUMMY,
151 RtAudio::UNSPECIFIED,
154 extern "C" const unsigned int rtaudio_num_compiled_apis =
155 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
158 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
159 // If the build breaks here, check that they match.
160 template<bool b> class StaticAssert { private: StaticAssert() {} };
161 template<> class StaticAssert<true>{ public: StaticAssert() {} };
162 class StaticAssertions { StaticAssertions() {
163 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
166 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
168 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
169 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
172 std::string RtAudio :: getApiName( RtAudio::Api api )
174 if (api < 0 || api >= RtAudio::NUM_APIS)
176 return rtaudio_api_names[api][0];
179 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
181 if (api < 0 || api >= RtAudio::NUM_APIS)
183 return rtaudio_api_names[api][1];
186 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
189 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
190 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
191 return rtaudio_compiled_apis[i];
192 return RtAudio::UNSPECIFIED;
195 void RtAudio :: openRtApi( RtAudio::Api api )
201 #if defined(__UNIX_JACK__)
202 if ( api == UNIX_JACK )
203 rtapi_ = new RtApiJack();
205 #if defined(__LINUX_ALSA__)
206 if ( api == LINUX_ALSA )
207 rtapi_ = new RtApiAlsa();
209 #if defined(__LINUX_PULSE__)
210 if ( api == LINUX_PULSE )
211 rtapi_ = new RtApiPulse();
213 #if defined(__LINUX_OSS__)
214 if ( api == LINUX_OSS )
215 rtapi_ = new RtApiOss();
217 #if defined(__WINDOWS_ASIO__)
218 if ( api == WINDOWS_ASIO )
219 rtapi_ = new RtApiAsio();
221 #if defined(__WINDOWS_WASAPI__)
222 if ( api == WINDOWS_WASAPI )
223 rtapi_ = new RtApiWasapi();
225 #if defined(__WINDOWS_DS__)
226 if ( api == WINDOWS_DS )
227 rtapi_ = new RtApiDs();
229 #if defined(__MACOSX_CORE__)
230 if ( api == MACOSX_CORE )
231 rtapi_ = new RtApiCore();
233 #if defined(__RTAUDIO_DUMMY__)
234 if ( api == RTAUDIO_DUMMY )
235 rtapi_ = new RtApiDummy();
// Constructor: open the requested API, or probe the compiled API list for
// the first one that reports at least one device.
// NOTE(review): several lines of this constructor (the openRtApi() call,
// if-guards around the error paths, and closing braces) appear to be missing
// from this view of the file — the fragments below are kept as found.
RtAudio :: RtAudio( RtAudio::Api api, RtAudioErrorCallback errorCallback )
std::string errorMessage;
if ( api != UNSPECIFIED ) {
// Attempt to open the specified API.
// If the API opened, install the caller's error callback and return
// (presumably inside an "if ( rtapi_ )" guard lost to truncation).
if ( errorCallback ) rtapi_->setErrorCallback( errorCallback );
// No compiled support for specified API value. Issue a warning
// and continue as if no API was specified.
errorMessage = "RtAudio: no compiled support for specified API argument!";
// Report via the callback when provided, otherwise to stderr.
errorCallback( RTAUDIO_INVALID_USE, errorMessage );
std::cerr << '\n' << errorMessage << '\n' << std::endl;
// Iterate through the compiled APIs and return as soon as we find
// one with at least one device or we reach the end of the list.
std::vector< RtAudio::Api > apis;
getCompiledApi( apis );
for ( unsigned int i=0; i<apis.size(); i++ ) {
openRtApi( apis[i] );
if ( rtapi_ && rtapi_->getDeviceCount() ) break;
// Success path: install the error callback on the chosen API.
if ( errorCallback ) rtapi_->setErrorCallback( errorCallback );
// It should not be possible to get here because the preprocessor
// definition __RTAUDIO_DUMMY__ is automatically defined in RtAudio.h
// if no API-specific definitions are passed to the compiler. But just
// in case something weird happens, issue an error message and abort.
errorMessage = "RtAudio: no compiled API support found ... critical error!";
errorCallback( RTAUDIO_INVALID_USE, errorMessage );
std::cerr << '\n' << errorMessage << '\n' << std::endl;
288 RtAudio :: ~RtAudio()
294 RtAudioErrorType RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
295 RtAudio::StreamParameters *inputParameters,
296 RtAudioFormat format, unsigned int sampleRate,
297 unsigned int *bufferFrames,
298 RtAudioCallback callback, void *userData,
299 RtAudio::StreamOptions *options )
301 return rtapi_->openStream( outputParameters, inputParameters, format,
302 sampleRate, bufferFrames, callback,
306 // *************************************************** //
308 // Public RtApi definitions (see end of file for
309 // private or protected utility functions).
311 // *************************************************** //
// NOTE(review): the RtApi constructor/destructor signatures enclosing these
// statements are missing from this view. The first two lines appear to belong
// to RtApi::RtApi() (create the stream mutex, default warnings on) and the
// last to RtApi::~RtApi() (destroy the mutex) — confirm upstream.
MUTEX_INITIALIZE( &stream_.mutex );
showWarnings_ = true;
MUTEX_DESTROY( &stream_.mutex );
// Validate all user parameters, probe the device(s) via the subclass
// probeDeviceOpen(), and record the callback info.
// NOTE(review): many closing braces and a few guard lines (e.g. the
// "if ( oParams )"/"if ( iParams )" wrappers and a "bool result;"
// declaration) appear missing from this view — fragments kept as found.
RtAudioErrorType RtApi :: openStream( RtAudio::StreamParameters *oParams,
RtAudio::StreamParameters *iParams,
RtAudioFormat format, unsigned int sampleRate,
unsigned int *bufferFrames,
RtAudioCallback callback, void *userData,
RtAudio::StreamOptions *options )
// Only one stream per RtApi instance.
if ( stream_.state != STREAM_CLOSED ) {
errorText_ = "RtApi::openStream: a stream is already open!";
return error( RTAUDIO_INVALID_USE );
// Clear stream information potentially left from a previously open stream.
if ( oParams && oParams->nChannels < 1 ) {
errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
return error( RTAUDIO_INVALID_USE );
if ( iParams && iParams->nChannels < 1 ) {
errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
return error( RTAUDIO_INVALID_USE );
// At least one direction must be requested.
if ( oParams == NULL && iParams == NULL ) {
errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
return error( RTAUDIO_INVALID_USE );
// formatBytes() returns 0 for unknown format flags.
if ( formatBytes(format) == 0 ) {
errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
return error( RTAUDIO_INVALID_USE );
// Validate the device ids against the current device count.
unsigned int nDevices = getDeviceCount();
unsigned int oChannels = 0;
oChannels = oParams->nChannels;
if ( oParams->deviceId >= nDevices ) {
errorText_ = "RtApi::openStream: output device parameter value is invalid.";
return error( RTAUDIO_INVALID_USE );
unsigned int iChannels = 0;
iChannels = iParams->nChannels;
if ( iParams->deviceId >= nDevices ) {
errorText_ = "RtApi::openStream: input device parameter value is invalid.";
return error( RTAUDIO_INVALID_USE );
// Probe/open the output side first, then the input side.
if ( oChannels > 0 ) {
result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
sampleRate, format, bufferFrames, options );
if ( result == false ) {
return error( RTAUDIO_SYSTEM_ERROR );
if ( iChannels > 0 ) {
result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
sampleRate, format, bufferFrames, options );
if ( result == false ) {
return error( RTAUDIO_SYSTEM_ERROR );
// Record the user callback and report the buffer count back to the caller.
stream_.callbackInfo.callback = (void *) callback;
stream_.callbackInfo.userData = userData;
if ( options ) options->numberOfBuffers = stream_.nBuffers;
stream_.state = STREAM_STOPPED;
return RTAUDIO_NO_ERROR;
408 unsigned int RtApi :: getDefaultInputDevice( void )
410 // Should be implemented in subclasses if possible.
414 unsigned int RtApi :: getDefaultOutputDevice( void )
416 // Should be implemented in subclasses if possible.
420 void RtApi :: closeStream( void )
422 // MUST be implemented in subclasses!
426 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
427 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
428 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
429 RtAudio::StreamOptions * /*options*/ )
431 // MUST be implemented in subclasses!
435 void RtApi :: tickStreamTime( void )
437 // Subclasses that do not provide their own implementation of
438 // getStreamTime should call this function once per buffer I/O to
439 // provide basic stream time support.
441 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
444 #if defined( HAVE_GETTIMEOFDAY )
445 gettimeofday( &stream_.lastTickTimestamp, NULL );
450 long RtApi :: getStreamLatency( void )
452 long totalLatency = 0;
453 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
454 totalLatency = stream_.latency[0];
455 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
456 totalLatency += stream_.latency[1];
462 double RtApi :: getStreamTime( void )
464 #if defined( HAVE_GETTIMEOFDAY )
465 // Return a very accurate estimate of the stream time by
466 // adding in the elapsed time since the last tick.
470 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
471 return stream_.streamTime;
473 gettimeofday( &now, NULL );
474 then = stream_.lastTickTimestamp;
475 return stream_.streamTime +
476 ((now.tv_sec + 0.000001 * now.tv_usec) -
477 (then.tv_sec + 0.000001 * then.tv_usec));
479 return stream_.streamTime;
484 void RtApi :: setStreamTime( double time )
487 stream_.streamTime = time;
489 #if defined( HAVE_GETTIMEOFDAY )
490 gettimeofday( &stream_.lastTickTimestamp, NULL );
495 unsigned int RtApi :: getStreamSampleRate( void )
497 if ( isStreamOpen() ) return stream_.sampleRate;
502 // *************************************************** //
504 // OS/API-specific methods.
506 // *************************************************** //
508 #if defined(__MACOSX_CORE__)
510 // The OS X CoreAudio API is designed to use a separate callback
511 // procedure for each of its audio devices. A single RtAudio duplex
512 // stream using two different devices is supported here, though it
513 // cannot be guaranteed to always behave correctly because we cannot
514 // synchronize these two callbacks.
516 // A property listener is installed for over/underrun information.
517 // However, no functionality is currently provided to allow property
518 // listeners to trigger user handlers because it is unclear what could
519 // be done if a critical stream parameter (buffer size, sample rate,
520 // device disconnect) notification arrived. The listeners entail
521 // quite a bit of extra code and most likely, a user program wouldn't
522 // be prepared for the result anyway. However, we do provide a flag
523 // to the client callback function to inform of an over/underrun.
525 // A structure to hold various information related to the CoreAudio API
528 AudioDeviceID id[2]; // device ids
529 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
530 AudioDeviceIOProcID procId[2];
532 UInt32 iStream[2]; // device stream index (or first if using multiple)
533 UInt32 nStreams[2]; // number of streams to use
536 pthread_cond_t condition;
537 int drainCounter; // Tracks callback counts when draining
538 bool internalDrain; // Indicates if stop is initiated from callback or not.
539 bool xrunListenerAdded[2];
540 bool disconnectListenerAdded[2];
543 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; procId[0] = 0; procId[1] = 0; xrun[0] = false; xrun[1] = false; xrunListenerAdded[0] = false; xrunListenerAdded[1] = false; disconnectListenerAdded[0] = false; disconnectListenerAdded[1] = false; }
546 RtApiCore:: RtApiCore()
548 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
549 // This is a largely undocumented but absolutely necessary
550 // requirement starting with OS-X 10.6. If not called, queries and
551 // updates to various audio device properties are not handled
553 CFRunLoopRef theRunLoop = NULL;
554 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
555 kAudioObjectPropertyScopeGlobal,
556 kAudioObjectPropertyElementMaster };
557 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
558 if ( result != noErr ) {
559 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
560 error( RTAUDIO_SYSTEM_ERROR );
565 RtApiCore :: ~RtApiCore()
567 // The subclass destructor gets called before the base class
568 // destructor, so close an existing stream before deallocating
569 // apiDeviceId memory.
570 if ( stream_.state != STREAM_CLOSED ) closeStream();
573 unsigned int RtApiCore :: getDeviceCount( void )
575 // Find out how many audio devices there are, if any.
577 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
578 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
579 if ( result != noErr ) {
580 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
581 error( RTAUDIO_SYSTEM_ERROR );
585 return dataSize / sizeof( AudioDeviceID );
// Map the HAL's default-input AudioDeviceID to RtAudio's device index by
// locating it in the full device list.
// NOTE(review): the "AudioDeviceID id;" declaration, braces and the
// error/fallback "return 0;" lines appear missing from this view —
// fragments kept as found.
unsigned int RtApiCore :: getDefaultInputDevice( void )
unsigned int nDevices = getDeviceCount();
if ( nDevices <= 1 ) return 0;
UInt32 dataSize = sizeof( AudioDeviceID );
AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
if ( result != noErr ) {
errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
error( RTAUDIO_SYSTEM_ERROR );
// Fetch the full device list and search it for the default id.
dataSize *= nDevices;
AudioDeviceID deviceList[ nDevices ];
property.mSelector = kAudioHardwarePropertyDevices;
result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
if ( result != noErr ) {
errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
error( RTAUDIO_SYSTEM_ERROR );
for ( unsigned int i=0; i<nDevices; i++ )
if ( id == deviceList[i] ) return i;
errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
error( RTAUDIO_WARNING );
// Map the HAL's default-output AudioDeviceID to RtAudio's device index by
// locating it in the full device list (mirror of getDefaultInputDevice).
// NOTE(review): the "AudioDeviceID id;" declaration, braces and the
// error/fallback "return 0;" lines appear missing from this view —
// fragments kept as found.
unsigned int RtApiCore :: getDefaultOutputDevice( void )
unsigned int nDevices = getDeviceCount();
if ( nDevices <= 1 ) return 0;
UInt32 dataSize = sizeof( AudioDeviceID );
AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
if ( result != noErr ) {
errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
error( RTAUDIO_SYSTEM_ERROR );
// Fetch the full device list and search it for the default id.
dataSize = sizeof( AudioDeviceID ) * nDevices;
AudioDeviceID deviceList[ nDevices ];
property.mSelector = kAudioHardwarePropertyDevices;
result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
if ( result != noErr ) {
errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
error( RTAUDIO_SYSTEM_ERROR );
for ( unsigned int i=0; i<nDevices; i++ )
if ( id == deviceList[i] ) return i;
errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
error( RTAUDIO_WARNING );
// Build a DeviceInfo for one CoreAudio device: name, channel counts,
// supported/preferred/current sample rates, native format, default flags.
// NOTE(review): this function is heavily truncated in this view — many
// closing braces, "return info;" lines, CFStringRef/Float64 declarations and
// free()/CFRelease() cleanup lines appear to be missing. Fragments kept as
// found; confirm against the upstream source before editing.
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
RtAudio::DeviceInfo info;
// Validate the requested device index.
unsigned int nDevices = getDeviceCount();
if ( nDevices == 0 ) {
errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
error( RTAUDIO_INVALID_USE );
if ( device >= nDevices ) {
errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
error( RTAUDIO_INVALID_USE );
// Translate the index into a HAL AudioDeviceID.
AudioDeviceID deviceList[ nDevices ];
UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
kAudioObjectPropertyScopeGlobal,
kAudioObjectPropertyElementMaster };
OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
0, NULL, &dataSize, (void *) &deviceList );
if ( result != noErr ) {
errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
error( RTAUDIO_WARNING );
AudioDeviceID id = deviceList[ device ];
// Get the device name.
dataSize = sizeof( CFStringRef );
property.mSelector = kAudioObjectPropertyManufacturer;
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
if ( result != noErr ) {
errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
errorText_ = errorStream_.str();
error( RTAUDIO_WARNING );
// Convert the CFString manufacturer name; *3+1 leaves room for UTF-8
// expansion of the CFStringGetLength() character count.
//const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
long length = CFStringGetLength(cfname);
char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
info.name.append( (const char *)mname, strlen(mname) );
info.name.append( ": " );
// Append the device name after "manufacturer: ".
property.mSelector = kAudioObjectPropertyName;
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
if ( result != noErr ) {
errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
errorText_ = errorStream_.str();
error( RTAUDIO_WARNING );
//const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
length = CFStringGetLength(cfname);
char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
info.name.append( (const char *)name, strlen(name) );
// Get the output stream "configuration".
AudioBufferList *bufferList = nil;
property.mSelector = kAudioDevicePropertyStreamConfiguration;
property.mScope = kAudioDevicePropertyScopeOutput;
// property.mElement = kAudioObjectPropertyElementWildcard;
result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
if ( result != noErr || dataSize == 0 ) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
errorText_ = errorStream_.str();
error( RTAUDIO_WARNING );
// Allocate the AudioBufferList.
bufferList = (AudioBufferList *) malloc( dataSize );
if ( bufferList == NULL ) {
errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
error( RTAUDIO_WARNING );
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
if ( result != noErr || dataSize == 0 ) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
errorText_ = errorStream_.str();
error( RTAUDIO_WARNING );
// Get output channel information: sum channels across all streams.
unsigned int i, nStreams = bufferList->mNumberBuffers;
for ( i=0; i<nStreams; i++ )
info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
// Get the input stream "configuration".
property.mScope = kAudioDevicePropertyScopeInput;
result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
if ( result != noErr || dataSize == 0 ) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
errorText_ = errorStream_.str();
error( RTAUDIO_WARNING );
// Allocate the AudioBufferList.
bufferList = (AudioBufferList *) malloc( dataSize );
if ( bufferList == NULL ) {
errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
error( RTAUDIO_WARNING );
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
if (result != noErr || dataSize == 0) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
errorText_ = errorStream_.str();
error( RTAUDIO_WARNING );
// Get input channel information.
nStreams = bufferList->mNumberBuffers;
for ( i=0; i<nStreams; i++ )
info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
// If device opens for both playback and capture, we determine the channels.
if ( info.outputChannels > 0 && info.inputChannels > 0 )
info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Probe the device sample rates.
bool isInput = false;
if ( info.outputChannels == 0 ) isInput = true;
// Determine the supported sample rates.
property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
if ( result != kAudioHardwareNoError || dataSize == 0 ) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
errorText_ = errorStream_.str();
error( RTAUDIO_WARNING );
UInt32 nRanges = dataSize / sizeof( AudioValueRange );
AudioValueRange rangeList[ nRanges ];
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
if ( result != kAudioHardwareNoError ) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
errorText_ = errorStream_.str();
error( RTAUDIO_WARNING );
// The sample rate reporting mechanism is a bit of a mystery. It
// seems that it can either return individual rates or a range of
// rates. I assume that if the min / max range values are the same,
// then that represents a single supported rate and if the min / max
// range values are different, the device supports an arbitrary
// range of values (though there might be multiple ranges, so we'll
// use the most conservative range).
Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
bool haveValueRange = false;
info.sampleRates.clear();
for ( UInt32 i=0; i<nRanges; i++ ) {
if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
info.sampleRates.push_back( tmpSr );
// Prefer the highest discrete rate that does not exceed 48 kHz.
if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
info.preferredSampleRate = tmpSr;
haveValueRange = true;
if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
// For range-style reports, admit every known rate inside the range.
if ( haveValueRange ) {
for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
info.sampleRates.push_back( SAMPLE_RATES[k] );
if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
info.preferredSampleRate = SAMPLE_RATES[k];
// Sort and remove any redundant values
std::sort( info.sampleRates.begin(), info.sampleRates.end() );
info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
if ( info.sampleRates.size() == 0 ) {
errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
errorText_ = errorStream_.str();
error( RTAUDIO_WARNING );
// Probe the currently configured sample rate
dataSize = sizeof( Float64 );
property.mSelector = kAudioDevicePropertyNominalSampleRate;
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
if ( result == noErr ) info.currentSampleRate = (unsigned int) nominalRate;
// CoreAudio always uses 32-bit floating point data for PCM streams.
// Thus, any other "physical" formats supported by the device are of
// no interest to the client.
info.nativeFormats = RTAUDIO_FLOAT32;
// Flag the system default devices.
if ( info.outputChannels > 0 )
if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
if ( info.inputChannels > 0 )
if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// CoreAudio IOProc trampoline: forward each device I/O cycle to the owning
// RtApiCore instance via the CallbackInfo cookie.
// NOTE(review): the trailing "void* infoPointer" parameter line and the
// function braces appear missing from this view — fragments kept as found.
static OSStatus callbackHandler( AudioDeviceID inDevice,
const AudioTimeStamp* /*inNow*/,
const AudioBufferList* inInputData,
const AudioTimeStamp* /*inInputTime*/,
AudioBufferList* outOutputData,
const AudioTimeStamp* /*inOutputTime*/,
CallbackInfo *info = (CallbackInfo *) infoPointer;
RtApiCore *object = (RtApiCore *) info->object;
// A false return from callbackEvent maps to a HAL error code.
if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
return kAudioHardwareUnspecifiedError;
return kAudioHardwareNoError;
// Property listener: when the device reports it is no longer alive, mark the
// disconnect in the CallbackInfo and close the stream.
// NOTE(review): the "UInt32 nAddresses"/"void* infoPointer" parameter lines
// and the braces appear missing from this view — fragments kept as found.
static OSStatus disconnectListener( AudioObjectID /*inDevice*/,
const AudioObjectPropertyAddress properties[],
for ( UInt32 i=0; i<nAddresses; i++ ) {
if ( properties[i].mSelector == kAudioDevicePropertyDeviceIsAlive ) {
CallbackInfo *info = (CallbackInfo *) infoPointer;
RtApiCore *object = (RtApiCore *) info->object;
info->deviceDisconnected = true;
object->closeStream();
return kAudioHardwareUnspecifiedError;
return kAudioHardwareNoError;
// Property listener: record processor-overload (over/underrun) notifications
// in the CoreHandle, [1] for the input scope and [0] otherwise.
// NOTE(review): the "UInt32 nAddresses" parameter line, an "else" before the
// output-scope assignment, and the braces appear missing from this view —
// fragments kept as found.
static OSStatus xrunListener( AudioObjectID /*inDevice*/,
const AudioObjectPropertyAddress properties[],
void* handlePointer )
CoreHandle *handle = (CoreHandle *) handlePointer;
for ( UInt32 i=0; i<nAddresses; i++ ) {
if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
handle->xrun[1] = true;
handle->xrun[0] = true;
return kAudioHardwareNoError;
// Probe and open CoreAudio device "device" for one direction of a stream.
//
// Selects the device stream(s) covering the requested channels, clamps and
// sets the buffer size, adjusts the device nominal sample rate, sets the
// virtual (and, if needed, physical) stream formats, allocates the
// CoreHandle and internal buffers, creates the IOProc, and installs the
// xrun and disconnect property listeners.  On failure, errorText_ is set
// and the error path calls closeStream() before returning.
bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";

  if ( device >= nDevices ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";

  // Translate our device index into a CoreAudio AudioDeviceID.
  // NOTE(review): runtime-sized array is a compiler extension in C++.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";

  AudioDeviceID id = deviceList[ device ];

  // Setup for stream mode.
  bool isInput = false;
  if ( mode == INPUT ) {
    property.mScope = kAudioDevicePropertyScopeInput;
    // (else branch: output scope)
    property.mScope = kAudioDevicePropertyScopeOutput;

  // Get the stream "configuration".
  AudioBufferList *bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();

  // Search for one or more streams that contain the desired number of
  // channels. CoreAudio devices can have an arbitrary number of
  // streams and each stream can have an arbitrary number of channels.
  // For each stream, a single buffer of interleaved samples is
  // provided. RtAudio prefers the use of one stream of interleaved
  // data or multiple consecutive single-channel streams. However, we
  // now support multiple consecutive multi-channel streams of
  // interleaved data as well.
  UInt32 iStream, offsetCounter = firstChannel;
  UInt32 nStreams = bufferList->mNumberBuffers;
  bool monoMode = false;
  bool foundStream = false;

  // First check that the device supports the requested number of
  UInt32 deviceChannels = 0;
  for ( iStream=0; iStream<nStreams; iStream++ )
    deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;

  if ( deviceChannels < ( channels + firstChannel ) ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
    errorText_ = errorStream_.str();

  // Look for a single stream meeting our needs.
  UInt32 firstStream = 0, streamCount = 1, streamChannels = 0, channelOffset = 0;
  for ( iStream=0; iStream<nStreams; iStream++ ) {
    streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
    if ( streamChannels >= channels + offsetCounter ) {
      // This single stream can hold all requested channels (plus offset).
      firstStream = iStream;
      channelOffset = offsetCounter;
    // If the requested channels start inside this stream but it cannot
    // hold them all, a single stream will not work.
    if ( streamChannels > offsetCounter ) break;
    offsetCounter -= streamChannels;

  // If we didn't find a single stream above, then we should be able
  // to meet the channel specification with multiple streams.
  if ( foundStream == false ) {
    offsetCounter = firstChannel;
    // Skip whole streams until the one containing the first channel.
    for ( iStream=0; iStream<nStreams; iStream++ ) {
      streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
      if ( streamChannels > offsetCounter ) break;
      offsetCounter -= streamChannels;

    firstStream = iStream;
    channelOffset = offsetCounter;
    Int32 channelCounter = channels + offsetCounter - streamChannels;

    // monoMode stays true only while every spanned stream has 1 channel.
    if ( streamChannels > 1 ) monoMode = false;
    while ( channelCounter > 0 ) {
      streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
      if ( streamChannels > 1 ) monoMode = false;
      channelCounter -= streamChannels;

  // Determine the buffer size.
  AudioValueRange bufferRange;
  dataSize = sizeof( AudioValueRange );
  property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );

  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
    errorText_ = errorStream_.str();

  // Clamp the requested size to the device's supported range.
  if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned int) bufferRange.mMinimum;
  else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned int) bufferRange.mMaximum;
  if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned int) bufferRange.mMinimum;

  // Set the buffer size.  For multiple streams, I'm assuming we only
  // need to make this setting for the master channel.
  UInt32 theSize = (UInt32) *bufferSize;
  dataSize = sizeof( UInt32 );
  property.mSelector = kAudioDevicePropertyBufferFrameSize;
  result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );

  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
    errorText_ = errorStream_.str();

  // If attempting to setup a duplex stream, the bufferSize parameter
  // MUST be the same in both directions!
  *bufferSize = theSize;
  if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
    errorText_ = errorStream_.str();

  stream_.bufferSize = *bufferSize;
  stream_.nBuffers = 1;

  // Try to set "hog" mode ... it's not clear to me this is working.
  if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
    dataSize = sizeof( hog_pid );
    property.mSelector = kAudioDevicePropertyHogMode;
    result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
      errorText_ = errorStream_.str();

    // Only claim the device if another process currently owns it.
    if ( hog_pid != getpid() ) {
      result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
      if ( result != noErr ) {
        errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
        errorText_ = errorStream_.str();

  // Check and if necessary, change the sample rate for the device.
  Float64 nominalRate;
  dataSize = sizeof( Float64 );
  property.mSelector = kAudioDevicePropertyNominalSampleRate;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
    errorText_ = errorStream_.str();

  // Only try to change the sample rate if off by more than 1 Hz.
  if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
    nominalRate = (Float64) sampleRate;
    result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
      errorText_ = errorStream_.str();

    // Now wait until the reported nominal rate is what we just set.
    // Polls in 5 ms steps with a 2 second timeout.
    UInt32 microCounter = 0;
    Float64 reportedRate = 0.0;
    while ( reportedRate != nominalRate ) {
      microCounter += 5000;
      if ( microCounter > 2000000 ) break;
      result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &reportedRate );

    if ( microCounter > 2000000 ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
      errorText_ = errorStream_.str();

  // Now set the stream format for all streams.  Also, check the
  // physical format of the device and change that if necessary.
  AudioStreamBasicDescription	description;
  dataSize = sizeof( AudioStreamBasicDescription );
  property.mSelector = kAudioStreamPropertyVirtualFormat;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
    errorText_ = errorStream_.str();

  // Set the sample rate and data format id.  However, only make the
  // change if the sample rate is not within 1.0 of the desired
  // rate and the format is not linear pcm.
  bool updateFormat = false;
  if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
    description.mSampleRate = (Float64) sampleRate;
    updateFormat = true;

  if ( description.mFormatID != kAudioFormatLinearPCM ) {
    description.mFormatID = kAudioFormatLinearPCM;
    updateFormat = true;

  if ( updateFormat ) {
    result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
      errorText_ = errorStream_.str();

  // Now check the physical format.
  property.mSelector = kAudioStreamPropertyPhysicalFormat;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
    errorText_ = errorStream_.str();

  //std::cout << "Current physical stream format:" << std::endl;
  //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
  //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
  //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
  //std::cout << "   sample rate = " << description.mSampleRate << std::endl;

  // If the physical format is not linear PCM with at least 16 bits,
  // try candidate formats from highest to lowest quality.
  if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
    description.mFormatID = kAudioFormatLinearPCM;
    //description.mSampleRate = (Float64) sampleRate;
    AudioStreamBasicDescription	testDescription = description;

    // We'll try higher bit rates first and then work our way down.
    // NOTE(review): the vector stores pair<UInt32, UInt32>, so the
    // fractional "24.2"/"24.4" first members below truncate to 24.
    // This is harmless here because .first is only ever used via a
    // (UInt32) cast and the alignment info lives in the flags, but
    // upstream declares the first member as Float32 — confirm intent.
    std::vector< std::pair<UInt32, UInt32> > physicalFormats;
    formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
    physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
    formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
    physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
    physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
    formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
    physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
    formatFlags |= kAudioFormatFlagIsAlignedHigh;
    physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
    formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
    physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
    physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );

    bool setPhysicalFormat = false;
    for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
      testDescription = description;
      testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
      testDescription.mFormatFlags = physicalFormats[i].second;
      // NOTE(review): "~" below is bitwise NOT, not logical "!", so this
      // condition is effectively always true for the 24-bit entries —
      // all 24-bit candidates get 4-byte frames.  Matches long-standing
      // upstream behaviour; confirm before changing.
      if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
        testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
        testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
      testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
      result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
      if ( result == noErr ) {
        setPhysicalFormat = true;
        //std::cout << "Updated physical stream format:" << std::endl;
        //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
        //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
        //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
        //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;

    if ( !setPhysicalFormat ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
      errorText_ = errorStream_.str();

  } // done setting virtual/physical formats.

  // Get the stream / device latency.
  dataSize = sizeof( UInt32 );
  property.mSelector = kAudioDevicePropertyLatency;
  if ( AudioObjectHasProperty( id, &property ) == true ) {
    result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
    if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
      // Latency query failure is only a warning, not fatal.
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
      errorText_ = errorStream_.str();
      error( RTAUDIO_WARNING );

  // Byte-swapping: According to AudioHardware.h, the stream data will
  // always be presented in native-endian format, so we should never
  // need to byte swap.
  stream_.doByteSwap[mode] = false;

  // From the CoreAudio documentation, PCM data must be supplied as
  stream_.userFormat = format;
  stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;

  if ( streamCount == 1 )
    stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
  else // multiple streams
    stream_.nDeviceChannels[mode] = channels;
  stream_.nUserChannels[mode] = channels;
  stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true;
  if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( streamCount == 1 ) {
    if ( stream_.nUserChannels[mode] > 1 &&
         stream_.userInterleaved != stream_.deviceInterleaved[mode] )
      stream_.doConvertBuffer[mode] = true;
  else if ( monoMode && stream_.userInterleaved )
    stream_.doConvertBuffer[mode] = true;

  // Allocate our CoreHandle structure for the stream.
  CoreHandle *handle = 0;
  if ( stream_.apiHandle == 0 ) {
    handle = new CoreHandle;
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";

    if ( pthread_cond_init( &handle->condition, NULL ) ) {
      errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
    stream_.apiHandle = (void *) handle;
    // (else branch: reuse the handle created when the first direction opened)
    handle = (CoreHandle *) stream_.apiHandle;
  handle->iStream[mode] = firstStream;
  handle->nStreams[mode] = streamCount;
  handle->id[mode] = id;

  // Allocate necessary internal buffers.
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";

  // If possible, we will make use of the CoreAudio stream buffers as
  // "device buffers".  However, we can't do this if using multiple
  if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {

    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( mode == INPUT ) {
      // In duplex mode, the existing output device buffer can be reused
      // if it is at least as large as the input side needs.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
        if ( bufferBytes <= bytesOut ) makeBuffer = false;

      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, sizeof(char) );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";

  stream_.sampleRate = sampleRate;
  stream_.device[mode] = device;
  stream_.state = STREAM_STOPPED;
  stream_.callbackInfo.object = (void *) this;

  // Setup the buffer conversion information structure.
  if ( stream_.doConvertBuffer[mode] ) {
    if ( streamCount > 1 ) setConvertInfo( mode, 0 );
    else setConvertInfo( mode, channelOffset );

  if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
    // Only one callback procedure and property listener per device.
    stream_.mode = DUPLEX;

#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
    // deprecated in favor of AudioDeviceCreateIOProcID()
    result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
    errorText_ = errorStream_.str();

  if ( stream_.mode == OUTPUT && mode == INPUT )
    stream_.mode = DUPLEX;
    stream_.mode = mode;

  // Setup the device property listener for over/underload.
  property.mSelector = kAudioDeviceProcessorOverload;
  property.mScope = kAudioObjectPropertyScopeGlobal;
  result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error setting xrun listener for device (" << device << ").";
    errorText_ = errorStream_.str();
  // Remember that the listener is installed so closeStream() removes it.
  handle->xrunListenerAdded[mode] = true;

  // Setup a listener to detect a possible device disconnect.
  property.mSelector = kAudioDevicePropertyDeviceIsAlive;
  result = AudioObjectAddPropertyListener( id , &property, disconnectListener, (void *) &stream_.callbackInfo );
  if ( result != noErr ) {
    // Roll back the xrun listener before failing.
    AudioObjectRemovePropertyListener( id, &property, xrunListener, (void *) handle );
    errorStream_ << "RtApiCore::probeDeviceOpen: system error setting disconnect listener for device (" << device << ").";
    errorText_ = errorStream_.str();
  handle->disconnectListenerAdded[mode] = true;

  closeStream(); // this should safely clear out procedures, listeners and memory, even for duplex stream
1459 void RtApiCore :: closeStream( void )
1461 if ( stream_.state == STREAM_CLOSED ) {
1462 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1463 error( RTAUDIO_WARNING );
1467 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1468 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1470 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1471 kAudioObjectPropertyScopeGlobal,
1472 kAudioObjectPropertyElementMaster };
1474 if ( handle->xrunListenerAdded[0] ) {
1475 property.mSelector = kAudioDeviceProcessorOverload;
1476 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1477 errorText_ = "RtApiCore::closeStream(): error removing xrun property listener!";
1478 error( RTAUDIO_WARNING );
1481 if ( handle->disconnectListenerAdded[0] ) {
1482 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1483 if (AudioObjectRemovePropertyListener( handle->id[0], &property, disconnectListener, (void *) &stream_.callbackInfo ) != noErr) {
1484 errorText_ = "RtApiCore::closeStream(): error removing disconnect property listener!";
1485 error( RTAUDIO_WARNING );
1489 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1490 if ( handle->procId[0] ) {
1491 if ( stream_.state == STREAM_RUNNING )
1492 AudioDeviceStop( handle->id[0], handle-procId[0] );
1493 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1495 #else // deprecated behaviour
1496 if ( stream_.state == STREAM_RUNNING )
1497 AudioDeviceStop( handle->id[0], callbackHandler );
1498 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
1504 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1506 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1507 kAudioObjectPropertyScopeGlobal,
1508 kAudioObjectPropertyElementMaster };
1510 if ( handle->xrunListenerAdded[1] ) {
1511 property.mSelector = kAudioDeviceProcessorOverload;
1512 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1513 errorText_ = "RtApiCore::closeStream(): error removing xrun property listener!";
1514 error( RTAUDIO_WARNING );
1518 if ( handle->disconnectListenerAdded[0] ) {
1519 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1520 if (AudioObjectRemovePropertyListener( handle->id[1], &property, disconnectListener, (void *) &stream_.callbackInfo ) != noErr) {
1521 errorText_ = "RtApiCore::closeStream(): error removing disconnect property listener!";
1522 error( RTAUDIO_WARNING );
1526 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1527 if ( handle->procId[1] ) {
1528 if ( stream_.state == STREAM_RUNNING )
1529 AudioDeviceStop( handle->id[1], handle->procId[1] );
1530 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1532 #else // deprecated behaviour
1533 if ( stream_.state == STREAM_RUNNING )
1534 AudioDeviceStop( handle->id[1], callbackHandler );
1535 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
1540 for ( int i=0; i<2; i++ ) {
1541 if ( stream_.userBuffer[i] ) {
1542 free( stream_.userBuffer[i] );
1543 stream_.userBuffer[i] = 0;
1547 if ( stream_.deviceBuffer ) {
1548 free( stream_.deviceBuffer );
1549 stream_.deviceBuffer = 0;
1552 // Destroy pthread condition variable.
1553 pthread_cond_signal( &handle->condition ); // signal condition variable in case stopStream is blocked
1554 pthread_cond_destroy( &handle->condition );
1556 stream_.apiHandle = 0;
1558 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1559 if ( info->deviceDisconnected ) {
1560 errorText_ = "RtApiCore: the stream device was disconnected (and closed)!";
1561 error( RTAUDIO_DEVICE_DISCONNECT );
1565 //stream_.mode = UNINITIALIZED;
1566 //stream_.state = STREAM_CLOSED;
// Start the callback procedure(s) for an open, stopped stream.
// Returns RTAUDIO_NO_ERROR on success, a warning if the stream is not in
// the STOPPED state, or RTAUDIO_SYSTEM_ERROR if AudioDeviceStart fails.
RtAudioErrorType RtApiCore :: startStream( void )
  if ( stream_.state != STREAM_STOPPED ) {
    if ( stream_.state == STREAM_RUNNING )
      errorText_ = "RtApiCore::startStream(): the stream is already running!";
    else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
      errorText_ = "RtApiCore::startStream(): the stream is stopping or closed!";
    return error( RTAUDIO_WARNING );

  // Record the start time used for stream time calculations.
#if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  // Start the output-side IOProc (handle index 0).
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    result = AudioDeviceStart( handle->id[0], handle->procId[0] );
#else // deprecated behaviour
    result = AudioDeviceStart( handle->id[0], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();

  // Start the input-side IOProc (handle index 1) when input runs on a
  // separate device (in same-device duplex, one IOProc serves both).
  if ( stream_.mode == INPUT ||
       ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

    // Clear user input buffer
    unsigned long bufferBytes;
    bufferBytes = stream_.nUserChannels[1] * stream_.bufferSize * formatBytes( stream_.userFormat );
    memset( stream_.userBuffer[1], 0, bufferBytes * sizeof(char) );

#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    result = AudioDeviceStart( handle->id[1], handle->procId[1] );
#else // deprecated behaviour
    result = AudioDeviceStart( handle->id[1], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();

  // Reset drain bookkeeping and mark the stream running.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

  if ( result == noErr ) return RTAUDIO_NO_ERROR;
  return error( RTAUDIO_SYSTEM_ERROR );
// Stop a running (or stopping) stream, first letting any queued output
// drain.  Returns RTAUDIO_NO_ERROR on success, a warning for wrong state,
// or RTAUDIO_SYSTEM_ERROR if AudioDeviceStop fails.
RtAudioErrorType RtApiCore :: stopStream( void )
  if ( stream_.state != STREAM_RUNNING && stream_.state != STREAM_STOPPING ) {
    if ( stream_.state == STREAM_STOPPED )
      errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
    else if ( stream_.state == STREAM_CLOSED )
      errorText_ = "RtApiCore::stopStream(): the stream is closed!";
    return error( RTAUDIO_WARNING );

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // drainCounter == 0 means no drain was requested yet (e.g. a direct
    // stopStream() call rather than an abort): request one and wait for
    // callbackEvent() to signal that the output has been played out.
    // NOTE(review): pthread_cond_wait() requires stream_.mutex to be
    // locked by the caller — confirm the locking discipline upstream.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled

#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    result = AudioDeviceStop( handle->id[0], handle->procId[0] );
    result = AudioDeviceStop( handle->id[0], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();

  // Stop the input-side IOProc when input runs on a separate device.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    result = AudioDeviceStop( handle->id[1], handle->procId[1] );
    result = AudioDeviceStop( handle->id[1], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();

  stream_.state = STREAM_STOPPED;

  if ( result == noErr ) return RTAUDIO_NO_ERROR;
  return error( RTAUDIO_SYSTEM_ERROR );
1681 RtAudioErrorType RtApiCore :: abortStream( void )
1683 if ( stream_.state != STREAM_RUNNING ) {
1684 if ( stream_.state == STREAM_STOPPED )
1685 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1686 else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
1687 errorText_ = "RtApiCore::abortStream(): the stream is stopping or closed!";
1688 return error( RTAUDIO_WARNING );
1692 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1693 handle->drainCounter = 2;
1695 stream_.state = STREAM_STOPPING;
1696 return stopStream();
1699 // This function will be called by a spawned thread when the user
1700 // callback function signals that the stream should be stopped or
1701 // aborted. It is better to handle it this way because the
1702 // callbackEvent() function probably should return before the AudioDeviceStop()
1703 // function is called.
1704 static void *coreStopStream( void *ptr )
1706 CallbackInfo *info = (CallbackInfo *) ptr;
1707 RtApiCore *object = (RtApiCore *) info->object;
1709 object->stopStream();
1710 pthread_exit( NULL );
1713 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1714 const AudioBufferList *inBufferList,
1715 const AudioBufferList *outBufferList )
1717 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1718 if ( stream_.state == STREAM_CLOSED ) {
1719 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1720 error( RTAUDIO_WARNING );
1724 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1725 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1727 // Check if we were draining the stream and signal is finished.
1728 if ( handle->drainCounter > 3 ) {
1729 ThreadHandle threadId;
1731 stream_.state = STREAM_STOPPING;
1732 if ( handle->internalDrain == true )
1733 pthread_create( &threadId, NULL, coreStopStream, info );
1734 else // external call to stopStream()
1735 pthread_cond_signal( &handle->condition );
1739 AudioDeviceID outputDevice = handle->id[0];
1741 // Invoke user callback to get fresh output data UNLESS we are
1742 // draining stream or duplex mode AND the input/output devices are
1743 // different AND this function is called for the input device.
1744 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1745 RtAudioCallback callback = (RtAudioCallback) info->callback;
1746 double streamTime = getStreamTime();
1747 RtAudioStreamStatus status = 0;
1748 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1749 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1750 handle->xrun[0] = false;
1752 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1753 status |= RTAUDIO_INPUT_OVERFLOW;
1754 handle->xrun[1] = false;
1757 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1758 stream_.bufferSize, streamTime, status, info->userData );
1759 if ( cbReturnValue == 2 ) {
1763 else if ( cbReturnValue == 1 ) {
1764 handle->drainCounter = 1;
1765 handle->internalDrain = true;
1769 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1771 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1773 if ( handle->nStreams[0] == 1 ) {
1774 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1776 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1778 else { // fill multiple streams with zeros
1779 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1780 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1782 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1786 else if ( handle->nStreams[0] == 1 ) {
1787 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1788 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1789 stream_.userBuffer[0], stream_.convertInfo[0] );
1791 else { // copy from user buffer
1792 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1793 stream_.userBuffer[0],
1794 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1797 else { // fill multiple streams
1798 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1799 if ( stream_.doConvertBuffer[0] ) {
1800 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1801 inBuffer = (Float32 *) stream_.deviceBuffer;
1804 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1805 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1806 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1807 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1808 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1811 else { // fill multiple multi-channel streams with interleaved data
1812 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1815 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1816 UInt32 inChannels = stream_.nUserChannels[0];
1817 if ( stream_.doConvertBuffer[0] ) {
1818 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1819 inChannels = stream_.nDeviceChannels[0];
1822 if ( inInterleaved ) inOffset = 1;
1823 else inOffset = stream_.bufferSize;
1825 channelsLeft = inChannels;
1826 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1828 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1829 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1832 // Account for possible channel offset in first stream
1833 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1834 streamChannels -= stream_.channelOffset[0];
1835 outJump = stream_.channelOffset[0];
1839 // Account for possible unfilled channels at end of the last stream
1840 if ( streamChannels > channelsLeft ) {
1841 outJump = streamChannels - channelsLeft;
1842 streamChannels = channelsLeft;
1845 // Determine input buffer offsets and skips
1846 if ( inInterleaved ) {
1847 inJump = inChannels;
1848 in += inChannels - channelsLeft;
1852 in += (inChannels - channelsLeft) * inOffset;
1855 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1856 for ( unsigned int j=0; j<streamChannels; j++ ) {
1857 *out++ = in[j*inOffset];
1862 channelsLeft -= streamChannels;
1868 // Don't bother draining input
1869 if ( handle->drainCounter ) {
1870 handle->drainCounter++;
1874 AudioDeviceID inputDevice;
1875 inputDevice = handle->id[1];
1876 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1878 if ( handle->nStreams[1] == 1 ) {
1879 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1880 convertBuffer( stream_.userBuffer[1],
1881 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1882 stream_.convertInfo[1] );
1884 else { // copy to user buffer
1885 memcpy( stream_.userBuffer[1],
1886 inBufferList->mBuffers[handle->iStream[1]].mData,
1887 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1890 else { // read from multiple streams
1891 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1892 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1894 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1895 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1896 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1897 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1898 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1901 else { // read from multiple multi-channel streams
1902 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1905 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1906 UInt32 outChannels = stream_.nUserChannels[1];
1907 if ( stream_.doConvertBuffer[1] ) {
1908 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1909 outChannels = stream_.nDeviceChannels[1];
1912 if ( outInterleaved ) outOffset = 1;
1913 else outOffset = stream_.bufferSize;
1915 channelsLeft = outChannels;
1916 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1918 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1919 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1922 // Account for possible channel offset in first stream
1923 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1924 streamChannels -= stream_.channelOffset[1];
1925 inJump = stream_.channelOffset[1];
1929 // Account for possible unread channels at end of the last stream
1930 if ( streamChannels > channelsLeft ) {
1931 inJump = streamChannels - channelsLeft;
1932 streamChannels = channelsLeft;
1935 // Determine output buffer offsets and skips
1936 if ( outInterleaved ) {
1937 outJump = outChannels;
1938 out += outChannels - channelsLeft;
1942 out += (outChannels - channelsLeft) * outOffset;
1945 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1946 for ( unsigned int j=0; j<streamChannels; j++ ) {
1947 out[j*outOffset] = *in++;
1952 channelsLeft -= streamChannels;
1956 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1957 convertBuffer( stream_.userBuffer[1],
1958 stream_.deviceBuffer,
1959 stream_.convertInfo[1] );
1966 // Make sure to only tick duplex stream time once if using two devices
1967 if ( stream_.mode == DUPLEX ) {
1968 if ( handle->id[0] == handle->id[1] ) // same device, only one callback
1969 RtApi::tickStreamTime();
1970 else if ( deviceId == handle->id[0] )
1971 RtApi::tickStreamTime(); // two devices, only tick on the output callback
1973 RtApi::tickStreamTime(); // input or output stream only
1978 const char* RtApiCore :: getErrorCode( OSStatus code )
1982 case kAudioHardwareNotRunningError:
1983 return "kAudioHardwareNotRunningError";
1985 case kAudioHardwareUnspecifiedError:
1986 return "kAudioHardwareUnspecifiedError";
1988 case kAudioHardwareUnknownPropertyError:
1989 return "kAudioHardwareUnknownPropertyError";
1991 case kAudioHardwareBadPropertySizeError:
1992 return "kAudioHardwareBadPropertySizeError";
1994 case kAudioHardwareIllegalOperationError:
1995 return "kAudioHardwareIllegalOperationError";
1997 case kAudioHardwareBadObjectError:
1998 return "kAudioHardwareBadObjectError";
2000 case kAudioHardwareBadDeviceError:
2001 return "kAudioHardwareBadDeviceError";
2003 case kAudioHardwareBadStreamError:
2004 return "kAudioHardwareBadStreamError";
2006 case kAudioHardwareUnsupportedOperationError:
2007 return "kAudioHardwareUnsupportedOperationError";
2009 case kAudioDeviceUnsupportedFormatError:
2010 return "kAudioDeviceUnsupportedFormatError";
2012 case kAudioDevicePermissionsError:
2013 return "kAudioDevicePermissionsError";
2016 return "CoreAudio unknown error";
2020 //******************** End of __MACOSX_CORE__ *********************//
2023 #if defined(__UNIX_JACK__)
2025 // JACK is a low-latency audio server, originally written for the
2026 // GNU/Linux operating system and now also ported to OS-X and
2027 // Windows. It can connect a number of different applications to an
2028 // audio device, as well as allowing them to share audio between
2031 // When using JACK with RtAudio, "devices" refer to JACK clients that
2032 // have ports connected to the server. The JACK server is typically
2033 // started in a terminal as follows:
2035 //    jackd -d alsa -d hw:0
2037 // or through an interface program such as qjackctl. Many of the
2038 // parameters normally set for a stream are fixed by the JACK server
2039 // and can be specified when the JACK server is started. In
2042 //    jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2044 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2045 // frames, and number of buffers = 4. Once the server is running, it
2046 // is not possible to override these values. If the values are not
2047 // specified in the command-line, the JACK server uses default values.
2049 // The JACK server does not have to be running when an instance of
2050 // RtApiJack is created, though the function getDeviceCount() will
2051 // report 0 devices found until JACK has been started. When no
2052 // devices are available (i.e., the JACK server is not running), a
2053 // stream cannot be opened.
2055 #include <jack/jack.h>
2059 // A structure to hold various information related to the Jack API
// NOTE(review): the opening "struct JackHandle {" line and some members
// (e.g. the xrun flags initialized below) are not visible in this extract.
2062 jack_client_t *client;  // JACK client handle, shared by both stream directions
2063 jack_port_t **ports[2];  // registered port arrays, indexed [0]=playback, [1]=capture
2064 std::string deviceName[2];  // target client (device) name for each mode
2066 pthread_cond_t condition;  // signaled by the process callback when a drain completes (see stopStream)
2067 int drainCounter; // Tracks callback counts when draining
2068 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Default constructor: no client yet, drain state cleared, port arrays and
// xrun flags reset.
2071 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
2074 #if !defined(__RTAUDIO_DEBUG__)
// Error-callback stub installed when debugging is disabled: silently
// discards JACK's internal error messages so they don't clutter stderr.
// (The original had a stray ';' after the function body — an empty
// declaration that triggers -Wextra-semi warnings — removed here.)
static void jackSilentError( const char * ) {}
// Constructor: enable port auto-connection by default (probeDeviceOpen may
// later clear it via the RTAUDIO_JACK_DONT_CONNECT flag) and, in non-debug
// builds, silence JACK's internal error reporting.
2078 RtApiJack :: RtApiJack()
2079 :shouldAutoconnect_(true) {
2080 // Nothing to do here.
2081 #if !defined(__RTAUDIO_DEBUG__)
2082 // Turn off Jack's internal error reporting.
2083 jack_set_error_function( &jackSilentError );
// Destructor: tear down any stream still open so the JACK client and all
// associated buffers are released.
2087 RtApiJack :: ~RtApiJack()
2089 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices": each distinct port-name prefix (the text before the
// first ':') registered with the server is treated as one device.  Returns
// 0 when no JACK server is reachable.
2092 unsigned int RtApiJack :: getDeviceCount( void )
2094 // See if we can become a jack client.
2095 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2096 jack_status_t *status = NULL;
2097 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2098 if ( client == 0 ) return 0;
2101 std::string port, previousPort;
2102 unsigned int nChannels = 0, nDevices = 0;
2103 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2105 // Parse the port names up to the first colon (:).
2108 port = (char *) ports[ nChannels ];
2109 iColon = port.find(":");
2110 if ( iColon != std::string::npos ) {
2111 port = port.substr( 0, iColon + 1 );
// A new prefix means a new client/device; port lists from JACK group a
// client's ports together, so comparing against the previous prefix suffices.
2112 if ( port != previousPort ) {
2114 previousPort = port;
2117 } while ( ports[++nChannels] );
// Release the temporary counting client before returning.
2121 jack_client_close( client );
// Fill in a DeviceInfo structure for device index `device`.  The "device"
// is a JACK client name (port prefix); channel counts come from the number
// of ports that client exposes, and the sample rate is the server's fixed
// rate.  Emits RTAUDIO_WARNING / RTAUDIO_INVALID_USE on failure.
2125 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2127 RtAudio::DeviceInfo info;
2128 info.probed = false;
2130 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2131 jack_status_t *status = NULL;
2132 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2133 if ( client == 0 ) {
2134 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2135 error( RTAUDIO_WARNING );
2140 std::string port, previousPort;
2141 unsigned int nPorts = 0, nDevices = 0;
2142 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2144 // Parse the port names up to the first colon (:).
2147 port = (char *) ports[ nPorts ];
2148 iColon = port.find(":");
2149 if ( iColon != std::string::npos ) {
2150 port = port.substr( 0, iColon );
2151 if ( port != previousPort ) {
// The Nth distinct prefix is device N; record its name when we reach it.
2152 if ( nDevices == device ) info.name = port;
2154 previousPort = port;
2157 } while ( ports[++nPorts] );
2161 if ( device >= nDevices ) {
2162 jack_client_close( client );
2163 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2164 error( RTAUDIO_INVALID_USE );
2168 // Get the current jack server sample rate.
2169 info.sampleRates.clear();
// JACK runs at a single fixed rate, so that is both the only supported
// rate and the preferred one.
2171 info.preferredSampleRate = jack_get_sample_rate( client );
2172 info.sampleRates.push_back( info.preferredSampleRate );
2174 // Count the available ports containing the client name as device
2175 // channels. Jack "input ports" equal RtAudio output channels.
2176 unsigned int nChannels = 0;
2177 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2179 while ( ports[ nChannels ] ) nChannels++;
2181 info.outputChannels = nChannels;
2184 // Jack "output ports" equal RtAudio input channels.
2186 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2188 while ( ports[ nChannels ] ) nChannels++;
2190 info.inputChannels = nChannels;
2193 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2194 jack_client_close(client);
2195 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2196 error( RTAUDIO_WARNING );
2200 // If device opens for both playback and capture, we determine the channels.
2201 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2202 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2204 // Jack always uses 32-bit floats.
2205 info.nativeFormats = RTAUDIO_FLOAT32;
2207 // Jack doesn't provide default devices so we'll use the first available one.
2208 if ( device == 0 && info.outputChannels > 0 )
2209 info.isDefaultOutput = true;
2210 if ( device == 0 && info.inputChannels > 0 )
2211 info.isDefaultInput = true;
2213 jack_client_close(client);
2218 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2220 CallbackInfo *info = (CallbackInfo *) infoPointer;
2222 RtApiJack *object = (RtApiJack *) info->object;
2223 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2228 // This function will be called by a spawned thread when the Jack
2229 // server signals that it is shutting down. It is necessary to handle
2230 // it this way because the jackShutdown() function must return before
2231 // the jack_deactivate() function (in closeStream()) will return.
2232 static void *jackCloseStream( void *ptr )
2234 CallbackInfo *info = (CallbackInfo *) ptr;
2235 RtApiJack *object = (RtApiJack *) info->object;
2237 info->deviceDisconnected = true;
2238 object->closeStream();
2239 pthread_exit( NULL );
2242 static void jackShutdown( void *infoPointer )
2244 CallbackInfo *info = (CallbackInfo *) infoPointer;
2245 RtApiJack *object = (RtApiJack *) info->object;
2247 // Check current stream state. If stopped, then we'll assume this
2248 // was called as a result of a call to RtApiJack::stopStream (the
2249 // deactivation of a client handle causes this function to be called).
2250 // If not, we'll assume the Jack server is shutting down or some
2251 // other problem occurred and we should close the stream.
2252 if ( object->isStreamRunning() == false ) return;
2254 ThreadHandle threadId;
2255 pthread_create( &threadId, NULL, jackCloseStream, info );
2258 static int jackXrun( void *infoPointer )
2260 JackHandle *handle = *((JackHandle **) infoPointer);
2262 if ( handle->ports[0] ) handle->xrun[0] = true;
2263 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open/configure one direction (OUTPUT or INPUT) of a JACK stream.  Called
// once per mode; for a duplex stream the second call reuses the client
// created by the first.  Sets errorText_ and returns FAILURE on any problem,
// unwinding whatever was allocated (see the error section at the end).
2268 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2269 unsigned int firstChannel, unsigned int sampleRate,
2270 RtAudioFormat format, unsigned int *bufferSize,
2271 RtAudio::StreamOptions *options )
2273 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2275 // Look for jack server and try to become a client (only do once per stream).
2276 jack_client_t *client = 0;
2277 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2278 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2279 jack_status_t *status = NULL;
// Use the caller-supplied stream name for the JACK client when provided.
2280 if ( options && !options->streamName.empty() )
2281 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2283 client = jack_client_open( "RtApiJack", jackoptions, status );
2284 if ( client == 0 ) {
2285 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2286 error( RTAUDIO_WARNING );
2291 // The handle must have been created on an earlier pass.
2292 client = handle->client;
// Resolve the device index to a JACK client-name prefix (text before ':').
2296 std::string port, previousPort, deviceName;
2297 unsigned int nPorts = 0, nDevices = 0;
2298 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2300 // Parse the port names up to the first colon (:).
2303 port = (char *) ports[ nPorts ];
2304 iColon = port.find(":");
2305 if ( iColon != std::string::npos ) {
2306 port = port.substr( 0, iColon );
2307 if ( port != previousPort ) {
2308 if ( nDevices == device ) deviceName = port;
2310 previousPort = port;
2313 } while ( ports[++nPorts] );
2317 if ( device >= nDevices ) {
2318 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
// Playback connects to the device's input ports, capture to its outputs.
2322 unsigned long flag = JackPortIsInput;
2323 if ( mode == INPUT ) flag = JackPortIsOutput;
2325 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2326 // Count the available ports containing the client name as device
2327 // channels. Jack "input ports" equal RtAudio output channels.
2328 unsigned int nChannels = 0;
2329 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2331 while ( ports[ nChannels ] ) nChannels++;
2334 // Compare the jack ports for specified client to the requested number of channels.
2335 if ( nChannels < (channels + firstChannel) ) {
2336 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2337 errorText_ = errorStream_.str();
2342 // Check the jack server sample rate.
2343 unsigned int jackRate = jack_get_sample_rate( client );
// The server rate is fixed; we cannot resample, so a mismatch is fatal.
2344 if ( sampleRate != jackRate ) {
2345 jack_client_close( client );
2346 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2347 errorText_ = errorStream_.str();
2350 stream_.sampleRate = jackRate;
2352 // Get the latency of the JACK port.
2353 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2354 if ( ports[ firstChannel ] ) {
2356 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2357 // the range (usually the min and max are equal)
2358 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2359 // get the latency range
2360 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2361 // be optimistic, use the min!
2362 stream_.latency[mode] = latrange.min;
2363 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2367 // The jack server always uses 32-bit floating-point data.
2368 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2369 stream_.userFormat = format;
2371 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2372 else stream_.userInterleaved = true;
2374 // Jack always uses non-interleaved buffers.
2375 stream_.deviceInterleaved[mode] = false;
2377 // Jack always provides host byte-ordered data.
2378 stream_.doByteSwap[mode] = false;
2380 // Get the buffer size. The buffer size and number of buffers
2381 // (periods) is set when the jack server is started.
2382 stream_.bufferSize = (int) jack_get_buffer_size( client );
2383 *bufferSize = stream_.bufferSize;
2385 stream_.nDeviceChannels[mode] = channels;
2386 stream_.nUserChannels[mode] = channels;
2388 // Set flags for buffer conversion.
// Conversion is needed when the user format differs from float32 or when
// the user wants interleaved data for more than one channel.
2389 stream_.doConvertBuffer[mode] = false;
2390 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2391 stream_.doConvertBuffer[mode] = true;
2392 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2393 stream_.nUserChannels[mode] > 1 )
2394 stream_.doConvertBuffer[mode] = true;
2396 // Allocate our JackHandle structure for the stream.
2397 if ( handle == 0 ) {
2399 handle = new JackHandle;
2401 catch ( std::bad_alloc& ) {
2402 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2406 if ( pthread_cond_init(&handle->condition, NULL) ) {
2407 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2410 stream_.apiHandle = (void *) handle;
2411 handle->client = client;
2413 handle->deviceName[mode] = deviceName;
2415 // Allocate necessary internal buffers.
2416 unsigned long bufferBytes;
2417 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2418 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2419 if ( stream_.userBuffer[mode] == NULL ) {
2420 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2424 if ( stream_.doConvertBuffer[mode] ) {
// For duplex streams the device buffer is shared; only (re)allocate it if
// the existing one (sized for output) is too small for this direction.
2426 bool makeBuffer = true;
2427 if ( mode == OUTPUT )
2428 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2429 else { // mode == INPUT
2430 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2431 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2432 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2433 if ( bufferBytes < bytesOut ) makeBuffer = false;
2438 bufferBytes *= *bufferSize;
2439 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2440 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2441 if ( stream_.deviceBuffer == NULL ) {
2442 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2448 // Allocate memory for the Jack ports (channels) identifiers.
2449 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2450 if ( handle->ports[mode] == NULL ) {
2451 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2455 stream_.device[mode] = device;
2456 stream_.channelOffset[mode] = firstChannel;
2457 stream_.state = STREAM_STOPPED;
2458 stream_.callbackInfo.object = (void *) this;
2460 if ( stream_.mode == OUTPUT && mode == INPUT )
2461 // We had already set up the stream for output.
2462 stream_.mode = DUPLEX;
2464 stream_.mode = mode;
// Install the JACK callbacks (process, xrun, server-shutdown).
2465 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2466 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2467 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2470 // Register our ports.
2472 if ( mode == OUTPUT ) {
2473 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2474 snprintf( label, 64, "outport %d", i );
2475 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2476 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2480 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2481 snprintf( label, 64, "inport %d", i );
2482 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2483 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2487 // Setup the buffer conversion information structure. We don't use
2488 // buffers to do channel offsets, so we override that parameter
2490 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2492 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// Error unwind: release everything allocated above before returning FAILURE.
2498 pthread_cond_destroy( &handle->condition );
2499 jack_client_close( handle->client );
2501 if ( handle->ports[0] ) free( handle->ports[0] );
2502 if ( handle->ports[1] ) free( handle->ports[1] );
2505 stream_.apiHandle = 0;
2508 for ( int i=0; i<2; i++ ) {
2509 if ( stream_.userBuffer[i] ) {
2510 free( stream_.userBuffer[i] );
2511 stream_.userBuffer[i] = 0;
2515 if ( stream_.deviceBuffer ) {
2516 free( stream_.deviceBuffer );
2517 stream_.deviceBuffer = 0;
// Close the stream: deactivate and close the JACK client, destroy the
// JackHandle and condition variable, and free all internal buffers.
// Reports RTAUDIO_DEVICE_DISCONNECT if the close was triggered by the
// JACK server shutting down (see jackCloseStream).
2523 void RtApiJack :: closeStream( void )
2525 if ( stream_.state == STREAM_CLOSED ) {
2526 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2527 error( RTAUDIO_WARNING );
2531 JackHandle *handle = (JackHandle *) stream_.apiHandle;
// A running stream must be deactivated before the client can be closed.
2533 if ( stream_.state == STREAM_RUNNING )
2534 jack_deactivate( handle->client );
2536 jack_client_close( handle->client );
2540 if ( handle->ports[0] ) free( handle->ports[0] );
2541 if ( handle->ports[1] ) free( handle->ports[1] );
2542 pthread_cond_destroy( &handle->condition );
2544 stream_.apiHandle = 0;
2547 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2548 if ( info->deviceDisconnected ) {
2549 errorText_ = "RtApiJack: the Jack server is shutting down this client ... stream stopped and closed!";
2550 error( RTAUDIO_DEVICE_DISCONNECT );
2553 for ( int i=0; i<2; i++ ) {
2554 if ( stream_.userBuffer[i] ) {
2555 free( stream_.userBuffer[i] );
2556 stream_.userBuffer[i] = 0;
2560 if ( stream_.deviceBuffer ) {
2561 free( stream_.deviceBuffer );
2562 stream_.deviceBuffer = 0;
2566 //stream_.mode = UNINITIALIZED;
2567 //stream_.state = STREAM_CLOSED;
// Start the stream: activate the JACK client and, unless auto-connection
// was disabled (RTAUDIO_JACK_DONT_CONNECT), connect our registered ports
// to the target device's ports, honoring the channel offset chosen at open
// time.  Returns a warning if the stream is not in the STOPPED state.
2570 RtAudioErrorType RtApiJack :: startStream( void )
2572 if ( stream_.state != STREAM_STOPPED ) {
2573 if ( stream_.state == STREAM_RUNNING )
2574 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2575 else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
2576 errorText_ = "RtApiJack::startStream(): the stream is stopping or closed!";
2577 return error( RTAUDIO_WARNING );
// Record the start time so stream-time ticking has a reference point.
2581 #if defined( HAVE_GETTIMEOFDAY )
2582 gettimeofday( &stream_.lastTickTimestamp, NULL );
2586 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2587 int result = jack_activate( handle->client );
2589 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2595 // Get the list of available ports.
2596 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2598 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2599 if ( ports == NULL) {
2600 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2604 // Now make the port connections. Since RtAudio wasn't designed to
2605 // allow the user to select particular channels of a device, we'll
2606 // just open the first "nChannels" ports with offset.
2607 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2609 if ( ports[ stream_.channelOffset[0] + i ] )
2610 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2613 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2620 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2622 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2623 if ( ports == NULL) {
2624 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2628 // Now make the port connections. See note above.
2629 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2631 if ( ports[ stream_.channelOffset[1] + i ] )
2632 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2635 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Reset drain state so the process callback runs the user callback again.
2642 handle->drainCounter = 0;
2643 handle->internalDrain = false;
2644 stream_.state = STREAM_RUNNING;
2647 if ( result == 0 ) return RTAUDIO_NO_ERROR;
2648 return error( RTAUDIO_SYSTEM_ERROR );
2651 RtAudioErrorType RtApiJack :: stopStream( void )
2653 if ( stream_.state != STREAM_RUNNING && stream_.state != STREAM_STOPPING ) {
2654 if ( stream_.state == STREAM_STOPPED )
2655 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2656 else if ( stream_.state == STREAM_CLOSED )
2657 errorText_ = "RtApiJack::stopStream(): the stream is closed!";
2658 return error( RTAUDIO_WARNING );
2661 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2662 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2664 if ( handle->drainCounter == 0 ) {
2665 handle->drainCounter = 2;
2666 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2670 jack_deactivate( handle->client );
2671 stream_.state = STREAM_STOPPED;
2672 return RTAUDIO_NO_ERROR;
2675 RtAudioErrorType RtApiJack :: abortStream( void )
2677 if ( stream_.state != STREAM_RUNNING ) {
2678 if ( stream_.state == STREAM_STOPPED )
2679 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2680 else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
2681 errorText_ = "RtApiJack::abortStream(): the stream is stopping or closed!";
2682 return error( RTAUDIO_WARNING );
2685 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2686 handle->drainCounter = 2;
2688 return stopStream();
2691 // This function will be called by a spawned thread when the user
2692 // callback function signals that the stream should be stopped or
2693 // aborted. It is necessary to handle it this way because the
2694 // callbackEvent() function must return before the jack_deactivate()
2695 // function will return.
2696 static void *jackStopStream( void *ptr )
2698 CallbackInfo *info = (CallbackInfo *) ptr;
2699 RtApiJack *object = (RtApiJack *) info->object;
2701 object->stopStream();
2702 pthread_exit( NULL );
// Per-block JACK processing: invoke the user callback, move audio between
// the user/device buffers and the per-channel JACK port buffers, and manage
// the drain/stop handshake with stopStream()/abortStream().  Returns
// SUCCESS normally; called from the JACK realtime thread via
// jackCallbackHandler.
2705 bool RtApiJack :: callbackEvent( unsigned long nframes )
2707 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2708 if ( stream_.state == STREAM_CLOSED ) {
2709 errorText_ = "RtApiJack::callbackEvent(): the stream is closed ... this shouldn't happen!";
2710 error( RTAUDIO_WARNING );
// The internal buffers were sized at open time; a changed JACK period size
// cannot be handled mid-stream.
2713 if ( stream_.bufferSize != nframes ) {
2714 errorText_ = "RtApiJack::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2715 error( RTAUDIO_WARNING );
2719 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2720 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2722 // Check if we were draining the stream and signal is finished.
2723 if ( handle->drainCounter > 3 ) {
2724 ThreadHandle threadId;
2726 stream_.state = STREAM_STOPPING;
2727 if ( handle->internalDrain == true )
2728 pthread_create( &threadId, NULL, jackStopStream, info );
2729 else // external call to stopStream()
2730 pthread_cond_signal( &handle->condition );
2734 // Invoke user callback first, to get fresh output data.
2735 if ( handle->drainCounter == 0 ) {
2736 RtAudioCallback callback = (RtAudioCallback) info->callback;
2737 double streamTime = getStreamTime();
2738 RtAudioStreamStatus status = 0;
// Report (and clear) any xrun flags latched by jackXrun() since last block.
2739 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2740 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2741 handle->xrun[0] = false;
2743 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2744 status |= RTAUDIO_INPUT_OVERFLOW;
2745 handle->xrun[1] = false;
2747 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2748 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 => abort immediately (no drain); 1 => drain then stop.
2749 if ( cbReturnValue == 2 ) {
2750 stream_.state = STREAM_STOPPING;
2751 handle->drainCounter = 2;
2753 pthread_create( &id, NULL, jackStopStream, info );
2756 else if ( cbReturnValue == 1 ) {
2757 handle->drainCounter = 1;
2758 handle->internalDrain = true;
// JACK exposes one non-interleaved float buffer per port (channel).
2762 jack_default_audio_sample_t *jackbuffer;
2763 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2764 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2766 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2768 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2769 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2770 memset( jackbuffer, 0, bufferBytes );
// Format/interleaving conversion goes through the device buffer ...
2774 else if ( stream_.doConvertBuffer[0] ) {
2776 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2778 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2779 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2780 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
// ... otherwise user data is already float32 non-interleaved: copy directly.
2783 else { // no buffer conversion
2784 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2785 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2786 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2791 // Don't bother draining input
2792 if ( handle->drainCounter ) {
2793 handle->drainCounter++;
2797 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2799 if ( stream_.doConvertBuffer[1] ) {
2800 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2801 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2802 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2804 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2806 else { // no buffer conversion
2807 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2808 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2809 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2815 RtApi::tickStreamTime();
2818 //******************** End of __UNIX_JACK__ *********************//
2821 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2823 // The ASIO API is designed around a callback scheme, so this
2824 // implementation is similar to that used for OS-X CoreAudio and Linux
2825 // Jack. The primary constraint with ASIO is that it only allows
2826 // access to a single driver at a time. Thus, it is not possible to
2827 // have more than one simultaneous RtAudio stream.
2829 // This implementation also requires a number of external ASIO files
2830 // and a few global variables. The ASIO callback scheme does not
2831 // allow for the passing of user data, so we must create a global
2832 // pointer to our callbackInfo structure.
2834 // On unix systems, we make use of a pthread condition variable.
2835 // Since there is no equivalent in Windows, I hacked something based
2836 // on information found in
2837 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2839 #include "asiosys.h"
2841 #include "iasiothiscallresolver.h"
2842 #include "asiodrivers.h"
// File-scope ASIO state. The ASIO API allows only one driver at a time and
// its callbacks carry no user-data pointer, so these must live at file scope
// (see the explanatory comment block earlier in this section).
2845 static AsioDrivers drivers;
2846 static ASIOCallbacks asioCallbacks;
2847 static ASIODriverInfo driverInfo;
2848 static CallbackInfo *asioCallbackInfo;
2849 static bool asioXRun;
// AsioHandle members (the struct header line is elided in this fragment):
// per-stream bookkeeping used by callbackEvent()/stopStream().
2852 int drainCounter; // Tracks callback counts when draining
2853 bool internalDrain; // Indicates if stop is initiated from callback or not.
2854 ASIOBufferInfo *bufferInfos;
// Default-construct with no drain in progress and no buffer info array.
2858 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2861 // Function declarations (definitions at end of section)
2862 static const char* getAsioErrorString( ASIOError result );
2863 static void sampleRateChanged( ASIOSampleRate sRate );
2864 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM apartment threading for this thread and
// prepare the global ASIO driverInfo record used by later ASIOInit() calls.
2866 RtApiAsio :: RtApiAsio()
2868 // ASIO cannot run on a multi-threaded apartment. You can call
2869 // CoInitialize beforehand, but it must be for apartment threading
2870 // (in which case, CoInitialize will return S_FALSE here).
2871 coInitialized_ = false;
2872 HRESULT hr = CoInitialize( NULL );
// NOTE(review): the test on hr is elided in this fragment; presumably a
// failed CoInitialize (MTA already active) triggers this warning — confirm.
2874 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2875 error( RtAudioError::WARNING );
2877 coInitialized_ = true;
// Start from a clean slate: no ASIO driver selected yet.
2879 drivers.removeCurrentDriver();
2880 driverInfo.asioVersion = 2;
2882 // See note in DirectSound implementation about GetDesktopWindow().
2883 driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream, then undo the constructor's
// CoInitialize if it succeeded.
2886 RtApiAsio :: ~RtApiAsio()
2888 if ( stream_.state != STREAM_CLOSED ) closeStream();
2889 if ( coInitialized_ ) CoUninitialize();
// Return the number of ASIO drivers registered on the system.
2892 unsigned int RtApiAsio :: getDeviceCount( void )
2894 return (unsigned int) drivers.asioGetNumDev();
// Probe one ASIO device: load its driver, query channel counts, supported
// sample rates and native data format, then unload the driver. Returns the
// populated DeviceInfo; on failure a warning is raised and info.probed stays
// false (early-return lines are elided in this fragment).
2897 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2899 RtAudio::DeviceInfo info;
2900 info.probed = false;
// Validate the device index against the current driver count.
2903 unsigned int nDevices = getDeviceCount();
2904 if ( nDevices == 0 ) {
2905 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2906 error( RtAudioError::INVALID_USE );
2910 if ( device >= nDevices ) {
2911 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2912 error( RtAudioError::INVALID_USE );
2916 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2917 if ( stream_.state != STREAM_CLOSED ) {
2918 if ( device >= devices_.size() ) {
2919 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2920 error( RtAudioError::WARNING );
2923 return devices_[ device ];
// Look up the driver name for this device index.
2926 char driverName[32];
2927 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2928 if ( result != ASE_OK ) {
2929 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2930 errorText_ = errorStream_.str();
2931 error( RtAudioError::WARNING );
2935 info.name = driverName;
// Load and initialize the driver so we can query it.
2937 if ( !drivers.loadDriver( driverName ) ) {
2938 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2939 errorText_ = errorStream_.str();
2940 error( RtAudioError::WARNING );
2944 result = ASIOInit( &driverInfo );
2945 if ( result != ASE_OK ) {
2946 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2947 errorText_ = errorStream_.str();
2948 error( RtAudioError::WARNING );
2952 // Determine the device channel information.
2953 long inputChannels, outputChannels;
2954 result = ASIOGetChannels( &inputChannels, &outputChannels );
2955 if ( result != ASE_OK ) {
2956 drivers.removeCurrentDriver();
2957 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2958 errorText_ = errorStream_.str();
2959 error( RtAudioError::WARNING );
2963 info.outputChannels = outputChannels;
2964 info.inputChannels = inputChannels;
// Duplex capacity is limited by the smaller of the two directions.
2965 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2966 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2968 // Determine the supported sample rates.
2969 info.sampleRates.clear();
2970 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2971 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2972 if ( result == ASE_OK ) {
2973 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz.
2975 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2976 info.preferredSampleRate = SAMPLE_RATES[i];
2980 // Determine supported data types ... just check first channel and assume rest are the same.
2981 ASIOChannelInfo channelInfo;
2982 channelInfo.channel = 0;
2983 channelInfo.isInput = true;
2984 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2985 result = ASIOGetChannelInfo( &channelInfo );
2986 if ( result != ASE_OK ) {
2987 drivers.removeCurrentDriver();
2988 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2989 errorText_ = errorStream_.str();
2990 error( RtAudioError::WARNING );
// Map the ASIO sample type onto the RtAudio format bitmask.
2994 info.nativeFormats = 0;
2995 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2996 info.nativeFormats |= RTAUDIO_SINT16;
2997 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2998 info.nativeFormats |= RTAUDIO_SINT32;
2999 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
3000 info.nativeFormats |= RTAUDIO_FLOAT32;
3001 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
3002 info.nativeFormats |= RTAUDIO_FLOAT64;
3003 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
3004 info.nativeFormats |= RTAUDIO_SINT24;
// Flag default devices.
3006 if ( info.outputChannels > 0 )
3007 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
3008 if ( info.inputChannels > 0 )
3009 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Done probing: unload the driver before returning.
3012 drivers.removeCurrentDriver();
// ASIO buffer-switch callback trampoline: recover the RtApiAsio instance
// from the global asioCallbackInfo (ASIO passes no user data) and forward
// the half-buffer index to callbackEvent().
3016 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
3018 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
3019 object->callbackEvent( index );
// Cache DeviceInfo for every device. Called before opening a stream because
// ASIO cannot probe other drivers while one is running; getDeviceInfo()
// serves from this cache while a stream is open.
3022 void RtApiAsio :: saveDeviceInfo( void )
3026 unsigned int nDevices = getDeviceCount();
3027 devices_.resize( nDevices );
3028 for ( unsigned int i=0; i<nDevices; i++ )
3029 devices_[i] = getDeviceInfo( i );
// Open (probe + configure) an ASIO stream on the given device.
// Loads the driver (once, for duplex), validates channels/sample rate,
// negotiates a buffer size, allocates ASIO and conversion buffers, and
// registers the global callback structure. Returns SUCCESS/FAILURE; on
// failure the error-path cleanup at the end releases everything allocated.
// FIX(review): line 3111 previously read "ASIOGetSampleRate( ¤tRate )" —
// mojibake where "&curren" was collapsed into the ¤ entity. Restored the
// intended "&currentRate" (the variable declared at 3110).
3032 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
3033 unsigned int firstChannel, unsigned int sampleRate,
3034 RtAudioFormat format, unsigned int *bufferSize,
3035 RtAudio::StreamOptions *options )
3036 {
3038 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
3040 // For ASIO, a duplex stream MUST use the same driver.
3041 if ( isDuplexInput && stream_.device[0] != device ) {
3042 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
3046 char driverName[32];
3047 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
3048 if ( result != ASE_OK ) {
3049 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
3050 errorText_ = errorStream_.str();
3054 // Only load the driver once for duplex stream.
3055 if ( !isDuplexInput ) {
3056 // The getDeviceInfo() function will not work when a stream is open
3057 // because ASIO does not allow multiple devices to run at the same
3058 // time. Thus, we'll probe the system before opening a stream and
3059 // save the results for use by getDeviceInfo().
3060 this->saveDeviceInfo();
3062 if ( !drivers.loadDriver( driverName ) ) {
3063 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
3064 errorText_ = errorStream_.str();
3068 result = ASIOInit( &driverInfo );
3069 if ( result != ASE_OK ) {
3070 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3071 errorText_ = errorStream_.str();
3076 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3077 bool buffersAllocated = false;
3078 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3079 unsigned int nChannels;
3082 // Check the device channel count.
3083 long inputChannels, outputChannels;
3084 result = ASIOGetChannels( &inputChannels, &outputChannels );
3085 if ( result != ASE_OK ) {
3086 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3087 errorText_ = errorStream_.str();
3091 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3092 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3093 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3094 errorText_ = errorStream_.str();
3097 stream_.nDeviceChannels[mode] = channels;
3098 stream_.nUserChannels[mode] = channels;
3099 stream_.channelOffset[mode] = firstChannel;
3101 // Verify the sample rate is supported.
3102 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3103 if ( result != ASE_OK ) {
3104 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3105 errorText_ = errorStream_.str();
3109 // Get the current sample rate
3110 ASIOSampleRate currentRate;
3111 result = ASIOGetSampleRate( &currentRate );
3112 if ( result != ASE_OK ) {
3113 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3114 errorText_ = errorStream_.str();
3118 // Set the sample rate only if necessary
3119 if ( currentRate != sampleRate ) {
3120 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3121 if ( result != ASE_OK ) {
3122 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3123 errorText_ = errorStream_.str();
3128 // Determine the driver data type.
3129 ASIOChannelInfo channelInfo;
3130 channelInfo.channel = 0;
3131 if ( mode == OUTPUT ) channelInfo.isInput = false;
3132 else channelInfo.isInput = true;
3133 result = ASIOGetChannelInfo( &channelInfo );
3134 if ( result != ASE_OK ) {
3135 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3136 errorText_ = errorStream_.str();
3140 // Assuming WINDOWS host is always little-endian.
3141 stream_.doByteSwap[mode] = false;
3142 stream_.userFormat = format;
3143 stream_.deviceFormat[mode] = 0;
// Map the driver's native sample type; MSB variants require byte swapping.
3144 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3145 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3146 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3148 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3149 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3150 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3152 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3153 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3154 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3156 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3157 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3158 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3160 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3161 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3162 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3165 if ( stream_.deviceFormat[mode] == 0 ) {
3166 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3167 errorText_ = errorStream_.str();
3171 // Set the buffer size. For a duplex stream, this will end up
3172 // setting the buffer size based on the input constraints, which
3174 long minSize, maxSize, preferSize, granularity;
3175 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3176 if ( result != ASE_OK ) {
3177 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3178 errorText_ = errorStream_.str();
3182 if ( isDuplexInput ) {
3183 // When this is the duplex input (output was opened before), then we have to use the same
3184 // buffersize as the output, because it might use the preferred buffer size, which most
3185 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3186 // So instead of throwing an error, make them equal. The caller uses the reference
3187 // to the "bufferSize" param as usual to set up processing buffers.
3189 *bufferSize = stream_.bufferSize;
// Clamp the requested size into [minSize, maxSize], honoring granularity.
3192 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3193 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3194 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3195 else if ( granularity == -1 ) {
3196 // Make sure bufferSize is a power of two.
3197 int log2_of_min_size = 0;
3198 int log2_of_max_size = 0;
3200 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3201 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3202 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
// Choose the power of two closest to the requested size.
3205 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3206 int min_delta_num = log2_of_min_size;
3208 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3209 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3210 if (current_delta < min_delta) {
3211 min_delta = current_delta;
3216 *bufferSize = ( (unsigned int)1 << min_delta_num );
3217 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3218 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3220 else if ( granularity != 0 ) {
3221 // Set to an even multiple of granularity, rounding up.
3222 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3227 // we don't use it anymore, see above!
3228 // Just left it here for the case...
3229 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3230 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3235 stream_.bufferSize = *bufferSize;
3236 stream_.nBuffers = 2;
3238 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3239 else stream_.userInterleaved = true;
3241 // ASIO always uses non-interleaved buffers.
3242 stream_.deviceInterleaved[mode] = false;
3244 // Allocate, if necessary, our AsioHandle structure for the stream.
3245 if ( handle == 0 ) {
3247 handle = new AsioHandle;
3249 catch ( std::bad_alloc& ) {
3250 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3253 handle->bufferInfos = 0;
3255 // Create a manual-reset event.
3256 handle->condition = CreateEvent( NULL, // no security
3257 TRUE, // manual-reset
3258 FALSE, // non-signaled initially
3260 stream_.apiHandle = (void *) handle;
3263 // Create the ASIO internal buffers. Since RtAudio sets up input
3264 // and output separately, we'll have to dispose of previously
3265 // created output buffers for a duplex stream.
3266 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3267 ASIODisposeBuffers();
3268 if ( handle->bufferInfos ) free( handle->bufferInfos );
3271 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3273 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3274 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3275 if ( handle->bufferInfos == NULL ) {
3276 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3277 errorText_ = errorStream_.str();
// Output channels first, then input channels, in the bufferInfos array.
3281 ASIOBufferInfo *infos;
3282 infos = handle->bufferInfos;
3283 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3284 infos->isInput = ASIOFalse;
3285 infos->channelNum = i + stream_.channelOffset[0];
3286 infos->buffers[0] = infos->buffers[1] = 0;
3288 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3289 infos->isInput = ASIOTrue;
3290 infos->channelNum = i + stream_.channelOffset[1];
3291 infos->buffers[0] = infos->buffers[1] = 0;
3294 // prepare for callbacks
3295 stream_.sampleRate = sampleRate;
3296 stream_.device[mode] = device;
3297 stream_.mode = isDuplexInput ? DUPLEX : mode;
3299 // store this class instance before registering callbacks, that are going to use it
3300 asioCallbackInfo = &stream_.callbackInfo;
3301 stream_.callbackInfo.object = (void *) this;
3303 // Set up the ASIO callback structure and create the ASIO data buffers.
3304 asioCallbacks.bufferSwitch = &bufferSwitch;
3305 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3306 asioCallbacks.asioMessage = &asioMessages;
3307 asioCallbacks.bufferSwitchTimeInfo = NULL;
3308 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3309 if ( result != ASE_OK ) {
3310 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3311 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3312 // In that case, let's be naive and try that instead.
3313 *bufferSize = preferSize;
3314 stream_.bufferSize = *bufferSize;
3315 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3318 if ( result != ASE_OK ) {
3319 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3320 errorText_ = errorStream_.str();
3323 buffersAllocated = true;
3324 stream_.state = STREAM_STOPPED;
3326 // Set flags for buffer conversion.
3327 stream_.doConvertBuffer[mode] = false;
3328 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3329 stream_.doConvertBuffer[mode] = true;
3330 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3331 stream_.nUserChannels[mode] > 1 )
3332 stream_.doConvertBuffer[mode] = true;
3334 // Allocate necessary internal buffers
3335 unsigned long bufferBytes;
3336 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3337 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3338 if ( stream_.userBuffer[mode] == NULL ) {
3339 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3343 if ( stream_.doConvertBuffer[mode] ) {
// Reuse an existing (larger) duplex device buffer where possible.
3345 bool makeBuffer = true;
3346 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3347 if ( isDuplexInput && stream_.deviceBuffer ) {
3348 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3349 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3353 bufferBytes *= *bufferSize;
3354 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3355 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3356 if ( stream_.deviceBuffer == NULL ) {
3357 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3363 // Determine device latencies
3364 long inputLatency, outputLatency;
3365 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3366 if ( result != ASE_OK ) {
3367 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3368 errorText_ = errorStream_.str();
3369 error( RtAudioError::WARNING); // warn but don't fail
3372 stream_.latency[0] = outputLatency;
3373 stream_.latency[1] = inputLatency;
3376 // Setup the buffer conversion information structure. We don't use
3377 // buffers to do channel offsets, so we override that parameter
3379 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// ----- error cleanup path (reached via goto on any failure above) -----
3384 if ( !isDuplexInput ) {
3385 // the cleanup for error in the duplex input, is done by RtApi::openStream
3386 // So we clean up for single channel only
3388 if ( buffersAllocated )
3389 ASIODisposeBuffers();
3391 drivers.removeCurrentDriver();
3394 CloseHandle( handle->condition );
3395 if ( handle->bufferInfos )
3396 free( handle->bufferInfos );
3399 stream_.apiHandle = 0;
3403 if ( stream_.userBuffer[mode] ) {
3404 free( stream_.userBuffer[mode] );
3405 stream_.userBuffer[mode] = 0;
3408 if ( stream_.deviceBuffer ) {
3409 free( stream_.deviceBuffer );
3410 stream_.deviceBuffer = 0;
3415 }
// Tear down the open stream: dispose ASIO buffers, unload the driver,
// release the AsioHandle (event + bufferInfos) and all conversion buffers,
// and reset the stream bookkeeping to CLOSED.
3417 void RtApiAsio :: closeStream()
3419 if ( stream_.state == STREAM_CLOSED ) {
3420 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3421 error( RtAudioError::WARNING );
3425 if ( stream_.state == STREAM_RUNNING ) {
3426 stream_.state = STREAM_STOPPED;
3429 ASIODisposeBuffers();
3430 drivers.removeCurrentDriver();
// Free the per-stream ASIO handle and its Windows event.
3432 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3434 CloseHandle( handle->condition );
3435 if ( handle->bufferInfos )
3436 free( handle->bufferInfos );
3438 stream_.apiHandle = 0;
// Free user buffers for both directions, then the shared device buffer.
3441 for ( int i=0; i<2; i++ ) {
3442 if ( stream_.userBuffer[i] ) {
3443 free( stream_.userBuffer[i] );
3444 stream_.userBuffer[i] = 0;
3448 if ( stream_.deviceBuffer ) {
3449 free( stream_.deviceBuffer );
3450 stream_.deviceBuffer = 0;
3453 stream_.mode = UNINITIALIZED;
3454 stream_.state = STREAM_CLOSED;
// File-scope flag shared by startStream()/stopStream() coordination.
3457 bool stopThreadCalled = false;
// Start the ASIO device: reset drain bookkeeping and the stop-signal event,
// then mark the stream RUNNING. Raises SYSTEM_ERROR if ASIOStart() fails.
3459 void RtApiAsio :: startStream()
3462 if ( stream_.state == STREAM_RUNNING ) {
3463 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3464 error( RtAudioError::WARNING );
// Record the start time used for stream-time bookkeeping.
3468 #if defined( HAVE_GETTIMEOFDAY )
3469 gettimeofday( &stream_.lastTickTimestamp, NULL );
3472 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3473 ASIOError result = ASIOStart();
3474 if ( result != ASE_OK ) {
3475 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3476 errorText_ = errorStream_.str();
// Fresh start: no drain in progress; condition event back to non-signaled.
3480 handle->drainCounter = 0;
3481 handle->internalDrain = false;
3482 ResetEvent( handle->condition );
3483 stream_.state = STREAM_RUNNING;
3487 stopThreadCalled = false;
3489 if ( result == ASE_OK ) return;
3490 error( RtAudioError::SYSTEM_ERROR );
// Stop the ASIO device. For output/duplex streams, first let the callback
// drain pending output (drainCounter protocol) and block on the handle's
// event until the callback signals completion, then call ASIOStop().
3493 void RtApiAsio :: stopStream()
3496 if ( stream_.state == STREAM_STOPPED ) {
3497 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3498 error( RtAudioError::WARNING );
3502 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3503 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3504 if ( handle->drainCounter == 0 ) {
3505 handle->drainCounter = 2;
3506 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3510 stream_.state = STREAM_STOPPED;
3512 ASIOError result = ASIOStop();
3513 if ( result != ASE_OK ) {
3514 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3515 errorText_ = errorStream_.str();
3518 if ( result == ASE_OK ) return;
3519 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream. Deliberately identical to stopStream() — see the
// retained comment below for why the fast-abort path was disabled.
3522 void RtApiAsio :: abortStream()
3525 if ( stream_.state == STREAM_STOPPED ) {
3526 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3527 error( RtAudioError::WARNING );
3531 // The following lines were commented-out because some behavior was
3532 // noted where the device buffers need to be zeroed to avoid
3533 // continuing sound, even when the device buffers are completely
3534 // disposed. So now, calling abort is the same as calling stop.
3535 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3536 // handle->drainCounter = 2;
3540 // This function will be called by a spawned thread when the user
3541 // callback function signals that the stream should be stopped or
3542 // aborted. It is necessary to handle it this way because the
3543 // callbackEvent() function must return before the ASIOStop()
3544 // function will return.
3545 static unsigned __stdcall asioStopStream( void *ptr )
3547 CallbackInfo *info = (CallbackInfo *) ptr;
3548 RtApiAsio *object = (RtApiAsio *) info->object;
// Recover the RtApiAsio instance from the thread argument and stop it.
3550 object->stopStream();
// Per-buffer ASIO processing, invoked from bufferSwitch() with the index
// (0 or 1) of the half-buffer to fill/read. Runs the user callback, handles
// the drainCounter stop protocol, and copies/converts data between the
// user buffers and the per-channel ASIO device buffers.
3555 bool RtApiAsio :: callbackEvent( long bufferIndex )
3557 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3558 if ( stream_.state == STREAM_CLOSED ) {
3559 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3560 error( RtAudioError::WARNING );
3564 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3565 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3567 // Check if we were draining the stream and signal if finished.
3568 if ( handle->drainCounter > 3 ) {
3570 stream_.state = STREAM_STOPPING;
// External stop (stopStream is blocked on the event) vs. callback-initiated
// stop (spawn asioStopStream so ASIOStop runs after this callback returns).
3571 if ( handle->internalDrain == false )
3572 SetEvent( handle->condition );
3573 else { // spawn a thread to stop the stream
3575 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3576 &stream_.callbackInfo, 0, &threadId );
3581 // Invoke user callback to get fresh output data UNLESS we are
3583 if ( handle->drainCounter == 0 ) {
3584 RtAudioCallback callback = (RtAudioCallback) info->callback;
3585 double streamTime = getStreamTime();
// Report any xrun flagged by asioMessages() since the last callback.
3586 RtAudioStreamStatus status = 0;
3587 if ( stream_.mode != INPUT && asioXRun == true ) {
3588 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3591 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3592 status |= RTAUDIO_INPUT_OVERFLOW;
3595 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3596 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort now; 1 = drain output then stop.
3597 if ( cbReturnValue == 2 ) {
3598 stream_.state = STREAM_STOPPING;
3599 handle->drainCounter = 2;
3601 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3602 &stream_.callbackInfo, 0, &threadId );
3605 else if ( cbReturnValue == 1 ) {
3606 handle->drainCounter = 1;
3607 handle->internalDrain = true;
3611 unsigned int nChannels, bufferBytes, i, j;
3612 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3613 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3615 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3617 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3619 for ( i=0, j=0; i<nChannels; i++ ) {
3620 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3621 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
// Format/interleave conversion needed: convert into deviceBuffer first.
3625 else if ( stream_.doConvertBuffer[0] ) {
3627 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3628 if ( stream_.doByteSwap[0] )
3629 byteSwapBuffer( stream_.deviceBuffer,
3630 stream_.bufferSize * stream_.nDeviceChannels[0],
3631 stream_.deviceFormat[0] );
3633 for ( i=0, j=0; i<nChannels; i++ ) {
3634 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3635 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3636 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// No conversion: copy straight from the (non-interleaved) user buffer.
3642 if ( stream_.doByteSwap[0] )
3643 byteSwapBuffer( stream_.userBuffer[0],
3644 stream_.bufferSize * stream_.nUserChannels[0],
3645 stream_.userFormat );
3647 for ( i=0, j=0; i<nChannels; i++ ) {
3648 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3649 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3650 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3656 // Don't bother draining input
3657 if ( handle->drainCounter ) {
3658 handle->drainCounter++;
3662 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3664 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3666 if (stream_.doConvertBuffer[1]) {
3668 // Always interleave ASIO input data.
3669 for ( i=0, j=0; i<nChannels; i++ ) {
3670 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3671 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3672 handle->bufferInfos[i].buffers[bufferIndex],
3676 if ( stream_.doByteSwap[1] )
3677 byteSwapBuffer( stream_.deviceBuffer,
3678 stream_.bufferSize * stream_.nDeviceChannels[1],
3679 stream_.deviceFormat[1] );
3680 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// No conversion: copy each input channel directly to the user buffer.
3684 for ( i=0, j=0; i<nChannels; i++ ) {
3685 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3686 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3687 handle->bufferInfos[i].buffers[bufferIndex],
3692 if ( stream_.doByteSwap[1] )
3693 byteSwapBuffer( stream_.userBuffer[1],
3694 stream_.bufferSize * stream_.nUserChannels[1],
3695 stream_.userFormat );
3700 // The following call was suggested by Malte Clasen. While the API
3701 // documentation indicates it should not be required, some device
3702 // drivers apparently do not function correctly without it.
3705 RtApi::tickStreamTime();
// ASIO sampleRateDidChange callback: the driver reports a rate change, so
// stop the stream (RtAudio cannot continue at a different rate) and warn.
3709 static void sampleRateChanged( ASIOSampleRate sRate )
3711 // The ASIO documentation says that this usually only happens during
3712 // external sync. Audio processing is not stopped by the driver,
3713 // actual sample rate might not have even changed, maybe only the
3714 // sample rate status of an AES/EBU or S/PDIF digital input at the
3717 RtApi *object = (RtApi *) asioCallbackInfo->object;
3719 object->stopStream();
3721 catch ( RtAudioError &exception ) {
3722 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3726 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO message callback: answers driver capability queries and reacts to
// driver notifications (reset, resync, latency change). Return values per
// the ASIO SDK convention (elided return lines presumably return 1L for
// handled selectors and 0L otherwise).
3729 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3733 switch( selector ) {
3734 case kAsioSelectorSupported:
3735 if ( value == kAsioResetRequest
3736 || value == kAsioEngineVersion
3737 || value == kAsioResyncRequest
3738 || value == kAsioLatenciesChanged
3739 // The following three were added for ASIO 2.0, you don't
3740 // necessarily have to support them.
3741 || value == kAsioSupportsTimeInfo
3742 || value == kAsioSupportsTimeCode
3743 || value == kAsioSupportsInputMonitor)
3746 case kAsioResetRequest:
3747 // Defer the task and perform the reset of the driver during the
3748 // next "safe" situation. You cannot reset the driver right now,
3749 // as this code is called from the driver. Resetting the driver
3750 // means completely destructing it, i.e. ASIOStop(),
3751 // ASIODisposeBuffers(), destruction. Afterwards you re-initialize the
3753 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3756 case kAsioResyncRequest:
3757 // This informs the application that the driver encountered some
3758 // non-fatal data loss. It is used for synchronization purposes
3759 // of different media. Added mainly to work around the Win16Mutex
3760 // problems in Windows 95/98 with the Windows Multimedia system,
3761 // which could lose data because the Mutex was held too long by
3762 // another thread. However a driver can issue it in other
3764 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3768 case kAsioLatenciesChanged:
3769 // This will inform the host application that the drivers'
3770 // latencies changed. Beware, this does not mean that the
3771 // buffer sizes have changed! You might need to update internal
3773 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3776 case kAsioEngineVersion:
3777 // Return the supported ASIO version of the host application. If
3778 // a host application does not implement this selector, ASIO 1.0
3779 // is assumed by the driver.
3782 case kAsioSupportsTimeInfo:
3783 // Informs the driver whether the
3784 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3785 // For compatibility with ASIO 1.0 drivers the host application
3786 // should always support the "old" bufferSwitch method, too.
3789 case kAsioSupportsTimeCode:
3790 // Informs the driver whether application is interested in time
3791 // code info. If an application does not need to know about time
3792 // code, the driver has less work to do.
// Translate an ASIOError code into a human-readable message using a small
// static lookup table; unknown codes yield "Unknown error.".
// NOTE(review): the Messages struct definition falls outside this excerpt.
3799 static const char* getAsioErrorString( ASIOError result )
3807 static const Messages m[] =
3809 { ASE_NotPresent, "Hardware input or output is not present or available." },
3810 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3811 { ASE_InvalidParameter, "Invalid input parameter." },
3812 { ASE_InvalidMode, "Invalid mode." },
3813 { ASE_SPNotAdvancing, "Sample position not advancing." },
3814 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3815 { ASE_NoMemory, "Not enough memory to complete the request." }
// Linear scan is fine: the table has only seven entries and this is an
// error path, not a hot path.
3818 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3819 if ( m[i].value == result ) return m[i].message;
3821 return "Unknown error.";
3824 //******************** End of __WINDOWS_ASIO__ *********************//
3828 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3830 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3831 // - Introduces support for the Windows WASAPI API
3832 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3833 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3834 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3841 #include <mferror.h>
3843 #include <mftransform.h>
3844 #include <wmcodecdsp.h>
3846 #include <audioclient.h>
3848 #include <mmdeviceapi.h>
3849 #include <functiondiscoverykeys_devpkey.h>
3851 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3852 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3855 #ifndef MFSTARTUP_NOSOCKET
3856 #define MFSTARTUP_NOSOCKET 0x1
3860 #pragma comment( lib, "ksuser" )
3861 #pragma comment( lib, "mfplat.lib" )
3862 #pragma comment( lib, "mfuuid.lib" )
3863 #pragma comment( lib, "wmcodecdspuuid" )
3866 //=============================================================================
3868 #define SAFE_RELEASE( objectPtr )\
3871 objectPtr->Release();\
3875 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3877 //-----------------------------------------------------------------------------
3879 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3880 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3881 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3882 // provide intermediate storage for read / write synchronization.
3896 // sets the length of the internal ring buffer
// Allocates bufferSize * formatBytes zero-initialized bytes and records the
// new logical size. NOTE(review): release of any previously allocated
// buffer_ falls outside this excerpt — confirm the old allocation is freed
// before this calloc to avoid a leak.
3897 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3900 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3902 bufferSize_ = bufferSize;
3907 // attempt to push a buffer into the ring buffer at the current "in" index
// Copies bufferSize samples of the given format from `buffer` into the ring
// at inIndex_, wrapping at bufferSize_. Returns false (without copying)
// when the request is invalid or there is not enough free space; returns
// true on success. NOTE(review): the switch( format ) header, break
// statements and the final return fall outside this excerpt.
3908 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3910 if ( !buffer || // incoming buffer is NULL
3911 bufferSize == 0 || // incoming buffer has no data
3912 bufferSize > bufferSize_ ) // incoming buffer too large
// Normalize the "out" index so the overlap test below works across the
// wrap-around point of the ring.
3917 unsigned int relOutIndex = outIndex_;
3918 unsigned int inIndexEnd = inIndex_ + bufferSize;
3919 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3920 relOutIndex += bufferSize_;
3923 // the "IN" index CAN BEGIN at the "OUT" index
3924 // the "IN" index CANNOT END at the "OUT" index
3925 if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
3926 return false; // not enough space between "in" index and "out" index
3929 // copy buffer from external to internal
// Split the copy into the tail segment (from inIndex_ to end of ring) and
// the wrapped head segment (from the start of the ring).
3930 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3931 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3932 int fromInSize = bufferSize - fromZeroSize;
3937 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3938 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3940 case RTAUDIO_SINT16:
3941 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3942 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3944 case RTAUDIO_SINT24:
3945 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3946 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3948 case RTAUDIO_SINT32:
3949 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3950 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3952 case RTAUDIO_FLOAT32:
3953 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3954 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3956 case RTAUDIO_FLOAT64:
3957 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3958 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3962 // update "in" index
3963 inIndex_ += bufferSize;
3964 inIndex_ %= bufferSize_;
3969 // attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror of pushBuffer(): copies bufferSize samples of the given format out
// of the ring at outIndex_, wrapping at bufferSize_. Returns false when the
// request is invalid or not enough data is available; true on success.
// NOTE(review): the switch( format ) header, break statements and the final
// return fall outside this excerpt.
3970 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3972 if ( !buffer || // incoming buffer is NULL
3973 bufferSize == 0 || // incoming buffer has no data
3974 bufferSize > bufferSize_ ) // incoming buffer too large
// Normalize the "in" index so the overlap test below works across the
// wrap-around point of the ring.
3979 unsigned int relInIndex = inIndex_;
3980 unsigned int outIndexEnd = outIndex_ + bufferSize;
3981 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3982 relInIndex += bufferSize_;
3985 // the "OUT" index CANNOT BEGIN at the "IN" index
3986 // the "OUT" index CAN END at the "IN" index
3987 if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3988 return false; // not enough space between "out" index and "in" index
3991 // copy buffer from internal to external
// Split the copy into the tail segment (from outIndex_ to end of ring) and
// the wrapped head segment (from the start of the ring).
3992 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3993 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3994 int fromOutSize = bufferSize - fromZeroSize;
3999 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
4000 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
4002 case RTAUDIO_SINT16:
4003 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
4004 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
4006 case RTAUDIO_SINT24:
4007 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
4008 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
4010 case RTAUDIO_SINT32:
4011 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
4012 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
4014 case RTAUDIO_FLOAT32:
4015 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
4016 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
4018 case RTAUDIO_FLOAT64:
4019 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
4020 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
4024 // update "out" index
4025 outIndex_ += bufferSize;
4026 outIndex_ %= bufferSize_;
// Ring state. Indices are in sample units (they index the format-cast
// buffer in push/pullBuffer), wrapping modulo bufferSize_.
4033 unsigned int bufferSize_;
4034 unsigned int inIndex_;
4035 unsigned int outIndex_;
4038 //-----------------------------------------------------------------------------
4040 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
4041 // between HW and the user. The WasapiResampler class is used to perform this conversion between
4042 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
// Wraps the Windows Media Foundation CResamplerMediaObject MFT to convert
// audio between an input and output sample rate with a fixed sample format
// and channel count. NOTE(review): braces, access specifiers and several
// statements fall outside this excerpt; HRESULTs from the MF calls appear
// to be ignored here — confirm against the full source.
4043 class WasapiResampler
// Configure the resampler: same format/channels on both sides, only the
// sample rate differs between input and output media types.
4046 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
4047 unsigned int inSampleRate, unsigned int outSampleRate )
4048 : _bytesPerSample( bitsPerSample / 8 )
4049 , _channelCount( channelCount )
4050 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
4051 , _transformUnk( NULL )
4052 , _transform( NULL )
4053 , _mediaType( NULL )
4054 , _inputMediaType( NULL )
4055 , _outputMediaType( NULL )
4057 #ifdef __IWMResamplerProps_FWD_DEFINED__
4058 , _resamplerProps( NULL )
4061 // 1. Initialization
4063 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
4065 // 2. Create Resampler Transform Object
4067 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
4068 IID_IUnknown, ( void** ) &_transformUnk );
4070 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
4072 #ifdef __IWMResamplerProps_FWD_DEFINED__
4073 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
4074 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
4077 // 3. Specify input / output format
4079 MFCreateMediaType( &_mediaType );
4080 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4081 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4082 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4083 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4084 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4085 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4086 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4087 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
// Input type: a copy of the shared media type at the input sample rate.
4089 MFCreateMediaType( &_inputMediaType );
4090 _mediaType->CopyAllItems( _inputMediaType );
4092 _transform->SetInputType( 0, _inputMediaType, 0 );
// Output type: same base type with the rate fields overridden.
4094 MFCreateMediaType( &_outputMediaType );
4095 _mediaType->CopyAllItems( _outputMediaType );
4097 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4098 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4100 _transform->SetOutputType( 0, _outputMediaType, 0 );
4102 // 4. Send stream start messages to Resampler
4104 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4105 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4106 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
// Destructor: notify the MFT that streaming ended, then release all COM
// references. NOTE(review): MFShutdown() is not visible in this excerpt —
// confirm whether the matching MFStartup() shutdown happens elsewhere.
4111 // 8. Send stream stop messages to Resampler
4113 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4114 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
4120 SAFE_RELEASE( _transformUnk );
4121 SAFE_RELEASE( _transform );
4122 SAFE_RELEASE( _mediaType );
4123 SAFE_RELEASE( _inputMediaType );
4124 SAFE_RELEASE( _outputMediaType );
4126 #ifdef __IWMResamplerProps_FWD_DEFINED__
4127 SAFE_RELEASE( _resamplerProps );
// Convert inSampleCount frames from inBuffer into outBuffer; the number of
// frames actually produced is returned through outSampleCount. outBuffer
// must be large enough for inSampleCount * _sampleRatio frames plus one.
4131 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
4133 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4134 if ( _sampleRatio == 1 )
4136 // no sample rate conversion required
4137 memcpy( outBuffer, inBuffer, inputBufferSize );
4138 outSampleCount = inSampleCount;
// Worst-case output size: scaled input plus one extra frame of headroom
// for rounding.
4142 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4144 IMFMediaBuffer* rInBuffer;
4145 IMFSample* rInSample;
4146 BYTE* rInByteBuffer = NULL;
4148 // 5. Create Sample object from input data
4150 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4152 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4153 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4154 rInBuffer->Unlock();
4155 rInByteBuffer = NULL;
4157 rInBuffer->SetCurrentLength( inputBufferSize );
4159 MFCreateSample( &rInSample );
4160 rInSample->AddBuffer( rInBuffer );
4162 // 6. Pass input data to Resampler
4164 _transform->ProcessInput( 0, rInSample, 0 );
4166 SAFE_RELEASE( rInBuffer );
4167 SAFE_RELEASE( rInSample );
4169 // 7. Perform sample rate conversion
4171 IMFMediaBuffer* rOutBuffer = NULL;
4172 BYTE* rOutByteBuffer = NULL;
4174 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4176 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4178 // 7.1 Create Sample object for output data
4180 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4181 MFCreateSample( &( rOutDataBuffer.pSample ) );
4182 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4183 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4184 rOutDataBuffer.dwStreamID = 0;
4185 rOutDataBuffer.dwStatus = 0;
4186 rOutDataBuffer.pEvents = NULL;
4188 // 7.2 Get output data from Resampler
// MF_E_TRANSFORM_NEED_MORE_INPUT means no frames are available yet; clean
// up and return (outSampleCount handling for that path is outside this
// excerpt).
4190 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4193 SAFE_RELEASE( rOutBuffer );
4194 SAFE_RELEASE( rOutDataBuffer.pSample );
4198 // 7.3 Write output data to outBuffer
4200 SAFE_RELEASE( rOutBuffer );
4201 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4202 rOutBuffer->GetCurrentLength( &rBytes );
4204 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4205 memcpy( outBuffer, rOutByteBuffer, rBytes );
4206 rOutBuffer->Unlock();
4207 rOutByteBuffer = NULL;
// Convert produced byte count back to frames.
4209 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4210 SAFE_RELEASE( rOutBuffer );
4211 SAFE_RELEASE( rOutDataBuffer.pSample );
// Fixed stream parameters and owned COM interface pointers.
4215 unsigned int _bytesPerSample;
4216 unsigned int _channelCount;
4219 IUnknown* _transformUnk;
4220 IMFTransform* _transform;
4221 IMFMediaType* _mediaType;
4222 IMFMediaType* _inputMediaType;
4223 IMFMediaType* _outputMediaType;
4225 #ifdef __IWMResamplerProps_FWD_DEFINED__
4226 IWMResamplerProps* _resamplerProps;
4230 //-----------------------------------------------------------------------------
4232 // A structure to hold various information related to the WASAPI implementation.
// Per-stream WASAPI state: audio clients, capture/render service interfaces,
// and the event handles WASAPI signals when a buffer is ready. All members
// start NULL; ownership/release is handled in RtApiWasapi::closeStream().
4235 IAudioClient* captureAudioClient;
4236 IAudioClient* renderAudioClient;
4237 IAudioCaptureClient* captureClient;
4238 IAudioRenderClient* renderClient;
4239 HANDLE captureEvent;
4243 : captureAudioClient( NULL ),
4244 renderAudioClient( NULL ),
4245 captureClient( NULL ),
4246 renderClient( NULL ),
4247 captureEvent( NULL ),
4248 renderEvent( NULL ) {}
4251 //=============================================================================
// Constructor: initialize COM for this thread and create the MMDevice
// enumerator used by all device queries. Failure to create the enumerator
// (pre-Vista Windows) is tolerated; deviceEnumerator_ stays NULL and the
// device-query methods guard against it.
4253 RtApiWasapi::RtApiWasapi()
4254 : coInitialized_( false ), deviceEnumerator_( NULL )
4256 // WASAPI can run either apartment or multi-threaded
4257 HRESULT hr = CoInitialize( NULL );
// Remember whether we initialized COM so the destructor only calls
// CoUninitialize() when it is our responsibility.
4258 if ( !FAILED( hr ) )
4259 coInitialized_ = true;
4261 // Instantiate device enumerator
4262 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4263 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4264 ( void** ) &deviceEnumerator_ );
4266 // If this runs on an old Windows, it will fail. Ignore and proceed.
4268 deviceEnumerator_ = NULL;
4271 //-----------------------------------------------------------------------------
// Destructor: close any open stream, release the device enumerator, and
// balance the constructor's CoInitialize(). NOTE(review): the closeStream()
// and CoUninitialize() calls fall outside this excerpt.
4273 RtApiWasapi::~RtApiWasapi()
4275 if ( stream_.state != STREAM_CLOSED )
4278 SAFE_RELEASE( deviceEnumerator_ );
4280 // If this object previously called CoInitialize()
4281 if ( coInitialized_ )
4285 //=============================================================================
// Return the total number of active WASAPI endpoints (capture + render).
// On any enumeration failure, errorText_ is set and error() is invoked.
// NOTE(review): the early-return for a NULL enumerator, the Exit label and
// the final return fall outside this excerpt.
4287 unsigned int RtApiWasapi::getDeviceCount( void )
4289 unsigned int captureDeviceCount = 0;
4290 unsigned int renderDeviceCount = 0;
4292 IMMDeviceCollection* captureDevices = NULL;
4293 IMMDeviceCollection* renderDevices = NULL;
// Without an enumerator (old Windows) there are no devices to report.
4295 if ( !deviceEnumerator_ )
4298 // Count capture devices
4300 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4301 if ( FAILED( hr ) ) {
4302 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4306 hr = captureDevices->GetCount( &captureDeviceCount );
4307 if ( FAILED( hr ) ) {
4308 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4312 // Count render devices
4313 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4314 if ( FAILED( hr ) ) {
4315 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4319 hr = renderDevices->GetCount( &renderDeviceCount );
4320 if ( FAILED( hr ) ) {
4321 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4326 // release all references
4327 SAFE_RELEASE( captureDevices );
4328 SAFE_RELEASE( renderDevices );
// Success iff no error message was recorded along the way.
4330 if ( errorText_.empty() )
4331 return captureDeviceCount + renderDeviceCount;
4333 error( RtAudioError::DRIVER_ERROR );
4337 //-----------------------------------------------------------------------------
// Probe a single device and fill an RtAudio::DeviceInfo: name, default
// input/output flags, channel counts, supported sample rates and native
// formats. Device indices map render devices first, then capture devices
// (index >= renderDeviceCount selects a capture device). All COM references
// are released at the end regardless of outcome. NOTE(review): error-exit
// jumps, closing braces and the final return fall outside this excerpt.
4339 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4341 RtAudio::DeviceInfo info;
4342 unsigned int captureDeviceCount = 0;
4343 unsigned int renderDeviceCount = 0;
4344 std::string defaultDeviceName;
4345 bool isCaptureDevice = false;
4347 PROPVARIANT deviceNameProp;
4348 PROPVARIANT defaultDeviceNameProp;
4350 IMMDeviceCollection* captureDevices = NULL;
4351 IMMDeviceCollection* renderDevices = NULL;
4352 IMMDevice* devicePtr = NULL;
4353 IMMDevice* defaultDevicePtr = NULL;
4354 IAudioClient* audioClient = NULL;
4355 IPropertyStore* devicePropStore = NULL;
4356 IPropertyStore* defaultDevicePropStore = NULL;
4358 WAVEFORMATEX* deviceFormat = NULL;
4359 WAVEFORMATEX* closestMatchFormat = NULL;
// Pessimistically mark the probe failed; flipped on success (outside this
// excerpt).
4362 info.probed = false;
4364 // Count capture devices
4366 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4367 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4368 if ( FAILED( hr ) ) {
4369 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4373 hr = captureDevices->GetCount( &captureDeviceCount );
4374 if ( FAILED( hr ) ) {
4375 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4379 // Count render devices
4380 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4381 if ( FAILED( hr ) ) {
4382 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4386 hr = renderDevices->GetCount( &renderDeviceCount );
4387 if ( FAILED( hr ) ) {
4388 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4392 // validate device index
4393 if ( device >= captureDeviceCount + renderDeviceCount ) {
4394 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4395 errorType = RtAudioError::INVALID_USE;
4399 // determine whether index falls within capture or render devices
4400 if ( device >= renderDeviceCount ) {
4401 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4402 if ( FAILED( hr ) ) {
4403 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4406 isCaptureDevice = true;
4409 hr = renderDevices->Item( device, &devicePtr );
4410 if ( FAILED( hr ) ) {
4411 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4414 isCaptureDevice = false;
4417 // get default device name
4418 if ( isCaptureDevice ) {
4419 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4420 if ( FAILED( hr ) ) {
4421 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4426 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4427 if ( FAILED( hr ) ) {
4428 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4433 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4434 if ( FAILED( hr ) ) {
4435 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4438 PropVariantInit( &defaultDeviceNameProp );
4440 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4441 if ( FAILED( hr ) ) {
4442 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4446 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4449 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4450 if ( FAILED( hr ) ) {
4451 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4455 PropVariantInit( &deviceNameProp );
4457 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4458 if ( FAILED( hr ) ) {
4459 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4463 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default-device detection is by friendly-name comparison against the
// console default endpoint for the matching direction.
4466 if ( isCaptureDevice ) {
4467 info.isDefaultInput = info.name == defaultDeviceName;
4468 info.isDefaultOutput = false;
4471 info.isDefaultInput = false;
4472 info.isDefaultOutput = info.name == defaultDeviceName;
4476 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4477 if ( FAILED( hr ) ) {
4478 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4482 hr = audioClient->GetMixFormat( &deviceFormat );
4483 if ( FAILED( hr ) ) {
4484 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
// Channel counts come from the shared-mode mix format; a WASAPI endpoint
// is one-directional, so the opposite direction and duplex are zero.
4488 if ( isCaptureDevice ) {
4489 info.inputChannels = deviceFormat->nChannels;
4490 info.outputChannels = 0;
4491 info.duplexChannels = 0;
4494 info.inputChannels = 0;
4495 info.outputChannels = deviceFormat->nChannels;
4496 info.duplexChannels = 0;
4500 info.sampleRates.clear();
4502 // allow support for all sample rates as we have a built-in sample rate converter
4503 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4504 info.sampleRates.push_back( SAMPLE_RATES[i] );
4506 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
// Derive the native RtAudio format from the mix format's tag (or the
// WAVEFORMATEXTENSIBLE sub-format) and bit depth.
4509 info.nativeFormats = 0;
4511 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4512 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4513 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4515 if ( deviceFormat->wBitsPerSample == 32 ) {
4516 info.nativeFormats |= RTAUDIO_FLOAT32;
4518 else if ( deviceFormat->wBitsPerSample == 64 ) {
4519 info.nativeFormats |= RTAUDIO_FLOAT64;
4522 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4523 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4524 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4526 if ( deviceFormat->wBitsPerSample == 8 ) {
4527 info.nativeFormats |= RTAUDIO_SINT8;
4529 else if ( deviceFormat->wBitsPerSample == 16 ) {
4530 info.nativeFormats |= RTAUDIO_SINT16;
4532 else if ( deviceFormat->wBitsPerSample == 24 ) {
4533 info.nativeFormats |= RTAUDIO_SINT24;
4535 else if ( deviceFormat->wBitsPerSample == 32 ) {
4536 info.nativeFormats |= RTAUDIO_SINT32;
4544 // release all references
4545 PropVariantClear( &deviceNameProp );
4546 PropVariantClear( &defaultDeviceNameProp );
4548 SAFE_RELEASE( captureDevices );
4549 SAFE_RELEASE( renderDevices );
4550 SAFE_RELEASE( devicePtr );
4551 SAFE_RELEASE( defaultDevicePtr );
4552 SAFE_RELEASE( audioClient );
4553 SAFE_RELEASE( devicePropStore );
4554 SAFE_RELEASE( defaultDevicePropStore );
// Mix formats are allocated by GetMixFormat/IsFormatSupported with
// CoTaskMemAlloc and must be freed with CoTaskMemFree.
4556 CoTaskMemFree( deviceFormat );
4557 CoTaskMemFree( closestMatchFormat );
4559 if ( !errorText_.empty() )
4564 //-----------------------------------------------------------------------------
// Return the index of the default render endpoint by scanning all devices
// for isDefaultOutput. Note this calls getDeviceInfo() per device, so it is
// O(n) full probes. NOTE(review): the return statements fall outside this
// excerpt.
4566 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4568 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4569 if ( getDeviceInfo( i ).isDefaultOutput ) {
4577 //-----------------------------------------------------------------------------
// Return the index of the default capture endpoint by scanning all devices
// for isDefaultInput. Mirrors getDefaultOutputDevice(). NOTE(review): the
// return statements fall outside this excerpt.
4579 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4581 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4582 if ( getDeviceInfo( i ).isDefaultInput ) {
4590 //-----------------------------------------------------------------------------
// Close the open stream: stop it if needed, release the WasapiHandle's COM
// interfaces and event handles, free the user/device buffers, and mark the
// stream CLOSED. Warns (no throw) if no stream is open.
4592 void RtApiWasapi::closeStream( void )
4594 if ( stream_.state == STREAM_CLOSED ) {
4595 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4596 error( RtAudioError::WARNING );
4600 if ( stream_.state != STREAM_STOPPED )
4603 // clean up stream memory
4604 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4605 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4607 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4608 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
// Event handles are Win32 HANDLEs, not COM objects: close, don't Release.
4610 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4611 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4613 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4614 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4616 delete ( WasapiHandle* ) stream_.apiHandle;
4617 stream_.apiHandle = NULL;
// Free the per-direction user buffers (index 0 = output, 1 = input).
4619 for ( int i = 0; i < 2; i++ ) {
4620 if ( stream_.userBuffer[i] ) {
4621 free( stream_.userBuffer[i] );
4622 stream_.userBuffer[i] = 0;
4626 if ( stream_.deviceBuffer ) {
4627 free( stream_.deviceBuffer );
4628 stream_.deviceBuffer = 0;
4631 // update stream state
4632 stream_.state = STREAM_CLOSED;
4635 //-----------------------------------------------------------------------------
// Start the stream: mark it RUNNING and spawn the WASAPI worker thread
// (created suspended so its priority can be set before it runs). Warns and
// returns if the stream is already running.
4637 void RtApiWasapi::startStream( void )
4641 if ( stream_.state == STREAM_RUNNING ) {
4642 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4643 error( RtAudioError::WARNING );
// Record the start time used for stream-time bookkeeping.
4647 #if defined( HAVE_GETTIMEOFDAY )
4648 gettimeofday( &stream_.lastTickTimestamp, NULL );
4651 // update stream state
4652 stream_.state = STREAM_RUNNING;
4654 // create WASAPI stream thread
4655 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4657 if ( !stream_.callbackInfo.thread ) {
4658 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4659 error( RtAudioError::THREAD_ERROR );
// Apply the requested priority before releasing the suspended thread.
4662 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4663 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4667 //-----------------------------------------------------------------------------
// Stop the stream gracefully: signal the worker thread via STREAM_STOPPING,
// busy-wait until it reports STREAM_STOPPED, let the final buffer drain,
// then close the thread handle. Warns if already stopped.
4669 void RtApiWasapi::stopStream( void )
4673 if ( stream_.state == STREAM_STOPPED ) {
4674 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4675 error( RtAudioError::WARNING );
4679 // inform stream thread by setting stream state to STREAM_STOPPING
4680 stream_.state = STREAM_STOPPING;
4682 // wait until stream thread is stopped
// NOTE(review): the loop body (presumably a Sleep) falls outside this
// excerpt; the worker thread sets STREAM_STOPPED when it exits its loop.
4683 while( stream_.state != STREAM_STOPPED ) {
4687 // Wait for the last buffer to play before stopping.
4688 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4690 // close thread handle
4691 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4692 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4693 error( RtAudioError::THREAD_ERROR );
4697 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4700 //-----------------------------------------------------------------------------
// Abort the stream: identical to stopStream() except it does not wait for
// the last buffer to drain before closing the thread handle.
4702 void RtApiWasapi::abortStream( void )
4706 if ( stream_.state == STREAM_STOPPED ) {
4707 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4708 error( RtAudioError::WARNING );
4712 // inform stream thread by setting stream state to STREAM_STOPPING
4713 stream_.state = STREAM_STOPPING;
4715 // wait until stream thread is stopped
4716 while ( stream_.state != STREAM_STOPPED ) {
4720 // close thread handle
4721 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4722 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4723 error( RtAudioError::THREAD_ERROR );
4727 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4730 //-----------------------------------------------------------------------------
4732 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4733 unsigned int firstChannel, unsigned int sampleRate,
4734 RtAudioFormat format, unsigned int* bufferSize,
4735 RtAudio::StreamOptions* options )
4737 bool methodResult = FAILURE;
4738 unsigned int captureDeviceCount = 0;
4739 unsigned int renderDeviceCount = 0;
4741 IMMDeviceCollection* captureDevices = NULL;
4742 IMMDeviceCollection* renderDevices = NULL;
4743 IMMDevice* devicePtr = NULL;
4744 WAVEFORMATEX* deviceFormat = NULL;
4745 unsigned int bufferBytes;
4746 stream_.state = STREAM_STOPPED;
4748 // create API Handle if not already created
4749 if ( !stream_.apiHandle )
4750 stream_.apiHandle = ( void* ) new WasapiHandle();
4752 // Count capture devices
4754 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4755 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4756 if ( FAILED( hr ) ) {
4757 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4761 hr = captureDevices->GetCount( &captureDeviceCount );
4762 if ( FAILED( hr ) ) {
4763 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4767 // Count render devices
4768 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4769 if ( FAILED( hr ) ) {
4770 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4774 hr = renderDevices->GetCount( &renderDeviceCount );
4775 if ( FAILED( hr ) ) {
4776 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4780 // validate device index
4781 if ( device >= captureDeviceCount + renderDeviceCount ) {
4782 errorType = RtAudioError::INVALID_USE;
4783 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4787 // if device index falls within capture devices
4788 if ( device >= renderDeviceCount ) {
4789 if ( mode != INPUT ) {
4790 errorType = RtAudioError::INVALID_USE;
4791 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4795 // retrieve captureAudioClient from devicePtr
4796 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4798 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4799 if ( FAILED( hr ) ) {
4800 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4804 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4805 NULL, ( void** ) &captureAudioClient );
4806 if ( FAILED( hr ) ) {
4807 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4811 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4812 if ( FAILED( hr ) ) {
4813 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4817 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4818 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4821 // if device index falls within render devices and is configured for loopback
4822 if ( device < renderDeviceCount && mode == INPUT )
4824 // if renderAudioClient is not initialised, initialise it now
4825 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4826 if ( !renderAudioClient )
4828 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4831 // retrieve captureAudioClient from devicePtr
4832 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4834 hr = renderDevices->Item( device, &devicePtr );
4835 if ( FAILED( hr ) ) {
4836 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4840 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4841 NULL, ( void** ) &captureAudioClient );
4842 if ( FAILED( hr ) ) {
4843 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4847 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4848 if ( FAILED( hr ) ) {
4849 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4853 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4854 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4857 // if device index falls within render devices and is configured for output
4858 if ( device < renderDeviceCount && mode == OUTPUT )
4860 // if renderAudioClient is already initialised, don't initialise it again
4861 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4862 if ( renderAudioClient )
4864 methodResult = SUCCESS;
4868 hr = renderDevices->Item( device, &devicePtr );
4869 if ( FAILED( hr ) ) {
4870 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4874 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4875 NULL, ( void** ) &renderAudioClient );
4876 if ( FAILED( hr ) ) {
4877 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4881 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4882 if ( FAILED( hr ) ) {
4883 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4887 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4888 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4892 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4893 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4894 stream_.mode = DUPLEX;
4897 stream_.mode = mode;
4900 stream_.device[mode] = device;
4901 stream_.doByteSwap[mode] = false;
4902 stream_.sampleRate = sampleRate;
4903 stream_.bufferSize = *bufferSize;
4904 stream_.nBuffers = 1;
4905 stream_.nUserChannels[mode] = channels;
4906 stream_.channelOffset[mode] = firstChannel;
4907 stream_.userFormat = format;
4908 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4910 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4911 stream_.userInterleaved = false;
4913 stream_.userInterleaved = true;
4914 stream_.deviceInterleaved[mode] = true;
4916 // Set flags for buffer conversion.
4917 stream_.doConvertBuffer[mode] = false;
4918 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4919 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4920 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4921 stream_.doConvertBuffer[mode] = true;
4922 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4923 stream_.nUserChannels[mode] > 1 )
4924 stream_.doConvertBuffer[mode] = true;
4926 if ( stream_.doConvertBuffer[mode] )
4927 setConvertInfo( mode, 0 );
4929 // Allocate necessary internal buffers
4930 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4932 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4933 if ( !stream_.userBuffer[mode] ) {
4934 errorType = RtAudioError::MEMORY_ERROR;
4935 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4939 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4940 stream_.callbackInfo.priority = 15;
4942 stream_.callbackInfo.priority = 0;
4944 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4945 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4947 methodResult = SUCCESS;
4951 SAFE_RELEASE( captureDevices );
4952 SAFE_RELEASE( renderDevices );
4953 SAFE_RELEASE( devicePtr );
4954 CoTaskMemFree( deviceFormat );
4956 // if method failed, close the stream
4957 if ( methodResult == FAILURE )
4960 if ( !errorText_.empty() )
4962 return methodResult;
4965 //=============================================================================
4967 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4970 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4975 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4978 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4983 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4986 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4991 //-----------------------------------------------------------------------------
4993 void RtApiWasapi::wasapiThread()
// Core WASAPI stream-processing loop, run on its own thread (see
// runWasapiThread). Initializes capture/render clients and resamplers, then
// loops: pull device audio -> resample/convert -> user callback -> convert/
// resample -> push device audio, until stream_.state leaves STREAM_STOPPING.
// NOTE(review): this listing is an elided excerpt — braces, "goto Exit;"
// statements after error assignments, and some argument lines are missing.
// Comments below annotate only what is visible.
4995 // as this is a new thread, we must CoInitialize it
4996 CoInitialize( NULL );
// Fetch the per-stream COM objects cached by probeDeviceOpen().
5000 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
5001 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
5002 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
5003 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
5004 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
5005 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
5007 WAVEFORMATEX* captureFormat = NULL;
5008 WAVEFORMATEX* renderFormat = NULL;
5009 float captureSrRatio = 0.0f;
5010 float renderSrRatio = 0.0f;
5011 WasapiBuffer captureBuffer;
5012 WasapiBuffer renderBuffer;
5013 WasapiResampler* captureResampler = NULL;
5014 WasapiResampler* renderResampler = NULL;
5016 // declare local stream variables
5017 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
5018 BYTE* streamBuffer = NULL;
5019 unsigned long captureFlags = 0;
5020 unsigned int bufferFrameCount = 0;
5021 unsigned int numFramesPadding = 0;
5022 unsigned int convBufferSize = 0;
// Loopback mode is inferred when the same device index serves both
// directions; in that case capture reads the render device's output.
5023 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
5024 bool callbackPushed = true;
5025 bool callbackPulled = false;
5026 bool callbackStopped = false;
5027 int callbackResult = 0;
5029 // convBuffer is used to store converted buffers between WASAPI and the user
5030 char* convBuffer = NULL;
5031 unsigned int convBuffSize = 0;
5032 unsigned int deviceBuffSize = 0;
5034 std::string errorText;
5035 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
5037 // Attempt to assign "Pro Audio" characteristic to thread
// NOTE(review): the AvrtDll handle and GetProcAddress result appear to be
// used unchecked here — confirm the elided lines guard against load failure.
5038 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
5040 DWORD taskIndex = 0;
5041 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
5042 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
5043 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
5044 FreeLibrary( AvrtDll );
5047 // start capture stream if applicable
5048 if ( captureAudioClient ) {
5049 hr = captureAudioClient->GetMixFormat( &captureFormat );
5050 if ( FAILED( hr ) ) {
5051 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5055 // init captureResampler
5056 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
5057 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
5058 captureFormat->nSamplesPerSec, stream_.sampleRate );
// Ratio of device rate to user rate; used to size/scale buffers below.
5060 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
5062 if ( !captureClient ) {
5063 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5064 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5069 if ( FAILED( hr ) ) {
5070 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5074 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5075 ( void** ) &captureClient );
5076 if ( FAILED( hr ) ) {
5077 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5081 // don't configure captureEvent if in loopback mode
5082 if ( !loopbackEnabled )
5084 // configure captureEvent to trigger on every available capture buffer
5085 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5086 if ( !captureEvent ) {
5087 errorType = RtAudioError::SYSTEM_ERROR;
5088 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5092 hr = captureAudioClient->SetEventHandle( captureEvent );
5093 if ( FAILED( hr ) ) {
5094 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Cache the newly created COM objects/handles back on the stream handle so
// closeStream()/other threads can release them.
5098 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5101 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5103 // reset the capture stream
5104 hr = captureAudioClient->Reset();
5105 if ( FAILED( hr ) ) {
5106 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5110 // start the capture stream
5111 hr = captureAudioClient->Start();
5112 if ( FAILED( hr ) ) {
5113 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5118 unsigned int inBufferSize = 0;
5119 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5120 if ( FAILED( hr ) ) {
5121 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5125 // scale outBufferSize according to stream->user sample rate ratio
5126 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5127 inBufferSize *= stream_.nDeviceChannels[INPUT];
5129 // set captureBuffer size
// Ring buffer must hold one device period plus one (rate-scaled) user period.
5130 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5133 // start render stream if applicable
5134 if ( renderAudioClient ) {
5135 hr = renderAudioClient->GetMixFormat( &renderFormat );
5136 if ( FAILED( hr ) ) {
5137 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5141 // init renderResampler
5142 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5143 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5144 stream_.sampleRate, renderFormat->nSamplesPerSec );
5146 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5148 if ( !renderClient ) {
5149 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5150 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5155 if ( FAILED( hr ) ) {
5156 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5160 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5161 ( void** ) &renderClient );
5162 if ( FAILED( hr ) ) {
5163 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5167 // configure renderEvent to trigger on every available render buffer
5168 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5169 if ( !renderEvent ) {
5170 errorType = RtAudioError::SYSTEM_ERROR;
5171 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5175 hr = renderAudioClient->SetEventHandle( renderEvent );
5176 if ( FAILED( hr ) ) {
5177 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5181 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5182 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5184 // reset the render stream
5185 hr = renderAudioClient->Reset();
5186 if ( FAILED( hr ) ) {
5187 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5191 // start the render stream
5192 hr = renderAudioClient->Start();
5193 if ( FAILED( hr ) ) {
5194 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5199 unsigned int outBufferSize = 0;
5200 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5201 if ( FAILED( hr ) ) {
5202 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5206 // scale inBufferSize according to user->stream sample rate ratio
5207 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5208 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5210 // set renderBuffer size
5211 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5214 // malloc buffer memory
// Size the intermediate conversion and device buffers for the active
// direction(s); DUPLEX takes the max of both so one buffer serves either.
5215 if ( stream_.mode == INPUT )
5217 using namespace std; // for ceilf
5218 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5219 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5221 else if ( stream_.mode == OUTPUT )
5223 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5224 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5226 else if ( stream_.mode == DUPLEX )
5228 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5229 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5230 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5231 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5234 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5235 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5236 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5237 if ( !convBuffer || !stream_.deviceBuffer ) {
5238 errorType = RtAudioError::MEMORY_ERROR;
5239 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5243 // stream process loop
// Each iteration: (a) pull+resample capture data into the user buffer,
// (b) run the user callback once per full user buffer, (c) resample+push
// callback output, (d) service the WASAPI capture/render device buffers.
5244 while ( stream_.state != STREAM_STOPPING ) {
5245 if ( !callbackPulled ) {
5248 // 1. Pull callback buffer from inputBuffer
5249 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5250 // Convert callback buffer to user format
5252 if ( captureAudioClient )
5254 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5255 if ( captureSrRatio != 1 )
5257 // account for remainders
5262 while ( convBufferSize < stream_.bufferSize )
5264 // Pull callback buffer from inputBuffer
5265 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5266 samplesToPull * stream_.nDeviceChannels[INPUT],
5267 stream_.deviceFormat[INPUT] );
5269 if ( !callbackPulled )
5274 // Convert callback buffer to user sample rate
5275 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5276 unsigned int convSamples = 0;
5278 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5283 convBufferSize += convSamples;
5284 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5287 if ( callbackPulled )
5289 if ( stream_.doConvertBuffer[INPUT] ) {
5290 // Convert callback buffer to user format
5291 convertBuffer( stream_.userBuffer[INPUT],
5292 stream_.deviceBuffer,
5293 stream_.convertInfo[INPUT] );
5296 // no further conversion, simple copy deviceBuffer to userBuffer
5297 memcpy( stream_.userBuffer[INPUT],
5298 stream_.deviceBuffer,
5299 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5304 // if there is no capture stream, set callbackPulled flag
5305 callbackPulled = true;
5310 // 1. Execute user callback method
5311 // 2. Handle return value from callback
5313 // if callback has not requested the stream to stop
5314 if ( callbackPulled && !callbackStopped ) {
5315 // Execute user callback method
5316 callbackResult = callback( stream_.userBuffer[OUTPUT],
5317 stream_.userBuffer[INPUT],
5320 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5321 stream_.callbackInfo.userData );
5324 RtApi::tickStreamTime();
5326 // Handle return value from callback
// Return 1 => drain-then-stop, 2 => abort now. Either way the actual stop
// must run on a separate thread because stopStream() joins this thread.
5327 if ( callbackResult == 1 ) {
5328 // instantiate a thread to stop this thread
5329 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5330 if ( !threadHandle ) {
5331 errorType = RtAudioError::THREAD_ERROR;
5332 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5335 else if ( !CloseHandle( threadHandle ) ) {
5336 errorType = RtAudioError::THREAD_ERROR;
5337 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5341 callbackStopped = true;
5343 else if ( callbackResult == 2 ) {
5344 // instantiate a thread to stop this thread
5345 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5346 if ( !threadHandle ) {
5347 errorType = RtAudioError::THREAD_ERROR;
5348 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5351 else if ( !CloseHandle( threadHandle ) ) {
5352 errorType = RtAudioError::THREAD_ERROR;
5353 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5357 callbackStopped = true;
5364 // 1. Convert callback buffer to stream format
5365 // 2. Convert callback buffer to stream sample rate and channel count
5366 // 3. Push callback buffer into outputBuffer
5368 if ( renderAudioClient && callbackPulled )
5370 // if the last call to renderBuffer.PushBuffer() was successful
5371 if ( callbackPushed || convBufferSize == 0 )
5373 if ( stream_.doConvertBuffer[OUTPUT] )
5375 // Convert callback buffer to stream format
5376 convertBuffer( stream_.deviceBuffer,
5377 stream_.userBuffer[OUTPUT],
5378 stream_.convertInfo[OUTPUT] );
5382 // no further conversion, simple copy userBuffer to deviceBuffer
5383 memcpy( stream_.deviceBuffer,
5384 stream_.userBuffer[OUTPUT],
5385 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5388 // Convert callback buffer to stream sample rate
5389 renderResampler->Convert( convBuffer,
5390 stream_.deviceBuffer,
5395 // Push callback buffer into outputBuffer
5396 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5397 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5398 stream_.deviceFormat[OUTPUT] );
5401 // if there is no render stream, set callbackPushed flag
5402 callbackPushed = true;
5407 // 1. Get capture buffer from stream
5408 // 2. Push capture buffer into inputBuffer
5409 // 3. If 2. was successful: Release capture buffer
5411 if ( captureAudioClient ) {
5412 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
// In loopback mode there is no capture event, so wait on the render event.
5413 if ( !callbackPulled ) {
5414 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5417 // Get capture buffer from stream
5418 hr = captureClient->GetBuffer( &streamBuffer,
5420 &captureFlags, NULL, NULL );
5421 if ( FAILED( hr ) ) {
5422 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5426 if ( bufferFrameCount != 0 ) {
5427 // Push capture buffer into inputBuffer
5428 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5429 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5430 stream_.deviceFormat[INPUT] ) )
5432 // Release capture buffer
5433 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5434 if ( FAILED( hr ) ) {
5435 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5441 // Inform WASAPI that capture was unsuccessful
5442 hr = captureClient->ReleaseBuffer( 0 );
5443 if ( FAILED( hr ) ) {
5444 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5451 // Inform WASAPI that capture was unsuccessful
5452 hr = captureClient->ReleaseBuffer( 0 );
5453 if ( FAILED( hr ) ) {
5454 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5462 // 1. Get render buffer from stream
5463 // 2. Pull next buffer from outputBuffer
5464 // 3. If 2. was successful: Fill render buffer with next buffer
5465 // Release render buffer
5467 if ( renderAudioClient ) {
5468 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5469 if ( callbackPulled && !callbackPushed ) {
5470 WaitForSingleObject( renderEvent, INFINITE );
5473 // Get render buffer from stream
5474 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5475 if ( FAILED( hr ) ) {
5476 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5480 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5481 if ( FAILED( hr ) ) {
5482 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Only the unpadded portion of the device buffer may be written this pass.
5486 bufferFrameCount -= numFramesPadding;
5488 if ( bufferFrameCount != 0 ) {
5489 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5490 if ( FAILED( hr ) ) {
5491 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5495 // Pull next buffer from outputBuffer
5496 // Fill render buffer with next buffer
5497 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5498 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5499 stream_.deviceFormat[OUTPUT] ) )
5501 // Release render buffer
5502 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5503 if ( FAILED( hr ) ) {
5504 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5510 // Inform WASAPI that render was unsuccessful
5511 hr = renderClient->ReleaseBuffer( 0, 0 );
5512 if ( FAILED( hr ) ) {
5513 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5520 // Inform WASAPI that render was unsuccessful
5521 hr = renderClient->ReleaseBuffer( 0, 0 );
5522 if ( FAILED( hr ) ) {
5523 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5529 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5530 if ( callbackPushed ) {
5531 // unsetting the callbackPulled flag lets the stream know that
5532 // the audio device is ready for another callback output buffer.
5533 callbackPulled = false;
// Cleanup path (the "Exit:" label itself is elided in this listing):
// release per-thread allocations before reporting state/errors.
5540 CoTaskMemFree( captureFormat );
5541 CoTaskMemFree( renderFormat );
5543 free ( convBuffer );
5544 delete renderResampler;
5545 delete captureResampler;
5549 // update stream state
5550 stream_.state = STREAM_STOPPED;
5552 if ( !errorText.empty() )
5554 errorText_ = errorText;
5559 //******************** End of __WINDOWS_WASAPI__ *********************//
5563 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5565 // Modified by Robin Davies, October 2005
5566 // - Improvements to DirectX pointer chasing.
5567 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5568 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5569 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5570 // Changed device query structure for RtAudio 4.0.7, January 2010
5572 #include <windows.h>
5573 #include <process.h>
5574 #include <mmsystem.h>
5578 #include <algorithm>
5580 #if defined(__MINGW32__)
5581 // missing from latest mingw winapi
5582 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5583 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5584 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5585 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5588 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5590 #ifdef _MSC_VER // if Microsoft Visual C++
5591 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5594 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5596 if ( pointer > bufferSize ) pointer -= bufferSize;
5597 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5598 if ( pointer < earlierPointer ) pointer += bufferSize;
5599 return pointer >= earlierPointer && pointer < laterPointer;
5602 // A structure to hold various information related to the DirectSound
5603 // API implementation.
// NOTE(review): this listing is elided — the struct header and several
// members (device ids, buffer pointers, xrun flags) are missing here.
5605 unsigned int drainCounter; // Tracks callback counts when draining
5606 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Per-direction state, indexed [0] = output/render, [1] = input/capture
// (matching the OUTPUT/INPUT indexing used elsewhere in this file).
5610 UINT bufferPointer[2];
5611 DWORD dsBufferSize[2];
5612 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero/false-initialize all per-direction state.
5616 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5619 // Declarations for utility functions, callbacks, and structures
5620 // specific to the DirectSound implementation.
// Enumeration callback handed to DirectSound(Capture)Enumerate; records each
// reported device. (Remaining parameters are elided from this listing.)
5621 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5622 LPCTSTR description,
// Maps a DirectSound HRESULT code to a human-readable message.
5626 static const char* getErrorString( int code );
// Audio-thread entry point used with _beginthreadex.
5628 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice default constructor tail (struct header elided in this listing):
// a freshly constructed device record is unfound with no valid ids.
5637 : found(false) { validId[0] = false; validId[1] = false; }
// Carries enumeration context into deviceQueryCallback: the direction being
// probed and the shared device list to append to.
5640 struct DsProbeData {
5642 std::vector<struct DsDevice>* dsDevices;
5645 RtApiDs :: RtApiDs()
5647 // Dsound will run both-threaded. If CoInitialize fails, then just
5648 // accept whatever the mainline chose for a threading model.
5649 coInitialized_ = false;
5650 HRESULT hr = CoInitialize( NULL );
5651 if ( !FAILED( hr ) ) coInitialized_ = true;
5654 RtApiDs :: ~RtApiDs()
5656 if ( stream_.state != STREAM_CLOSED ) closeStream();
5657 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5660 // The DirectSound default output is always the first device.
5661 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5666 // The DirectSound default input is always the first input device,
5667 // which is the first capture device enumerated.
5668 unsigned int RtApiDs :: getDefaultInputDevice( void )
5673 unsigned int RtApiDs :: getDeviceCount( void )
5675 // Set query flag for previously found devices to false, so that we
5676 // can check for any devices that have disappeared.
5677 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5678 dsDevices[i].found = false;
5680 // Query DirectSound devices.
5681 struct DsProbeData probeInfo;
5682 probeInfo.isInput = false;
5683 probeInfo.dsDevices = &dsDevices;
5684 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5685 if ( FAILED( result ) ) {
5686 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5687 errorText_ = errorStream_.str();
5688 error( RtAudioError::WARNING );
5691 // Query DirectSoundCapture devices.
5692 probeInfo.isInput = true;
5693 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5694 if ( FAILED( result ) ) {
5695 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5696 errorText_ = errorStream_.str();
5697 error( RtAudioError::WARNING );
5700 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5701 for ( unsigned int i=0; i<dsDevices.size(); ) {
5702 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5706 return static_cast<unsigned int>(dsDevices.size());
5709 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5711 RtAudio::DeviceInfo info;
5712 info.probed = false;
5714 if ( dsDevices.size() == 0 ) {
5715 // Force a query of all devices
5717 if ( dsDevices.size() == 0 ) {
5718 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5719 error( RtAudioError::INVALID_USE );
5724 if ( device >= dsDevices.size() ) {
5725 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5726 error( RtAudioError::INVALID_USE );
5731 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5733 LPDIRECTSOUND output;
5735 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5736 if ( FAILED( result ) ) {
5737 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5738 errorText_ = errorStream_.str();
5739 error( RtAudioError::WARNING );
5743 outCaps.dwSize = sizeof( outCaps );
5744 result = output->GetCaps( &outCaps );
5745 if ( FAILED( result ) ) {
5747 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5748 errorText_ = errorStream_.str();
5749 error( RtAudioError::WARNING );
5753 // Get output channel information.
5754 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5756 // Get sample rate information.
5757 info.sampleRates.clear();
5758 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5759 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5760 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5761 info.sampleRates.push_back( SAMPLE_RATES[k] );
5763 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5764 info.preferredSampleRate = SAMPLE_RATES[k];
5768 // Get format information.
5769 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5770 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5774 if ( getDefaultOutputDevice() == device )
5775 info.isDefaultOutput = true;
5777 if ( dsDevices[ device ].validId[1] == false ) {
5778 info.name = dsDevices[ device ].name;
5785 LPDIRECTSOUNDCAPTURE input;
5786 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5787 if ( FAILED( result ) ) {
5788 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5789 errorText_ = errorStream_.str();
5790 error( RtAudioError::WARNING );
5795 inCaps.dwSize = sizeof( inCaps );
5796 result = input->GetCaps( &inCaps );
5797 if ( FAILED( result ) ) {
5799 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5800 errorText_ = errorStream_.str();
5801 error( RtAudioError::WARNING );
5805 // Get input channel information.
5806 info.inputChannels = inCaps.dwChannels;
5808 // Get sample rate and format information.
5809 std::vector<unsigned int> rates;
5810 if ( inCaps.dwChannels >= 2 ) {
5811 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5812 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5813 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5814 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5815 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5816 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5817 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5818 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5820 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5821 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5822 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5823 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5824 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5826 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5827 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5828 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5829 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5830 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5833 else if ( inCaps.dwChannels == 1 ) {
5834 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5835 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5836 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5837 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5838 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5839 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5840 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5841 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5843 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5844 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5845 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5846 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5847 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5849 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5850 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5851 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5852 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5853 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5856 else info.inputChannels = 0; // technically, this would be an error
5860 if ( info.inputChannels == 0 ) return info;
5862 // Copy the supported rates to the info structure but avoid duplication.
5864 for ( unsigned int i=0; i<rates.size(); i++ ) {
5866 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5867 if ( rates[i] == info.sampleRates[j] ) {
5872 if ( found == false ) info.sampleRates.push_back( rates[i] );
5874 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5876 // If device opens for both playback and capture, we determine the channels.
5877 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5878 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5880 if ( device == 0 ) info.isDefaultInput = true;
5882 // Copy name and return.
5883 info.name = dsDevices[ device ].name;
// Open the given DirectSound device for one direction (OUTPUT or INPUT) and
// set up all per-mode stream state: negotiated device format (8/16-bit PCM),
// the DS secondary playback buffer or capture buffer, user/device conversion
// buffers, the shared DsHandle, and (on first open) the callback thread.
//
// Parameters:
//   device       - index into dsDevices; validated against nDevices below
//   mode         - OUTPUT or INPUT; opening the second direction on an
//                  already-open stream produces DUPLEX (see stream_.mode logic)
//   channels     - user channel count (DirectSound caps total at 2)
//   firstChannel - channel offset; channels + firstChannel must be <= 2
//   sampleRate   - requested rate in Hz, copied into the WAVEFORMATEX
//   format       - user sample format; device format negotiated separately
//   bufferSize   - in/out: user buffer size in frames, clamped to >= 32
//   options      - optional; supplies numberOfBuffers and stream flags
//
// Returns true on success; on failure sets errorText_ and releases any
// partially acquired DS objects before returning (see cleanup at the end).
//
// NOTE(review): the embedded original line numbers jump in this extract, so
// several error-return statements and closing braces are not visible here;
// comments describe only the code that is shown.
5888 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5889 unsigned int firstChannel, unsigned int sampleRate,
5890 RtAudioFormat format, unsigned int *bufferSize,
5891 RtAudio::StreamOptions *options )
// DirectSound allows at most two channels per device (stereo), including the offset.
5893 if ( channels + firstChannel > 2 ) {
5894 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5898 size_t nDevices = dsDevices.size();
5899 if ( nDevices == 0 ) {
5900 // This should not happen because a check is made before this function is called.
5901 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5905 if ( device >= nDevices ) {
5906 // This should not happen because a check is made before this function is called.
5907 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
// validId[0] marks a usable output GUID, validId[1] a usable capture GUID.
5911 if ( mode == OUTPUT ) {
5912 if ( dsDevices[ device ].validId[0] == false ) {
5913 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5914 errorText_ = errorStream_.str();
5918 else { // mode == INPUT
5919 if ( dsDevices[ device ].validId[1] == false ) {
5920 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5921 errorText_ = errorStream_.str();
5926 // According to a note in PortAudio, using GetDesktopWindow()
5927 // instead of GetForegroundWindow() is supposed to avoid problems
5928 // that occur when the application's window is not the foreground
5929 // window. Also, if the application window closes before the
5930 // DirectSound buffer, DirectSound can crash. In the past, I had
5931 // problems when using GetDesktopWindow() but it seems fine now
5932 // (January 2010). I'll leave it commented here.
5933 // HWND hWnd = GetForegroundWindow();
5934 HWND hWnd = GetDesktopWindow();
5936 // Check the numberOfBuffers parameter and limit the lowest value to
5937 // two. This is a judgement call and a value of two is probably too
5938 // low for capture, but it should work for playback.
5940 if ( options ) nBuffers = options->numberOfBuffers;
5941 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5942 if ( nBuffers < 2 ) nBuffers = 3;
5944 // Check the lower range of the user-specified buffer size and set
5945 // (arbitrarily) to a lower bound of 32.
5946 if ( *bufferSize < 32 ) *bufferSize = 32;
5948 // Create the wave format structure. The data format setting will
5949 // be determined later.
5950 WAVEFORMATEX waveFormat;
5951 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5952 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5953 waveFormat.nChannels = channels + firstChannel;
5954 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5956 // Determine the device buffer size. By default, we'll use the value
5957 // defined above (32K), but we will grow it to make allowances for
5958 // very large software buffer sizes.
5959 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5960 DWORD dsPointerLeadTime = 0;
// ohandle/bhandle carry the DS device object and its buffer out of the
// mode-specific branches into the shared DsHandle below.
5962 void *ohandle = 0, *bhandle = 0;
5964 if ( mode == OUTPUT ) {
5966 LPDIRECTSOUND output;
5967 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5968 if ( FAILED( result ) ) {
5969 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5970 errorText_ = errorStream_.str();
5975 outCaps.dwSize = sizeof( outCaps );
5976 result = output->GetCaps( &outCaps );
5977 if ( FAILED( result ) ) {
5979 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5980 errorText_ = errorStream_.str();
5984 // Check channel information.
5985 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5986 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5987 errorText_ = errorStream_.str();
5991 // Check format information. Use 16-bit format unless not
5992 // supported or user requests 8-bit.
5993 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5994 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5995 waveFormat.wBitsPerSample = 16;
5996 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5999 waveFormat.wBitsPerSample = 8;
6000 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6002 stream_.userFormat = format;
6004 // Update wave format structure and buffer information.
6005 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6006 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
// Lead time in bytes between our write cursor and DS's play cursor:
// total bytes of one full set of user buffers at the device sample width.
6007 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6009 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6010 while ( dsPointerLeadTime * 2U > dsBufferSize )
6013 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
6014 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
6015 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
6016 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
6017 if ( FAILED( result ) ) {
6019 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
6020 errorText_ = errorStream_.str();
6024 // Even though we will write to the secondary buffer, we need to
6025 // access the primary buffer to set the correct output format
6026 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
6027 // buffer description.
6028 DSBUFFERDESC bufferDescription;
6029 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6030 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6031 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
6033 // Obtain the primary buffer
6034 LPDIRECTSOUNDBUFFER buffer;
6035 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6036 if ( FAILED( result ) ) {
6038 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
6039 errorText_ = errorStream_.str();
6043 // Set the primary DS buffer sound format.
6044 result = buffer->SetFormat( &waveFormat );
6045 if ( FAILED( result ) ) {
6047 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
6048 errorText_ = errorStream_.str();
6052 // Setup the secondary DS buffer description.
6053 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6054 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6055 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6056 DSBCAPS_GLOBALFOCUS |
6057 DSBCAPS_GETCURRENTPOSITION2 |
6058 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
6059 bufferDescription.dwBufferBytes = dsBufferSize;
6060 bufferDescription.lpwfxFormat = &waveFormat;
6062 // Try to create the secondary DS buffer. If that doesn't work,
6063 // try to use software mixing. Otherwise, there's a problem.
6064 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6065 if ( FAILED( result ) ) {
// Hardware mixing failed; retry with the same flags but software mixing.
6066 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6067 DSBCAPS_GLOBALFOCUS |
6068 DSBCAPS_GETCURRENTPOSITION2 |
6069 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6070 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6071 if ( FAILED( result ) ) {
6073 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6074 errorText_ = errorStream_.str();
6079 // Get the buffer size ... might be different from what we specified.
6081 dsbcaps.dwSize = sizeof( DSBCAPS );
6082 result = buffer->GetCaps( &dsbcaps );
6083 if ( FAILED( result ) ) {
6086 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6087 errorText_ = errorStream_.str();
6091 dsBufferSize = dsbcaps.dwBufferBytes;
6093 // Lock the DS buffer
6096 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6097 if ( FAILED( result ) ) {
6100 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6101 errorText_ = errorStream_.str();
6105 // Zero the DS buffer
6106 ZeroMemory( audioPtr, dataLen );
6108 // Unlock the DS buffer
6109 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6110 if ( FAILED( result ) ) {
6113 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6114 errorText_ = errorStream_.str();
// Stash the device object and secondary buffer for the DsHandle below.
6118 ohandle = (void *) output;
6119 bhandle = (void *) buffer;
6122 if ( mode == INPUT ) {
6124 LPDIRECTSOUNDCAPTURE input;
6125 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6126 if ( FAILED( result ) ) {
6127 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6128 errorText_ = errorStream_.str();
6133 inCaps.dwSize = sizeof( inCaps );
6134 result = input->GetCaps( &inCaps );
6135 if ( FAILED( result ) ) {
6137 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6138 errorText_ = errorStream_.str();
6142 // Check channel information.
6143 if ( inCaps.dwChannels < channels + firstChannel ) {
6144 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6148 // Check format information. Use 16-bit format unless user
// requests 8-bit AND the capture device advertises an 8-bit format
// (WAVE_FORMAT_* flags) for the requested channel count.
6150 DWORD deviceFormats;
6151 if ( channels + firstChannel == 2 ) {
6152 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6153 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6154 waveFormat.wBitsPerSample = 8;
6155 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6157 else { // assume 16-bit is supported
6158 waveFormat.wBitsPerSample = 16;
6159 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6162 else { // channel == 1
6163 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6164 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6165 waveFormat.wBitsPerSample = 8;
6166 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6168 else { // assume 16-bit is supported
6169 waveFormat.wBitsPerSample = 16;
6170 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6173 stream_.userFormat = format;
6175 // Update wave format structure and buffer information.
6176 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6177 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6178 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6180 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6181 while ( dsPointerLeadTime * 2U > dsBufferSize )
6184 // Setup the secondary DS buffer description.
6185 DSCBUFFERDESC bufferDescription;
6186 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6187 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6188 bufferDescription.dwFlags = 0;
6189 bufferDescription.dwReserved = 0;
6190 bufferDescription.dwBufferBytes = dsBufferSize;
6191 bufferDescription.lpwfxFormat = &waveFormat;
6193 // Create the capture buffer.
6194 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6195 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6196 if ( FAILED( result ) ) {
6198 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6199 errorText_ = errorStream_.str();
6203 // Get the buffer size ... might be different from what we specified.
6205 dscbcaps.dwSize = sizeof( DSCBCAPS );
6206 result = buffer->GetCaps( &dscbcaps );
6207 if ( FAILED( result ) ) {
6210 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6211 errorText_ = errorStream_.str();
6215 dsBufferSize = dscbcaps.dwBufferBytes;
6217 // NOTE: We could have a problem here if this is a duplex stream
6218 // and the play and capture hardware buffer sizes are different
6219 // (I'm actually not sure if that is a problem or not).
6220 // Currently, we are not verifying that.
6222 // Lock the capture buffer
6225 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6226 if ( FAILED( result ) ) {
6229 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6230 errorText_ = errorStream_.str();
// Clear the locked region so no stale data precedes real capture.
6235 ZeroMemory( audioPtr, dataLen );
6237 // Unlock the buffer
6238 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6239 if ( FAILED( result ) ) {
6242 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6243 errorText_ = errorStream_.str();
6247 ohandle = (void *) input;
6248 bhandle = (void *) buffer;
6251 // Set various stream parameters
6252 DsHandle *handle = 0;
6253 stream_.nDeviceChannels[mode] = channels + firstChannel;
6254 stream_.nUserChannels[mode] = channels;
6255 stream_.bufferSize = *bufferSize;
6256 stream_.channelOffset[mode] = firstChannel;
// DirectSound always delivers interleaved data; the conversion layer
// handles a non-interleaved user request.
6257 stream_.deviceInterleaved[mode] = true;
6258 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6259 else stream_.userInterleaved = true;
6261 // Set flag for buffer conversion
6262 stream_.doConvertBuffer[mode] = false;
6263 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6264 stream_.doConvertBuffer[mode] = true;
6265 if (stream_.userFormat != stream_.deviceFormat[mode])
6266 stream_.doConvertBuffer[mode] = true;
6267 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6268 stream_.nUserChannels[mode] > 1 )
6269 stream_.doConvertBuffer[mode] = true;
6271 // Allocate necessary internal buffers
6272 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6273 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6274 if ( stream_.userBuffer[mode] == NULL ) {
6275 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6279 if ( stream_.doConvertBuffer[mode] ) {
6281 bool makeBuffer = true;
6282 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6283 if ( mode == INPUT ) {
// A deviceBuffer allocated for the OUTPUT side can be reused for INPUT
// if it is at least as large; avoid a second allocation in that case.
6284 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6285 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6286 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6291 bufferBytes *= *bufferSize;
6292 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6293 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6294 if ( stream_.deviceBuffer == NULL ) {
6295 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6301 // Allocate our DsHandle structures for the stream.
6302 if ( stream_.apiHandle == 0 ) {
6304 handle = new DsHandle;
6306 catch ( std::bad_alloc& ) {
// NOTE(review): message says "AsioHandle" but this is the DS backend —
// looks like a copy/paste slip; confirm against upstream before changing.
6307 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6311 // Create a manual-reset event.
6312 handle->condition = CreateEvent( NULL, // no security
6313 TRUE, // manual-reset
6314 FALSE, // non-signaled initially
6316 stream_.apiHandle = (void *) handle;
// Second call (other direction) reuses the existing DsHandle.
6319 handle = (DsHandle *) stream_.apiHandle;
6320 handle->id[mode] = ohandle;
6321 handle->buffer[mode] = bhandle;
6322 handle->dsBufferSize[mode] = dsBufferSize;
6323 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6325 stream_.device[mode] = device;
6326 stream_.state = STREAM_STOPPED;
6327 if ( stream_.mode == OUTPUT && mode == INPUT )
6328 // We had already set up an output stream.
6329 stream_.mode = DUPLEX;
6331 stream_.mode = mode;
6332 stream_.nBuffers = nBuffers;
6333 stream_.sampleRate = sampleRate;
6335 // Setup the buffer conversion information structure.
6336 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6338 // Setup the callback thread.
6339 if ( stream_.callbackInfo.isRunning == false ) {
6341 stream_.callbackInfo.isRunning = true;
6342 stream_.callbackInfo.object = (void *) this;
// _beginthreadex (not CreateThread) so the CRT is initialized per-thread.
6343 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6344 &stream_.callbackInfo, 0, &threadId );
6345 if ( stream_.callbackInfo.thread == 0 ) {
6346 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6350 // Boost DS thread priority
6351 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// ---- error cleanup path: release DS objects and free buffers ----
6357 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6358 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6359 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6360 if ( buffer ) buffer->Release();
6363 if ( handle->buffer[1] ) {
6364 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6365 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6366 if ( buffer ) buffer->Release();
6369 CloseHandle( handle->condition );
6371 stream_.apiHandle = 0;
6374 for ( int i=0; i<2; i++ ) {
6375 if ( stream_.userBuffer[i] ) {
6376 free( stream_.userBuffer[i] );
6377 stream_.userBuffer[i] = 0;
6381 if ( stream_.deviceBuffer ) {
6382 free( stream_.deviceBuffer );
6383 stream_.deviceBuffer = 0;
6386 stream_.state = STREAM_CLOSED;
// Close the open stream: join and destroy the callback thread, release the
// DirectSound playback/capture buffers and their device objects, close the
// signaling event, free all internal buffers, and mark the stream CLOSED.
// Warns (does not throw) if no stream is open.
6390 void RtApiDs :: closeStream()
6392 if ( stream_.state == STREAM_CLOSED ) {
6393 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6394 error( RtAudioError::WARNING );
6398 // Stop the callback thread.
// Clearing isRunning tells the thread loop to exit; then join and close it.
6399 stream_.callbackInfo.isRunning = false;
6400 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6401 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6403 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6405 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6406 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6407 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6414 if ( handle->buffer[1] ) {
6415 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6416 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6423 CloseHandle( handle->condition );
6425 stream_.apiHandle = 0;
6428 for ( int i=0; i<2; i++ ) {
6429 if ( stream_.userBuffer[i] ) {
6430 free( stream_.userBuffer[i] );
6431 stream_.userBuffer[i] = 0;
6435 if ( stream_.deviceBuffer ) {
6436 free( stream_.deviceBuffer );
6437 stream_.deviceBuffer = 0;
6440 stream_.mode = UNINITIALIZED;
6441 stream_.state = STREAM_CLOSED;
// Start the stream: begin looping playback on the output buffer and/or
// looping capture on the input buffer, reset the drain bookkeeping, and mark
// the stream RUNNING. Warns (does not throw) if already running; raises a
// SYSTEM_ERROR if a DirectSound Play/Start call fails.
6444 void RtApiDs :: startStream()
6447 if ( stream_.state == STREAM_RUNNING ) {
6448 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6449 error( RtAudioError::WARNING );
6453 #if defined( HAVE_GETTIMEOFDAY )
6454 gettimeofday( &stream_.lastTickTimestamp, NULL );
6457 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6459 // Increase scheduler frequency on lesser windows (a side-effect of
6460 // increasing timer accuracy). On greater windows (Win2K or later),
6461 // this is already in effect.
6462 timeBeginPeriod( 1 );
6464 buffersRolling = false;
6465 duplexPrerollBytes = 0;
6467 if ( stream_.mode == DUPLEX ) {
6468 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6469 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
6473 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6475 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// DSBPLAY_LOOPING: the secondary buffer wraps continuously; the callback
// thread streams data into it ahead of the play cursor.
6476 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6477 if ( FAILED( result ) ) {
6478 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6479 errorText_ = errorStream_.str();
6484 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6486 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6487 result = buffer->Start( DSCBSTART_LOOPING );
6488 if ( FAILED( result ) ) {
6489 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6490 errorText_ = errorStream_.str();
// Reset drain state and the stop-signal event before declaring RUNNING.
6495 handle->drainCounter = 0;
6496 handle->internalDrain = false;
6497 ResetEvent( handle->condition );
6498 stream_.state = STREAM_RUNNING;
6501 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully. For output, waits (via handle->condition) for
// the callback to finish draining queued audio, then stops each DirectSound
// buffer, zeros it so a later restart does not replay stale data, and resets
// the software buffer pointers to the start. Restores normal scheduler
// frequency via timeEndPeriod(1). Warns if already stopped.
6504 void RtApiDs :: stopStream()
6507 if ( stream_.state == STREAM_STOPPED ) {
6508 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6509 error( RtAudioError::WARNING );
6516 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6517 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain in progress: request one (2) and block
// until the callback signals the condition event when the drain completes.
6518 if ( handle->drainCounter == 0 ) {
6519 handle->drainCounter = 2;
6520 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6523 stream_.state = STREAM_STOPPED;
6525 MUTEX_LOCK( &stream_.mutex );
6527 // Stop the buffer and clear memory
6528 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6529 result = buffer->Stop();
6530 if ( FAILED( result ) ) {
6531 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6532 errorText_ = errorStream_.str();
6536 // Lock the buffer and clear it so that if we start to play again,
6537 // we won't have old data playing.
6538 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6539 if ( FAILED( result ) ) {
6540 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6541 errorText_ = errorStream_.str();
6545 // Zero the DS buffer
6546 ZeroMemory( audioPtr, dataLen );
6548 // Unlock the DS buffer
6549 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6550 if ( FAILED( result ) ) {
6551 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6552 errorText_ = errorStream_.str();
6556 // If we start playing again, we must begin at beginning of buffer.
6557 handle->bufferPointer[0] = 0;
6560 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6561 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6565 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above;
// only lock here for a pure INPUT stream.
6567 if ( stream_.mode != DUPLEX )
6568 MUTEX_LOCK( &stream_.mutex );
6570 result = buffer->Stop();
6571 if ( FAILED( result ) ) {
6572 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6573 errorText_ = errorStream_.str();
6577 // Lock the buffer and clear it so that if we start to play again,
6578 // we won't have old data playing.
6579 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6580 if ( FAILED( result ) ) {
6581 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6582 errorText_ = errorStream_.str();
6586 // Zero the DS buffer
6587 ZeroMemory( audioPtr, dataLen );
6589 // Unlock the DS buffer
6590 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6591 if ( FAILED( result ) ) {
6592 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6593 errorText_ = errorStream_.str();
6597 // If we start recording again, we must begin at beginning of buffer.
6598 handle->bufferPointer[1] = 0;
6602 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6603 MUTEX_UNLOCK( &stream_.mutex );
6605 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort the stream: set drainCounter to 2 so the callback path stops output
// immediately instead of draining queued audio. Warns if already stopped.
// NOTE(review): trailing lines appear elided in this extract (the original
// numbering jumps from 6618 to 6623); upstream this function ends by calling
// stopStream() — confirm against the canonical source.
6608 void RtApiDs :: abortStream()
6611 if ( stream_.state == STREAM_STOPPED ) {
6612 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6613 error( RtAudioError::WARNING );
6617 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6618 handle->drainCounter = 2;
6623 void RtApiDs :: callbackEvent()
6625 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6626 Sleep( 50 ); // sleep 50 milliseconds
6630 if ( stream_.state == STREAM_CLOSED ) {
6631 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6632 error( RtAudioError::WARNING );
6636 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6637 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6639 // Check if we were draining the stream and signal is finished.
6640 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6642 stream_.state = STREAM_STOPPING;
6643 if ( handle->internalDrain == false )
6644 SetEvent( handle->condition );
6650 // Invoke user callback to get fresh output data UNLESS we are
6652 if ( handle->drainCounter == 0 ) {
6653 RtAudioCallback callback = (RtAudioCallback) info->callback;
6654 double streamTime = getStreamTime();
6655 RtAudioStreamStatus status = 0;
6656 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6657 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6658 handle->xrun[0] = false;
6660 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6661 status |= RTAUDIO_INPUT_OVERFLOW;
6662 handle->xrun[1] = false;
6664 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6665 stream_.bufferSize, streamTime, status, info->userData );
6666 if ( cbReturnValue == 2 ) {
6667 stream_.state = STREAM_STOPPING;
6668 handle->drainCounter = 2;
6672 else if ( cbReturnValue == 1 ) {
6673 handle->drainCounter = 1;
6674 handle->internalDrain = true;
6679 DWORD currentWritePointer, safeWritePointer;
6680 DWORD currentReadPointer, safeReadPointer;
6681 UINT nextWritePointer;
6683 LPVOID buffer1 = NULL;
6684 LPVOID buffer2 = NULL;
6685 DWORD bufferSize1 = 0;
6686 DWORD bufferSize2 = 0;
6691 MUTEX_LOCK( &stream_.mutex );
6692 if ( stream_.state == STREAM_STOPPED ) {
6693 MUTEX_UNLOCK( &stream_.mutex );
6697 if ( buffersRolling == false ) {
6698 if ( stream_.mode == DUPLEX ) {
6699 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6701 // It takes a while for the devices to get rolling. As a result,
6702 // there's no guarantee that the capture and write device pointers
6703 // will move in lockstep. Wait here for both devices to start
6704 // rolling, and then set our buffer pointers accordingly.
6705 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6706 // bytes later than the write buffer.
6708 // Stub: a serious risk of having a pre-emptive scheduling round
6709 // take place between the two GetCurrentPosition calls... but I'm
6710 // really not sure how to solve the problem. Temporarily boost to
6711 // Realtime priority, maybe; but I'm not sure what priority the
6712 // DirectSound service threads run at. We *should* be roughly
6713 // within a ms or so of correct.
// DUPLEX startup synchronization: sample both device cursors once,
// then poll until BOTH the render ("write") and capture ("read")
// cursors have advanced from their starting values (the "break"
// below exits the polling loop). Only then are our internal buffer
// pointers anchored relative to the hardware cursors.
// NOTE(review): blank and brace-only lines are elided from this
// listing, so the loop/brace structure is not fully visible here.
6715 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6716 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6718 DWORD startSafeWritePointer, startSafeReadPointer;
// Record the initial positions of both cursors.
6720 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6721 if ( FAILED( result ) ) {
6722 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6723 errorText_ = errorStream_.str();
6724 MUTEX_UNLOCK( &stream_.mutex );
6725 error( RtAudioError::SYSTEM_ERROR );
6728 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6729 if ( FAILED( result ) ) {
6730 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6731 errorText_ = errorStream_.str();
6732 MUTEX_UNLOCK( &stream_.mutex );
6733 error( RtAudioError::SYSTEM_ERROR );
// Poll both cursors until each has moved from its start value.
6737 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6738 if ( FAILED( result ) ) {
6739 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6740 errorText_ = errorStream_.str();
6741 MUTEX_UNLOCK( &stream_.mutex );
6742 error( RtAudioError::SYSTEM_ERROR );
6745 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6746 if ( FAILED( result ) ) {
6747 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6748 errorText_ = errorStream_.str();
6749 MUTEX_UNLOCK( &stream_.mutex );
6750 error( RtAudioError::SYSTEM_ERROR );
6753 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6757 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
// Anchor the write pointer a lead-time ahead of the safe-write
// cursor (wrapping modulo the buffer size); the read pointer
// starts exactly at the safe-read cursor.
6759 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6760 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6761 handle->bufferPointer[1] = safeReadPointer;
// OUTPUT-only startup: query the render cursor once and place our
// next write position dsPointerLeadTime[0] bytes ahead of the
// safe-write cursor, wrapping modulo the device buffer size.
// FIX(review): the first GetCurrentPosition argument had been
// corrupted by HTML-entity mis-encoding ("&curren" rendered as the
// currency sign, yielding "¤tWritePointer"); restored to
// "&currentWritePointer".
6763 else if ( stream_.mode == OUTPUT ) {
6765 // Set the proper nextWritePosition after initial startup.
6766 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6767 result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6768 if ( FAILED( result ) ) {
6769 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6770 errorText_ = errorStream_.str();
6771 MUTEX_UNLOCK( &stream_.mutex );
6772 error( RtAudioError::SYSTEM_ERROR );
6775 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6776 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
// Mark both devices as rolling so subsequent callbacks skip this
// startup path.
6779 buffersRolling = true;
// Playback half of callbackEvent(): convert/prepare the user buffer,
// wait (Sleep) until the lead pointer clears the next write region,
// detect underruns (xrun) when the region strays between the play and
// write cursors, then Lock/CopyMemory/Unlock the region into the
// DirectSound buffer and advance our circular write offset.
// FIX(review): (1) "&currentWritePointer" had been corrupted by
// HTML-entity mis-encoding to "¤tWritePointer"; restored.
// (2) The HRESULT of dsBuffer->Unlock() was discarded, so the
// following FAILED( result ) check re-tested the stale Lock result
// and the unlock-error branch was unreachable; the Unlock return
// value is now assigned to result.
6782 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6784 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6786 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6787 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6788 bufferBytes *= formatBytes( stream_.userFormat );
6789 memset( stream_.userBuffer[0], 0, bufferBytes );
6792 // Setup parameters and do buffer conversion if necessary.
6793 if ( stream_.doConvertBuffer[0] ) {
6794 buffer = stream_.deviceBuffer;
6795 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6796 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6797 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6800 buffer = stream_.userBuffer[0];
6801 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6802 bufferBytes *= formatBytes( stream_.userFormat );
6805 // No byte swapping necessary in DirectSound implementation.
6807 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6808 // unsigned. So, we need to convert our signed 8-bit data here to
6810 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6811 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6813 DWORD dsBufferSize = handle->dsBufferSize[0];
6814 nextWritePointer = handle->bufferPointer[0];
6816 DWORD endWrite, leadPointer;
6818 // Find out where the read and "safe write" pointers are.
6819 result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6820 if ( FAILED( result ) ) {
6821 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6822 errorText_ = errorStream_.str();
6823 MUTEX_UNLOCK( &stream_.mutex );
6824 error( RtAudioError::SYSTEM_ERROR );
6828 // We will copy our output buffer into the region between
6829 // safeWritePointer and leadPointer. If leadPointer is not
6830 // beyond the next endWrite position, wait until it is.
6831 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6832 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6833 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6834 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6835 endWrite = nextWritePointer + bufferBytes;
6837 // Check whether the entire write region is behind the play pointer.
6838 if ( leadPointer >= endWrite ) break;
6840 // If we are here, then we must wait until the leadPointer advances
6841 // beyond the end of our next write region. We use the
6842 // Sleep() function to suspend operation until that happens.
6843 double millis = ( endWrite - leadPointer ) * 1000.0;
6844 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6845 if ( millis < 1.0 ) millis = 1.0;
6846 Sleep( (DWORD) millis );
// Underrun check: the write region must never overlap the span
// between the play cursor and the write cursor.
6849 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6850 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6851 // We've strayed into the forbidden zone ... resync the read pointer.
6852 handle->xrun[0] = true;
6853 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6854 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6855 handle->bufferPointer[0] = nextWritePointer;
6856 endWrite = nextWritePointer + bufferBytes;
6859 // Lock free space in the buffer
6860 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6861 &bufferSize1, &buffer2, &bufferSize2, 0 );
6862 if ( FAILED( result ) ) {
6863 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6864 errorText_ = errorStream_.str();
6865 MUTEX_UNLOCK( &stream_.mutex );
6866 error( RtAudioError::SYSTEM_ERROR );
6870 // Copy our buffer into the DS buffer
6871 CopyMemory( buffer1, buffer, bufferSize1 );
6872 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6874 // Update our buffer offset and unlock sound buffer
6875 result = dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6876 if ( FAILED( result ) ) {
6877 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6878 errorText_ = errorStream_.str();
6879 MUTEX_UNLOCK( &stream_.mutex );
6880 error( RtAudioError::SYSTEM_ERROR );
6883 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6884 handle->bufferPointer[0] = nextWritePointer;
6887 // Don't bother draining input
6888 if ( handle->drainCounter ) {
6889 handle->drainCounter++;
// Capture half of callbackEvent(): wait for (INPUT) or back off to
// (DUPLEX) a safe read region, Lock/copy/Unlock the capture buffer
// into the user/device buffer, and convert formats. During the
// DUPLEX pre-roll period zeros are returned while the hardware
// cursors synchronize.
// FIX(review): (1) both "&currentReadPointer" arguments had been
// corrupted by HTML-entity mis-encoding to "¤tReadPointer";
// restored. (2) The HRESULT of dsBuffer->Unlock() was discarded, so
// the following FAILED( result ) check re-tested the stale Lock
// result and the unlock-error branch was unreachable; the Unlock
// return value is now assigned to result.
6893 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6895 // Setup parameters.
6896 if ( stream_.doConvertBuffer[1] ) {
6897 buffer = stream_.deviceBuffer;
6898 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6899 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6902 buffer = stream_.userBuffer[1];
6903 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6904 bufferBytes *= formatBytes( stream_.userFormat );
6907 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6908 long nextReadPointer = handle->bufferPointer[1];
6909 DWORD dsBufferSize = handle->dsBufferSize[1];
6911 // Find out where the write and "safe read" pointers are.
6912 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6913 if ( FAILED( result ) ) {
6914 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6915 errorText_ = errorStream_.str();
6916 MUTEX_UNLOCK( &stream_.mutex );
6917 error( RtAudioError::SYSTEM_ERROR );
6921 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6922 DWORD endRead = nextReadPointer + bufferBytes;
6924 // Handling depends on whether we are INPUT or DUPLEX.
6925 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6926 // then a wait here will drag the write pointers into the forbidden zone.
6928 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6929 // it's in a safe position. This causes dropouts, but it seems to be the only
6930 // practical way to sync up the read and write pointers reliably, given the
6931 // the very complex relationship between phase and increment of the read and write
6934 // In order to minimize audible dropouts in DUPLEX mode, we will
6935 // provide a pre-roll period of 0.5 seconds in which we return
6936 // zeros from the read buffer while the pointers sync up.
6938 if ( stream_.mode == DUPLEX ) {
6939 if ( safeReadPointer < endRead ) {
6940 if ( duplexPrerollBytes <= 0 ) {
6941 // Pre-roll time over. Be more agressive.
6942 int adjustment = endRead-safeReadPointer;
6944 handle->xrun[1] = true;
6946 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6947 // and perform fine adjustments later.
6948 // - small adjustments: back off by twice as much.
6949 if ( adjustment >= 2*bufferBytes )
6950 nextReadPointer = safeReadPointer-2*bufferBytes;
6952 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6954 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6958 // In pre=roll time. Just do it.
6959 nextReadPointer = safeReadPointer - bufferBytes;
6960 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6962 endRead = nextReadPointer + bufferBytes;
6965 else { // mode == INPUT
6966 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6967 // See comments for playback.
6968 double millis = (endRead - safeReadPointer) * 1000.0;
6969 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6970 if ( millis < 1.0 ) millis = 1.0;
6971 Sleep( (DWORD) millis );
6973 // Wake up and find out where we are now.
6974 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6975 if ( FAILED( result ) ) {
6976 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6977 errorText_ = errorStream_.str();
6978 MUTEX_UNLOCK( &stream_.mutex );
6979 error( RtAudioError::SYSTEM_ERROR );
6983 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6987 // Lock free space in the buffer
6988 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6989 &bufferSize1, &buffer2, &bufferSize2, 0 );
6990 if ( FAILED( result ) ) {
6991 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6992 errorText_ = errorStream_.str();
6993 MUTEX_UNLOCK( &stream_.mutex );
6994 error( RtAudioError::SYSTEM_ERROR );
6998 if ( duplexPrerollBytes <= 0 ) {
6999 // Copy our buffer into the DS buffer
7000 CopyMemory( buffer, buffer1, bufferSize1 );
7001 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
7004 memset( buffer, 0, bufferSize1 );
7005 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
7006 duplexPrerollBytes -= bufferSize1 + bufferSize2;
7009 // Update our buffer offset and unlock sound buffer
7010 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
7011 result = dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
7012 if ( FAILED( result ) ) {
7013 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
7014 errorText_ = errorStream_.str();
7015 MUTEX_UNLOCK( &stream_.mutex );
7016 error( RtAudioError::SYSTEM_ERROR );
7019 handle->bufferPointer[1] = nextReadPointer;
7021 // No byte swapping necessary in DirectSound implementation.
7023 // If necessary, convert 8-bit data from unsigned to signed.
7024 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
7025 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
7027 // Do buffer conversion if necessary.
7028 if ( stream_.doConvertBuffer[1] )
7029 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// End of callbackEvent(): release the stream mutex and advance the
// stream-time counter by one buffer period.
7033 MUTEX_UNLOCK( &stream_.mutex );
7034 RtApi::tickStreamTime();
7037 // Definitions for utility functions and callbacks
7038 // specific to the DirectSound implementation.
7040 static unsigned __stdcall callbackHandler( void *ptr )
// Thread entry point for the DirectSound service thread: unpacks the
// RtApiDs object from the CallbackInfo and services the stream
// repeatedly until CallbackInfo::isRunning is cleared by the stream
// owner. NOTE(review): the function's closing lines (thread
// exit/return) are elided from this listing.
7042 CallbackInfo *info = (CallbackInfo *) ptr;
7043 RtApiDs *object = (RtApiDs *) info->object;
7044 bool* isRunning = &info->isRunning;
7046 while ( *isRunning == true ) {
7047 object->callbackEvent();
7054 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
7055 LPCTSTR description,
// DirectSound device-enumeration callback. Validates each enumerated
// device by opening it (capture or render, per probeInfo.isInput) and
// checking its capabilities, then records its name and GUID in the
// shared dsDevices vector — updating an existing entry with the same
// name when present, otherwise appending a new one. Returning TRUE
// continues the enumeration.
// NOTE(review): several lines are elided in this listing (braces,
// the remaining parameters, and locals such as the HRESULT and the
// DSCAPS/DSCCAPS structures used below).
7059 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
7060 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
7063 bool validDevice = false;
7064 if ( probeInfo.isInput == true ) {
// Capture device: valid if it opens and reports channels/formats.
7066 LPDIRECTSOUNDCAPTURE object;
7068 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
7069 if ( hr != DS_OK ) return TRUE;
7071 caps.dwSize = sizeof(caps);
7072 hr = object->GetCaps( &caps );
7073 if ( hr == DS_OK ) {
7074 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
// Render device: valid if it opens and supports primary mono/stereo.
7081 LPDIRECTSOUND object;
7082 hr = DirectSoundCreate( lpguid, &object, NULL );
7083 if ( hr != DS_OK ) return TRUE;
7085 caps.dwSize = sizeof(caps);
7086 hr = object->GetCaps( &caps );
7087 if ( hr == DS_OK ) {
7088 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7094 // If good device, then save its name and guid.
7095 std::string name = convertCharPointerToStdString( description );
7096 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7097 if ( lpguid == NULL )
7098 name = "Default Device";
7099 if ( validDevice ) {
// Update an already-known device of the same name (id[1]/validId[1]
// for capture, id[0]/validId[0] for render).
7100 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7101 if ( dsDevices[i].name == name ) {
7102 dsDevices[i].found = true;
7103 if ( probeInfo.isInput ) {
7104 dsDevices[i].id[1] = lpguid;
7105 dsDevices[i].validId[1] = true;
7108 dsDevices[i].id[0] = lpguid;
7109 dsDevices[i].validId[0] = true;
// Otherwise record it as a new device entry.
7117 device.found = true;
7118 if ( probeInfo.isInput ) {
7119 device.id[1] = lpguid;
7120 device.validId[1] = true;
7123 device.id[0] = lpguid;
7124 device.validId[0] = true;
7126 dsDevices.push_back( device );
7132 static const char* getErrorString( int code )
7136 case DSERR_ALLOCATED:
7137 return "Already allocated";
7139 case DSERR_CONTROLUNAVAIL:
7140 return "Control unavailable";
7142 case DSERR_INVALIDPARAM:
7143 return "Invalid parameter";
7145 case DSERR_INVALIDCALL:
7146 return "Invalid call";
7149 return "Generic error";
7151 case DSERR_PRIOLEVELNEEDED:
7152 return "Priority level needed";
7154 case DSERR_OUTOFMEMORY:
7155 return "Out of memory";
7157 case DSERR_BADFORMAT:
7158 return "The sample rate or the channel format is not supported";
7160 case DSERR_UNSUPPORTED:
7161 return "Not supported";
7163 case DSERR_NODRIVER:
7166 case DSERR_ALREADYINITIALIZED:
7167 return "Already initialized";
7169 case DSERR_NOAGGREGATION:
7170 return "No aggregation";
7172 case DSERR_BUFFERLOST:
7173 return "Buffer lost";
7175 case DSERR_OTHERAPPHASPRIO:
7176 return "Another application already has priority";
7178 case DSERR_UNINITIALIZED:
7179 return "Uninitialized";
7182 return "DirectSound unknown error";
7185 //******************** End of __WINDOWS_DS__ *********************//
7189 #if defined(__LINUX_ALSA__)
7191 #include <alsa/asoundlib.h>
7194 // A structure to hold various information related to the ALSA API
// NOTE(review): the struct header and several members are elided in
// this listing; visible members are the playback/capture pcm handles,
// a condition variable used to signal the callback thread, and a
// constructor initializing synchronized/runnable/xrun state.
7197   snd_pcm_t *handles[2];
7200   pthread_cond_t runnable_cv;
7204     :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
// Forward declaration of the ALSA callback thread entry point.
7207 static void *alsaCallbackHandler( void * ptr );
7209 RtApiAlsa :: RtApiAlsa()
// Default constructor: no ALSA resources are acquired until a stream
// is opened.
7211   // Nothing to do here.
7214 RtApiAlsa :: ~RtApiAlsa()
// Destructor: make sure any open stream is fully torn down before
// the API object disappears.
7216   if ( stream_.state != STREAM_CLOSED ) closeStream();
7219 unsigned int RtApiAlsa :: getDeviceCount( void )
// Count ALSA PCM devices by walking every sound card ("hw:N") and its
// PCM subdevices, then add one more slot if the "default" device can
// be opened. A card that fails to open is reported as a WARNING and
// skipped. NOTE(review): blank and brace-only lines are elided from
// this listing.
7221   unsigned nDevices = 0;
7222   int result, subdevice, card;
7224   snd_ctl_t *handle = 0;
7226   // Count cards and devices
7228   snd_card_next( &card );
7229   while ( card >= 0 ) {
7230     sprintf( name, "hw:%d", card );
7231     result = snd_ctl_open( &handle, name, 0 );
// On control-open failure: warn and skip this card.
7234       errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7235       errorText_ = errorStream_.str();
7236       error( RtAudioError::WARNING );
// Walk the PCM subdevices of this card.
7241       result = snd_ctl_pcm_next_device( handle, &subdevice );
7243         errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7244         errorText_ = errorStream_.str();
7245         error( RtAudioError::WARNING );
7248       if ( subdevice < 0 )
7254     snd_ctl_close( handle );
7255     snd_card_next( &card );
// Finally, check whether a "default" device is available.
7258   result = snd_ctl_open( &handle, "default", 0 );
7261     snd_ctl_close( handle );
7267 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
// Probe one ALSA device: locate the card/subdevice matching the
// zero-based device index, open it for playback and capture to count
// channels, then re-open in the wider direction to enumerate the
// supported sample rates and native data formats. Failures along the
// way are reported as warnings and return a partially-filled info.
// FIX(review): the snd_pcm_hw_params_alloca argument had been
// corrupted by HTML-entity mis-encoding ("&para" rendered as the
// pilcrow sign, yielding "¶ms"); restored to "&params".
// NOTE(review): blank and brace-only lines (including the
// "probeParameters:" label targeted by the gotos) are elided in this
// listing.
7269   RtAudio::DeviceInfo info;
7270   info.probed = false;
7272   unsigned nDevices = 0;
7273   int result, subdevice, card;
7275   snd_ctl_t *chandle = 0;
7277   // Count cards and devices
7280   snd_card_next( &card );
7281   while ( card >= 0 ) {
7282     sprintf( name, "hw:%d", card );
7283     result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7286       errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7287       errorText_ = errorStream_.str();
7288       error( RtAudioError::WARNING );
7293       result = snd_ctl_pcm_next_device( chandle, &subdevice );
7295         errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7296         errorText_ = errorStream_.str();
7297         error( RtAudioError::WARNING );
7300       if ( subdevice < 0 ) break;
7301       if ( nDevices == device ) {
7302         sprintf( name, "hw:%d,%d", card, subdevice );
7309     snd_ctl_close( chandle );
7310     snd_card_next( &card );
7313   result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7314   if ( result == 0 ) {
7315     if ( nDevices == device ) {
7316       strcpy( name, "default" );
7322   if ( nDevices == 0 ) {
7323     errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7324     error( RtAudioError::INVALID_USE );
7328   if ( device >= nDevices ) {
7329     errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7330     error( RtAudioError::INVALID_USE );
7336   // If a stream is already open, we cannot probe the stream devices.
7337   // Thus, use the saved results.
7338   if ( stream_.state != STREAM_CLOSED &&
7339        ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7340     snd_ctl_close( chandle );
7341     if ( device >= devices_.size() ) {
7342       errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7343       error( RtAudioError::WARNING );
7346     return devices_[ device ];
7349   int openMode = SND_PCM_ASYNC;
7350   snd_pcm_stream_t stream;
7351   snd_pcm_info_t *pcminfo;
7352   snd_pcm_info_alloca( &pcminfo );
7354   snd_pcm_hw_params_t *params;
7355   snd_pcm_hw_params_alloca( &params );
7357   // First try for playback unless default device (which has subdev -1)
7358   stream = SND_PCM_STREAM_PLAYBACK;
7359   snd_pcm_info_set_stream( pcminfo, stream );
7360   if ( subdevice != -1 ) {
7361     snd_pcm_info_set_device( pcminfo, subdevice );
7362     snd_pcm_info_set_subdevice( pcminfo, 0 );
7364     result = snd_ctl_pcm_info( chandle, pcminfo );
7366       // Device probably doesn't support playback.
7371   result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7373     errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7374     errorText_ = errorStream_.str();
7375     error( RtAudioError::WARNING );
7379   // The device is open ... fill the parameter structure.
7380   result = snd_pcm_hw_params_any( phandle, params );
7382     snd_pcm_close( phandle );
7383     errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7384     errorText_ = errorStream_.str();
7385     error( RtAudioError::WARNING );
7389   // Get output channel information.
7391   result = snd_pcm_hw_params_get_channels_max( params, &value );
7393     snd_pcm_close( phandle );
7394     errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7395     errorText_ = errorStream_.str();
7396     error( RtAudioError::WARNING );
7399   info.outputChannels = value;
7400   snd_pcm_close( phandle );
7403   stream = SND_PCM_STREAM_CAPTURE;
7404   snd_pcm_info_set_stream( pcminfo, stream );
7406   // Now try for capture unless default device (with subdev = -1)
7407   if ( subdevice != -1 ) {
7408     result = snd_ctl_pcm_info( chandle, pcminfo );
7409     snd_ctl_close( chandle );
7411       // Device probably doesn't support capture.
7412       if ( info.outputChannels == 0 ) return info;
7413       goto probeParameters;
7417     snd_ctl_close( chandle );
7419   result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7421     errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7422     errorText_ = errorStream_.str();
7423     error( RtAudioError::WARNING );
7424     if ( info.outputChannels == 0 ) return info;
7425     goto probeParameters;
7428   // The device is open ... fill the parameter structure.
7429   result = snd_pcm_hw_params_any( phandle, params );
7431     snd_pcm_close( phandle );
7432     errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7433     errorText_ = errorStream_.str();
7434     error( RtAudioError::WARNING );
7435     if ( info.outputChannels == 0 ) return info;
7436     goto probeParameters;
7439   result = snd_pcm_hw_params_get_channels_max( params, &value );
7441     snd_pcm_close( phandle );
7442     errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7443     errorText_ = errorStream_.str();
7444     error( RtAudioError::WARNING );
7445     if ( info.outputChannels == 0 ) return info;
7446     goto probeParameters;
7448   info.inputChannels = value;
7449   snd_pcm_close( phandle );
7451   // If device opens for both playback and capture, we determine the channels.
7452   if ( info.outputChannels > 0 && info.inputChannels > 0 )
7453     info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7455   // ALSA doesn't provide default devices so we'll use the first available one.
7456   if ( device == 0 && info.outputChannels > 0 )
7457     info.isDefaultOutput = true;
7458   if ( device == 0 && info.inputChannels > 0 )
7459     info.isDefaultInput = true;
7462   // At this point, we just need to figure out the supported data
7463   // formats and sample rates. We'll proceed by opening the device in
7464   // the direction with the maximum number of channels, or playback if
7465   // they are equal. This might limit our sample rate options, but so
7468   if ( info.outputChannels >= info.inputChannels )
7469     stream = SND_PCM_STREAM_PLAYBACK;
7471     stream = SND_PCM_STREAM_CAPTURE;
7472   snd_pcm_info_set_stream( pcminfo, stream );
7474   result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7476     errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7477     errorText_ = errorStream_.str();
7478     error( RtAudioError::WARNING );
7482   // The device is open ... fill the parameter structure.
7483   result = snd_pcm_hw_params_any( phandle, params );
7485     snd_pcm_close( phandle );
7486     errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7487     errorText_ = errorStream_.str();
7488     error( RtAudioError::WARNING );
7492   // Test our discrete set of sample rate values.
7493   info.sampleRates.clear();
7494   for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7495     if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7496       info.sampleRates.push_back( SAMPLE_RATES[i] );
7498       if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7499         info.preferredSampleRate = SAMPLE_RATES[i];
7502   if ( info.sampleRates.size() == 0 ) {
7503     snd_pcm_close( phandle );
7504     errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7505     errorText_ = errorStream_.str();
7506     error( RtAudioError::WARNING );
7510   // Probe the supported data formats ... we don't care about endian-ness just yet
7511   snd_pcm_format_t format;
7512   info.nativeFormats = 0;
7513   format = SND_PCM_FORMAT_S8;
7514   if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7515     info.nativeFormats |= RTAUDIO_SINT8;
7516   format = SND_PCM_FORMAT_S16;
7517   if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7518     info.nativeFormats |= RTAUDIO_SINT16;
7519   format = SND_PCM_FORMAT_S24;
7520   if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7521     info.nativeFormats |= RTAUDIO_SINT24;
7522   format = SND_PCM_FORMAT_S32;
7523   if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7524     info.nativeFormats |= RTAUDIO_SINT32;
7525   format = SND_PCM_FORMAT_FLOAT;
7526   if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7527     info.nativeFormats |= RTAUDIO_FLOAT32;
7528   format = SND_PCM_FORMAT_FLOAT64;
7529   if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7530     info.nativeFormats |= RTAUDIO_FLOAT64;
7532   // Check that we have at least one supported format
7533   if ( info.nativeFormats == 0 ) {
7534     snd_pcm_close( phandle );
7535     errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7536     errorText_ = errorStream_.str();
7537     error( RtAudioError::WARNING );
7541   // Get the device name
7543   result = snd_card_get_name( card, &cardname );
7544   if ( result >= 0 ) {
7545     sprintf( name, "hw:%s,%d", cardname, subdevice );
7550   // That's all ... close the device and return
7551   snd_pcm_close( phandle );
7556 void RtApiAlsa :: saveDeviceInfo( void )
// Snapshot every ALSA device's info into devices_ so that
// getDeviceInfo() can serve cached results while a stream is open
// (probing an already-open device is not possible).
7560   unsigned int nDevices = getDeviceCount();
7561   devices_.resize( nDevices );
7562   for ( unsigned int i=0; i<nDevices; i++ )
7563     devices_[i] = getDeviceInfo( i );
7566 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7567 unsigned int firstChannel, unsigned int sampleRate,
7568 RtAudioFormat format, unsigned int *bufferSize,
7569 RtAudio::StreamOptions *options )
7572 #if defined(__RTAUDIO_DEBUG__)
7574 snd_output_stdio_attach(&out, stderr, 0);
7577 // I'm not using the "plug" interface ... too much inconsistent behavior.
7579 unsigned nDevices = 0;
7580 int result, subdevice, card;
7584 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7585 snprintf(name, sizeof(name), "%s", "default");
7587 // Count cards and devices
7589 snd_card_next( &card );
7590 while ( card >= 0 ) {
7591 sprintf( name, "hw:%d", card );
7592 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7594 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7595 errorText_ = errorStream_.str();
7600 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7601 if ( result < 0 ) break;
7602 if ( subdevice < 0 ) break;
7603 if ( nDevices == device ) {
7604 sprintf( name, "hw:%d,%d", card, subdevice );
7605 snd_ctl_close( chandle );
7610 snd_ctl_close( chandle );
7611 snd_card_next( &card );
7614 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7615 if ( result == 0 ) {
7616 if ( nDevices == device ) {
7617 strcpy( name, "default" );
7618 snd_ctl_close( chandle );
7623 snd_ctl_close( chandle );
7625 if ( nDevices == 0 ) {
7626 // This should not happen because a check is made before this function is called.
7627 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7631 if ( device >= nDevices ) {
7632 // This should not happen because a check is made before this function is called.
7633 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7640 // The getDeviceInfo() function will not work for a device that is
7641 // already open. Thus, we'll probe the system before opening a
7642 // stream and save the results for use by getDeviceInfo().
7643 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7644 this->saveDeviceInfo();
7646 snd_pcm_stream_t stream;
7647 if ( mode == OUTPUT )
7648 stream = SND_PCM_STREAM_PLAYBACK;
7650 stream = SND_PCM_STREAM_CAPTURE;
7653 int openMode = SND_PCM_ASYNC;
7654 result = snd_pcm_open( &phandle, name, stream, openMode );
7656 if ( mode == OUTPUT )
7657 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7659 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7660 errorText_ = errorStream_.str();
7664 // Fill the parameter structure.
7665 snd_pcm_hw_params_t *hw_params;
7666 snd_pcm_hw_params_alloca( &hw_params );
7667 result = snd_pcm_hw_params_any( phandle, hw_params );
7669 snd_pcm_close( phandle );
7670 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7671 errorText_ = errorStream_.str();
7675 #if defined(__RTAUDIO_DEBUG__)
7676 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7677 snd_pcm_hw_params_dump( hw_params, out );
7680 // Set access ... check user preference.
7681 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7682 stream_.userInterleaved = false;
7683 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7685 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7686 stream_.deviceInterleaved[mode] = true;
7689 stream_.deviceInterleaved[mode] = false;
7692 stream_.userInterleaved = true;
7693 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7695 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7696 stream_.deviceInterleaved[mode] = false;
7699 stream_.deviceInterleaved[mode] = true;
7703 snd_pcm_close( phandle );
7704 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7705 errorText_ = errorStream_.str();
7709 // Determine how to set the device format.
7710 stream_.userFormat = format;
7711 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7713 if ( format == RTAUDIO_SINT8 )
7714 deviceFormat = SND_PCM_FORMAT_S8;
7715 else if ( format == RTAUDIO_SINT16 )
7716 deviceFormat = SND_PCM_FORMAT_S16;
7717 else if ( format == RTAUDIO_SINT24 )
7718 deviceFormat = SND_PCM_FORMAT_S24;
7719 else if ( format == RTAUDIO_SINT32 )
7720 deviceFormat = SND_PCM_FORMAT_S32;
7721 else if ( format == RTAUDIO_FLOAT32 )
7722 deviceFormat = SND_PCM_FORMAT_FLOAT;
7723 else if ( format == RTAUDIO_FLOAT64 )
7724 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7726 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7727 stream_.deviceFormat[mode] = format;
7731 // The user requested format is not natively supported by the device.
7732 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7733 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7734 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7738 deviceFormat = SND_PCM_FORMAT_FLOAT;
7739 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7740 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7744 deviceFormat = SND_PCM_FORMAT_S32;
7745 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7746 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7750 deviceFormat = SND_PCM_FORMAT_S24;
7751 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7752 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7756 deviceFormat = SND_PCM_FORMAT_S16;
7757 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7758 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7762 deviceFormat = SND_PCM_FORMAT_S8;
7763 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7764 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7768 // If we get here, no supported format was found.
7769 snd_pcm_close( phandle );
7770 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7771 errorText_ = errorStream_.str();
7775 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7777 snd_pcm_close( phandle );
7778 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7779 errorText_ = errorStream_.str();
7783 // Determine whether byte-swaping is necessary.
7784 stream_.doByteSwap[mode] = false;
7785 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7786 result = snd_pcm_format_cpu_endian( deviceFormat );
7788 stream_.doByteSwap[mode] = true;
7789 else if (result < 0) {
7790 snd_pcm_close( phandle );
7791 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7792 errorText_ = errorStream_.str();
7797 // Set the sample rate.
7798 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7800 snd_pcm_close( phandle );
7801 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7802 errorText_ = errorStream_.str();
7806 // Determine the number of channels for this device. We support a possible
7807 // minimum device channel number > than the value requested by the user.
7808 stream_.nUserChannels[mode] = channels;
7810 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7811 unsigned int deviceChannels = value;
7812 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7813 snd_pcm_close( phandle );
7814 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7815 errorText_ = errorStream_.str();
7819 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7821 snd_pcm_close( phandle );
7822 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7823 errorText_ = errorStream_.str();
7826 deviceChannels = value;
7827 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7828 stream_.nDeviceChannels[mode] = deviceChannels;
7830 // Set the device channels.
7831 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7833 snd_pcm_close( phandle );
7834 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7835 errorText_ = errorStream_.str();
7839 // Set the buffer (or period) size.
7841 snd_pcm_uframes_t periodSize = *bufferSize;
7842 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7844 snd_pcm_close( phandle );
7845 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7846 errorText_ = errorStream_.str();
7849 *bufferSize = periodSize;
7851 // Set the buffer number, which in ALSA is referred to as the "period".
7852 unsigned int periods = 0;
7853 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7854 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7855 if ( periods < 2 ) periods = 4; // a fairly safe default value
7856 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7858 snd_pcm_close( phandle );
7859 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7860 errorText_ = errorStream_.str();
7864 // If attempting to setup a duplex stream, the bufferSize parameter
7865 // MUST be the same in both directions!
7866 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7867 snd_pcm_close( phandle );
7868 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7869 errorText_ = errorStream_.str();
7873 stream_.bufferSize = *bufferSize;
7875 // Install the hardware configuration
7876 result = snd_pcm_hw_params( phandle, hw_params );
7878 snd_pcm_close( phandle );
7879 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7880 errorText_ = errorStream_.str();
7884 #if defined(__RTAUDIO_DEBUG__)
7885 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7886 snd_pcm_hw_params_dump( hw_params, out );
7889 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7890 snd_pcm_sw_params_t *sw_params = NULL;
7891 snd_pcm_sw_params_alloca( &sw_params );
7892 snd_pcm_sw_params_current( phandle, sw_params );
7893 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7894 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7895 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7897 // The following two settings were suggested by Theo Veenker
7898 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7899 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7901 // here are two options for a fix
7902 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7903 snd_pcm_uframes_t val;
7904 snd_pcm_sw_params_get_boundary( sw_params, &val );
7905 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7907 result = snd_pcm_sw_params( phandle, sw_params );
7909 snd_pcm_close( phandle );
7910 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7911 errorText_ = errorStream_.str();
7915 #if defined(__RTAUDIO_DEBUG__)
7916 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7917 snd_pcm_sw_params_dump( sw_params, out );
7920 // Set flags for buffer conversion
7921 stream_.doConvertBuffer[mode] = false;
7922 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7923 stream_.doConvertBuffer[mode] = true;
7924 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7925 stream_.doConvertBuffer[mode] = true;
7926 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7927 stream_.nUserChannels[mode] > 1 )
7928 stream_.doConvertBuffer[mode] = true;
7930 // Allocate the ApiHandle if necessary and then save.
7931 AlsaHandle *apiInfo = 0;
7932 if ( stream_.apiHandle == 0 ) {
7934 apiInfo = (AlsaHandle *) new AlsaHandle;
7936 catch ( std::bad_alloc& ) {
7937 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7941 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7942 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7946 stream_.apiHandle = (void *) apiInfo;
7947 apiInfo->handles[0] = 0;
7948 apiInfo->handles[1] = 0;
7951 apiInfo = (AlsaHandle *) stream_.apiHandle;
7953 apiInfo->handles[mode] = phandle;
7956 // Allocate necessary internal buffers.
7957 unsigned long bufferBytes;
7958 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7959 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7960 if ( stream_.userBuffer[mode] == NULL ) {
7961 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7965 if ( stream_.doConvertBuffer[mode] ) {
7967 bool makeBuffer = true;
7968 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7969 if ( mode == INPUT ) {
7970 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7971 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7972 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7977 bufferBytes *= *bufferSize;
7978 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7979 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7980 if ( stream_.deviceBuffer == NULL ) {
7981 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7987 stream_.sampleRate = sampleRate;
7988 stream_.nBuffers = periods;
7989 stream_.device[mode] = device;
7990 stream_.state = STREAM_STOPPED;
7992 // Setup the buffer conversion information structure.
7993 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7995 // Setup thread if necessary.
7996 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7997 // We had already set up an output stream.
7998 stream_.mode = DUPLEX;
7999 // Link the streams if possible.
8000 apiInfo->synchronized = false;
8001 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
8002 apiInfo->synchronized = true;
8004 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
8005 error( RtAudioError::WARNING );
8009 stream_.mode = mode;
8011 // Setup callback thread.
8012 stream_.callbackInfo.object = (void *) this;
8014 // Set the thread attributes for joinable and realtime scheduling
8015 // priority (optional). The higher priority will only take affect
8016 // if the program is run as root or suid. Note, under Linux
8017 // processes with CAP_SYS_NICE privilege, a user can change
8018 // scheduling policy and priority (thus need not be root). See
8019 // POSIX "capabilities".
8020 pthread_attr_t attr;
8021 pthread_attr_init( &attr );
8022 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8023 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8024 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8025 stream_.callbackInfo.doRealtime = true;
8026 struct sched_param param;
8027 int priority = options->priority;
8028 int min = sched_get_priority_min( SCHED_RR );
8029 int max = sched_get_priority_max( SCHED_RR );
8030 if ( priority < min ) priority = min;
8031 else if ( priority > max ) priority = max;
8032 param.sched_priority = priority;
8034 // Set the policy BEFORE the priority. Otherwise it fails.
8035 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8036 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8037 // This is definitely required. Otherwise it fails.
8038 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8039 pthread_attr_setschedparam(&attr, ¶m);
8042 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8044 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8047 stream_.callbackInfo.isRunning = true;
8048 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
8049 pthread_attr_destroy( &attr );
8051 // Failed. Try instead with default attributes.
8052 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
8054 stream_.callbackInfo.isRunning = false;
8055 errorText_ = "RtApiAlsa::error creating callback thread!";
8065 pthread_cond_destroy( &apiInfo->runnable_cv );
8066 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8067 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8069 stream_.apiHandle = 0;
8072 if ( phandle) snd_pcm_close( phandle );
8074 for ( int i=0; i<2; i++ ) {
8075 if ( stream_.userBuffer[i] ) {
8076 free( stream_.userBuffer[i] );
8077 stream_.userBuffer[i] = 0;
8081 if ( stream_.deviceBuffer ) {
8082 free( stream_.deviceBuffer );
8083 stream_.deviceBuffer = 0;
8086 stream_.state = STREAM_CLOSED;
// Close an open ALSA stream: wake and join the callback thread, drop any
// still-running pcm handles, release the AlsaHandle and all internal
// buffers, then mark the stream UNINITIALIZED/CLOSED.
// NOTE(review): this copy of the file appears to have lost several lines
// (closing braces / early returns, e.g. after the WARNING below) — comments
// only were added here; code is byte-identical to the original lines.
8090 void RtApiAlsa :: closeStream()
8092 if ( stream_.state == STREAM_CLOSED ) {
8093 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8094 error( RtAudioError::WARNING );
8098 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Tell the callback loop to exit, then release it if it is parked on the
// runnable condition variable (stream stopped) so pthread_join can't hang.
8099 stream_.callbackInfo.isRunning = false;
8100 MUTEX_LOCK( &stream_.mutex );
8101 if ( stream_.state == STREAM_STOPPED ) {
8102 apiInfo->runnable = true;
8103 pthread_cond_signal( &apiInfo->runnable_cv );
8105 MUTEX_UNLOCK( &stream_.mutex );
8106 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, abort playback/capture immediately (snd_pcm_drop
// discards pending frames rather than draining them).
8108 if ( stream_.state == STREAM_RUNNING ) {
8109 stream_.state = STREAM_STOPPED;
8110 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8111 snd_pcm_drop( apiInfo->handles[0] );
8112 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8113 snd_pcm_drop( apiInfo->handles[1] );
// Tear down the per-API handle: condition variable and both pcm handles
// (index 0 = playback, index 1 = capture).
8117 pthread_cond_destroy( &apiInfo->runnable_cv );
8118 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8119 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8121 stream_.apiHandle = 0;
// Free the user-side buffers for both directions.
8124 for ( int i=0; i<2; i++ ) {
8125 if ( stream_.userBuffer[i] ) {
8126 free( stream_.userBuffer[i] );
8127 stream_.userBuffer[i] = 0;
// Free the (shared) device-side conversion buffer, if one was allocated.
8131 if ( stream_.deviceBuffer ) {
8132 free( stream_.deviceBuffer );
8133 stream_.deviceBuffer = 0;
8136 stream_.mode = UNINITIALIZED;
8137 stream_.state = STREAM_CLOSED;
// Start a stopped ALSA stream: prepare the pcm handle(s) if needed, mark
// the stream RUNNING, and signal the parked callback thread to resume.
// NOTE(review): some lines (result checks / goto targets / braces) appear
// elided in this copy; code left byte-identical, comments only added.
8140 void RtApiAlsa :: startStream()
8142 // This method calls snd_pcm_prepare if the device isn't already in that state.
8145 if ( stream_.state == STREAM_RUNNING ) {
8146 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8147 error( RtAudioError::WARNING );
8151 MUTEX_LOCK( &stream_.mutex );
// Reset the stream-time reference so getStreamTime() restarts cleanly.
8153 #if defined( HAVE_GETTIMEOFDAY )
8154 gettimeofday( &stream_.lastTickTimestamp, NULL );
8158 snd_pcm_state_t state;
8159 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8160 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback handle (index 0) unless ALSA already reports it
// as PREPARED.
8161 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8162 state = snd_pcm_state( handle[0] );
8163 if ( state != SND_PCM_STATE_PREPARED ) {
8164 result = snd_pcm_prepare( handle[0] );
8166 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8167 errorText_ = errorStream_.str();
// Prepare the capture handle (index 1). Skipped when the two handles are
// linked (synchronized), since the output side drives both. The drop()
// discards stale data captured while the device sat open but unstarted.
8173 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8174 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8175 state = snd_pcm_state( handle[1] );
8176 if ( state != SND_PCM_STATE_PREPARED ) {
8177 result = snd_pcm_prepare( handle[1] );
8179 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8180 errorText_ = errorStream_.str();
8186 stream_.state = STREAM_RUNNING;
// Release the callback thread, which waits on runnable_cv while stopped.
8189 apiInfo->runnable = true;
8190 pthread_cond_signal( &apiInfo->runnable_cv );
8191 MUTEX_UNLOCK( &stream_.mutex );
8193 if ( result >= 0 ) return;
8194 error( RtAudioError::SYSTEM_ERROR );
// Stop a running ALSA stream gracefully: drain queued output frames (or
// drop them when the handles are linked), drop capture, and park the
// callback thread. Compare abortStream(), which always drops immediately.
// NOTE(review): result checks / braces appear elided in this copy; code is
// byte-identical, comments only added.
8197 void RtApiAlsa :: stopStream()
8200 if ( stream_.state == STREAM_STOPPED ) {
8201 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8202 error( RtAudioError::WARNING );
// Flip state before taking the mutex so the callback loop sees STOPPED.
8206 stream_.state = STREAM_STOPPED;
8207 MUTEX_LOCK( &stream_.mutex );
8210 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8211 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8212 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked (synchronized) handles cannot be drained independently — drop.
8213 if ( apiInfo->synchronized )
8214 result = snd_pcm_drop( handle[0] );
8216 result = snd_pcm_drain( handle[0] );
8218 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8219 errorText_ = errorStream_.str();
// Capture side: discarding pending frames is always appropriate on stop.
8224 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8225 result = snd_pcm_drop( handle[1] );
8227 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8228 errorText_ = errorStream_.str();
8234 apiInfo->runnable = false; // fixes high CPU usage when stopped
8235 MUTEX_UNLOCK( &stream_.mutex );
8237 if ( result >= 0 ) return;
8238 error( RtAudioError::SYSTEM_ERROR );
// Abort a running ALSA stream immediately: snd_pcm_drop() on both handles
// (no drain — queued output frames are discarded), then park the callback
// thread. Mirrors stopStream() except for the unconditional drop.
// NOTE(review): result checks / braces appear elided in this copy; code is
// byte-identical, comments only added.
8241 void RtApiAlsa :: abortStream()
8244 if ( stream_.state == STREAM_STOPPED ) {
8245 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8246 error( RtAudioError::WARNING );
8250 stream_.state = STREAM_STOPPED;
8251 MUTEX_LOCK( &stream_.mutex );
8254 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8255 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8256 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8257 result = snd_pcm_drop( handle[0] );
8259 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8260 errorText_ = errorStream_.str();
// Capture side is skipped when handles are linked: dropping the playback
// handle already stops its linked partner.
8265 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8266 result = snd_pcm_drop( handle[1] );
8268 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8269 errorText_ = errorStream_.str();
8275 apiInfo->runnable = false; // fixes high CPU usage when stopped
8276 MUTEX_UNLOCK( &stream_.mutex );
8278 if ( result >= 0 ) return;
8279 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: park while stopped, invoke the
// user callback, then perform the capture read and/or playback write with
// format conversion, byte swapping, and xrun (EPIPE) recovery.
// Index convention throughout: [0] = OUTPUT/playback, [1] = INPUT/capture.
// NOTE(review): a number of lines (result checks, goto labels, braces,
// declarations of `result`, `buffer`, `channels`, `handle`) appear elided in
// this copy of the file; code is byte-identical, comments only added.
8282 void RtApiAlsa :: callbackEvent()
8284 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While the stream is stopped, sleep on runnable_cv instead of spinning.
// startStream()/closeStream() set runnable and signal to wake us.
8285 if ( stream_.state == STREAM_STOPPED ) {
8286 MUTEX_LOCK( &stream_.mutex );
8287 while ( !apiInfo->runnable )
8288 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8290 if ( stream_.state != STREAM_RUNNING ) {
8291 MUTEX_UNLOCK( &stream_.mutex );
8294 MUTEX_UNLOCK( &stream_.mutex );
8297 if ( stream_.state == STREAM_CLOSED ) {
8298 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8299 error( RtAudioError::WARNING );
// Invoke the user callback, reporting any xrun flags latched by a previous
// iteration via the status bitmask (then clear them).
8303 int doStopStream = 0;
8304 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8305 double streamTime = getStreamTime();
8306 RtAudioStreamStatus status = 0;
8307 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8308 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8309 apiInfo->xrun[0] = false;
8311 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8312 status |= RTAUDIO_INPUT_OVERFLOW;
8313 apiInfo->xrun[1] = false;
8315 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8316 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return of 2 requests an immediate abort (handled above this
// point in the full source); 1 requests a drain-stop after this buffer.
8318 if ( doStopStream == 2 ) {
8323 MUTEX_LOCK( &stream_.mutex );
8325 // The state might change while waiting on a mutex.
8326 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8332 snd_pcm_sframes_t frames;
8333 RtAudioFormat format;
8334 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture path ----
8336 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8338 // Setup parameters.
// Read into the device buffer when a conversion pass is needed,
// otherwise directly into the user buffer.
8339 if ( stream_.doConvertBuffer[1] ) {
8340 buffer = stream_.deviceBuffer;
8341 channels = stream_.nDeviceChannels[1];
8342 format = stream_.deviceFormat[1];
8345 buffer = stream_.userBuffer[1];
8346 channels = stream_.nUserChannels[1];
8347 format = stream_.userFormat;
8350 // Read samples from device in interleaved/non-interleaved format.
8351 if ( stream_.deviceInterleaved[1] )
8352 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the flat buffer.
8354 void *bufs[channels];
8355 size_t offset = stream_.bufferSize * formatBytes( format );
8356 for ( int i=0; i<channels; i++ )
8357 bufs[i] = (void *) (buffer + (i * offset));
8358 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
// Short read: either an overrun (EPIPE -> prepare and latch xrun[1] so
// the next callback reports RTAUDIO_INPUT_OVERFLOW) or a genuine error.
8361 if ( result < (int) stream_.bufferSize ) {
8362 // Either an error or overrun occured.
8363 if ( result == -EPIPE ) {
8364 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8365 if ( state == SND_PCM_STATE_XRUN ) {
8366 apiInfo->xrun[1] = true;
8367 result = snd_pcm_prepare( handle[1] );
8369 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8370 errorText_ = errorStream_.str();
8374 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8375 errorText_ = errorStream_.str();
8379 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8380 errorText_ = errorStream_.str();
8382 error( RtAudioError::WARNING );
8386 // Do byte swapping if necessary.
8387 if ( stream_.doByteSwap[1] )
8388 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8390 // Do buffer conversion if necessary.
8391 if ( stream_.doConvertBuffer[1] )
8392 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8394 // Check stream latency
8395 result = snd_pcm_delay( handle[1], &frames );
8396 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback path ----
8401 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8403 // Setup parameters and do buffer conversion if necessary.
8404 if ( stream_.doConvertBuffer[0] ) {
8405 buffer = stream_.deviceBuffer;
8406 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8407 channels = stream_.nDeviceChannels[0];
8408 format = stream_.deviceFormat[0];
8411 buffer = stream_.userBuffer[0];
8412 channels = stream_.nUserChannels[0];
8413 format = stream_.userFormat;
8416 // Do byte swapping if necessary.
8417 if ( stream_.doByteSwap[0] )
8418 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8420 // Write samples to device in interleaved/non-interleaved format.
8421 if ( stream_.deviceInterleaved[0] )
8422 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
// Non-interleaved: one pointer per channel, same layout as the read path.
8424 void *bufs[channels];
8425 size_t offset = stream_.bufferSize * formatBytes( format );
8426 for ( int i=0; i<channels; i++ )
8427 bufs[i] = (void *) (buffer + (i * offset));
8428 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
// Short write: underrun (EPIPE -> prepare and latch xrun[0] so the next
// callback reports RTAUDIO_OUTPUT_UNDERFLOW) or a genuine error.
8431 if ( result < (int) stream_.bufferSize ) {
8432 // Either an error or underrun occured.
8433 if ( result == -EPIPE ) {
8434 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8435 if ( state == SND_PCM_STATE_XRUN ) {
8436 apiInfo->xrun[0] = true;
8437 result = snd_pcm_prepare( handle[0] );
8439 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8440 errorText_ = errorStream_.str();
8443 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8446 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8447 errorText_ = errorStream_.str();
8451 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8452 errorText_ = errorStream_.str();
8454 error( RtAudioError::WARNING );
8458 // Check stream latency
8459 result = snd_pcm_delay( handle[0], &frames );
8460 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8464 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock, then honor a drain-stop (callback returned 1).
8466 RtApi::tickStreamTime();
8467 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread. Loops calling
// RtApiAlsa::callbackEvent() until closeStream() clears isRunning;
// pthread_testcancel() provides a cancellation point each iteration.
static void *alsaCallbackHandler( void *ptr )
8472 CallbackInfo *info = (CallbackInfo *) ptr;
8473 RtApiAlsa *object = (RtApiAlsa *) info->object;
8474 bool *isRunning = &info->isRunning;
// Diagnostic: report whether the RTAUDIO_SCHEDULE_REALTIME request actually
// resulted in SCHED_RR scheduling for this thread.
8476 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8477 if ( info->doRealtime ) {
8478 std::cerr << "RtAudio alsa: " <<
8479 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8480 "running realtime scheduling" << std::endl;
8484 while ( *isRunning == true ) {
8485 pthread_testcancel();
8486 object->callbackEvent();
8489 pthread_exit( NULL );
8492 //******************** End of __LINUX_ALSA__ *********************//
8495 #if defined(__LINUX_PULSE__)
8497 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8498 // and Tristan Matthews.
8500 #include <pulse/error.h>
8501 #include <pulse/simple.h>
// Sample rates the PulseAudio backend advertises/accepts; zero-terminated
// so callers can iterate with `for ( const unsigned int *sr = ...; *sr; ++sr )`.
8504 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8505 44100, 48000, 96000, 0};
// Pairs an RtAudio sample format with its PulseAudio equivalent; used as
// the element type of the supported_sampleformats table below.
8507 struct rtaudio_pa_format_mapping_t {
8508 RtAudioFormat rtaudio_format;
8509 pa_sample_format_t pa_format;
// RtAudio-to-PulseAudio format translation table, terminated by a
// {0, PA_SAMPLE_INVALID} sentinel. Formats not listed here fall back to
// internal conversion to FLOAT32 in probeDeviceOpen().
8512 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8513 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8514 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8515 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8516 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend. The constructor's init list
// references s_play/s_rec/runnable members whose declarations appear to
// have been elided from this copy (presumably the pa_simple handles, the
// callback thread id and the runnable flag — TODO confirm against the
// canonical source). Comments only added; code byte-identical.
8518 struct PulseAudioHandle {
8522 pthread_cond_t runnable_cv;
8524 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: tears down any stream still open. The call made when the
// state is not CLOSED appears elided from this copy (presumably
// closeStream() — TODO confirm). Code byte-identical, comment only added.
8527 RtApiPulse::~RtApiPulse()
8529 if ( stream_.state != STREAM_CLOSED )
// Device count for the PulseAudio backend; the body is elided in this copy
// (getDeviceInfo below ignores its device index, consistent with a single
// logical device — TODO confirm). Code byte-identical, comment only added.
8533 unsigned int RtApiPulse::getDeviceCount( void )
// Describe the single logical PulseAudio device: stereo in/out/duplex,
// default for both directions, the rates in SUPPORTED_SAMPLERATES, and the
// natively supported sample formats. The device index is ignored.
8538 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8540 RtAudio::DeviceInfo info;
8542 info.name = "PulseAudio";
8543 info.outputChannels = 2;
8544 info.inputChannels = 2;
8545 info.duplexChannels = 2;
8546 info.isDefaultOutput = true;
8547 info.isDefaultInput = true;
// Copy every rate from the zero-terminated table.
8549 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8550 info.sampleRates.push_back( *sr );
8552 info.preferredSampleRate = 48000;
// Matches the supported_sampleformats mapping table above.
8553 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread; the Pulse analog
// of alsaCallbackHandler(). Loops calling RtApiPulse::callbackEvent()
// until closeStream() clears isRunning.
static void *pulseaudio_callback( void * user )
8560 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8561 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8562 volatile bool *isRunning = &cbi->isRunning;
// Diagnostic: report whether realtime (SCHED_RR) scheduling actually took
// effect when RTAUDIO_SCHEDULE_REALTIME was requested.
8564 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8565 if (cbi->doRealtime) {
8566 std::cerr << "RtAudio pulse: " <<
8567 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8568 "running realtime scheduling" << std::endl;
8572 while ( *isRunning ) {
8573 pthread_testcancel();
8574 context->callbackEvent();
8577 pthread_exit( NULL );
// Close the PulseAudio stream: wake and join the callback thread, flush
// and free the pa_simple connections, release the handle and user buffers,
// then mark the stream CLOSED/UNINITIALIZED.
// NOTE(review): some braces / null-checks appear elided in this copy (e.g.
// around the s_rec free); code byte-identical, comments only added.
8580 void RtApiPulse::closeStream( void )
8582 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8584 stream_.callbackInfo.isRunning = false;
// If the callback thread is parked on runnable_cv (stream stopped), wake
// it so pthread_join below cannot block forever.
8586 MUTEX_LOCK( &stream_.mutex );
8587 if ( stream_.state == STREAM_STOPPED ) {
8588 pah->runnable = true;
8589 pthread_cond_signal( &pah->runnable_cv );
8591 MUTEX_UNLOCK( &stream_.mutex );
8593 pthread_join( pah->thread, 0 );
// Flush unplayed output before freeing the playback connection.
8594 if ( pah->s_play ) {
8595 pa_simple_flush( pah->s_play, NULL );
8596 pa_simple_free( pah->s_play );
8599 pa_simple_free( pah->s_rec );
8601 pthread_cond_destroy( &pah->runnable_cv );
8603 stream_.apiHandle = 0;
// Free the user-side buffers ([0] = output, [1] = input).
8606 if ( stream_.userBuffer[0] ) {
8607 free( stream_.userBuffer[0] );
8608 stream_.userBuffer[0] = 0;
8610 if ( stream_.userBuffer[1] ) {
8611 free( stream_.userBuffer[1] );
8612 stream_.userBuffer[1] = 0;
8615 stream_.state = STREAM_CLOSED;
8616 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: park while stopped,
// invoke the user callback, then push output via pa_simple_write and/or
// pull input via pa_simple_read, converting between user and device
// formats when needed. OUTPUT/INPUT index stream_ arrays as [0]/[1].
// NOTE(review): several lines (returns, braces, declarations of `bytes`
// and `pa_error`, an apparent handle deletion on doStopStream == 2) appear
// elided in this copy; code byte-identical, comments only added.
8619 void RtApiPulse::callbackEvent( void )
8621 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// While stopped, sleep on runnable_cv; startStream()/closeStream() signal.
8623 if ( stream_.state == STREAM_STOPPED ) {
8624 MUTEX_LOCK( &stream_.mutex );
8625 while ( !pah->runnable )
8626 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8628 if ( stream_.state != STREAM_RUNNING ) {
8629 MUTEX_UNLOCK( &stream_.mutex );
8632 MUTEX_UNLOCK( &stream_.mutex );
8635 if ( stream_.state == STREAM_CLOSED ) {
8636 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8637 "this shouldn't happen!";
8638 error( RtAudioError::WARNING );
// Invoke the user callback with both user buffers; its return value
// selects continue (0), drain-stop (1) or abort (2).
8642 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8643 double streamTime = getStreamTime();
8644 RtAudioStreamStatus status = 0;
8645 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8646 stream_.bufferSize, streamTime, status,
8647 stream_.callbackInfo.userData );
8649 if ( doStopStream == 2 ) {
8654 MUTEX_LOCK( &stream_.mutex );
// Pick the buffer the server actually talks to: the shared device buffer
// when a conversion pass is required, the user buffer otherwise.
8655 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8656 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
// Re-check after acquiring the mutex: the state may have changed while
// we were blocked.
8658 if ( stream_.state != STREAM_RUNNING )
// ---- Playback: convert (if needed), size the transfer, write. ----
8663 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8664 if ( stream_.doConvertBuffer[OUTPUT] ) {
8665 convertBuffer( stream_.deviceBuffer,
8666 stream_.userBuffer[OUTPUT],
8667 stream_.convertInfo[OUTPUT] );
8668 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8669 formatBytes( stream_.deviceFormat[OUTPUT] );
8671 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8672 formatBytes( stream_.userFormat );
// pa_simple_write blocks until the server accepts the whole buffer.
8674 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8675 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8676 pa_strerror( pa_error ) << ".";
8677 errorText_ = errorStream_.str();
8678 error( RtAudioError::WARNING );
// ---- Capture: size the transfer, read, then convert (if needed). ----
8682 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8683 if ( stream_.doConvertBuffer[INPUT] )
8684 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8685 formatBytes( stream_.deviceFormat[INPUT] );
8687 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8688 formatBytes( stream_.userFormat );
8690 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8691 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8692 pa_strerror( pa_error ) << ".";
8693 errorText_ = errorStream_.str();
8694 error( RtAudioError::WARNING );
8696 if ( stream_.doConvertBuffer[INPUT] ) {
8697 convertBuffer( stream_.userBuffer[INPUT],
8698 stream_.deviceBuffer,
8699 stream_.convertInfo[INPUT] );
8704 MUTEX_UNLOCK( &stream_.mutex );
8705 RtApi::tickStreamTime();
// Callback requested a drain-stop; the call target appears elided in this
// copy (presumably stopStream() — TODO confirm).
8707 if ( doStopStream == 1 )
// Start a stopped PulseAudio stream: reset the stream-time reference, mark
// RUNNING, and signal the parked callback thread. Unlike the ALSA backend
// there is no device-prepare step — pa_simple connections are always ready.
// NOTE(review): returns / #endif lines appear elided in this copy; code is
// byte-identical, comments only added.
8711 void RtApiPulse::startStream( void )
8713 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8715 if ( stream_.state == STREAM_CLOSED ) {
8716 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8717 error( RtAudioError::INVALID_USE );
8720 if ( stream_.state == STREAM_RUNNING ) {
8721 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8722 error( RtAudioError::WARNING );
8726 MUTEX_LOCK( &stream_.mutex );
8728 #if defined( HAVE_GETTIMEOFDAY )
8729 gettimeofday( &stream_.lastTickTimestamp, NULL );
8732 stream_.state = STREAM_RUNNING;
// Release the callback thread waiting on runnable_cv.
8734 pah->runnable = true;
8735 pthread_cond_signal( &pah->runnable_cv );
8736 MUTEX_UNLOCK( &stream_.mutex );
// Stop a running PulseAudio stream gracefully: mark STOPPED (parking the
// callback thread on its next iteration) and drain queued output so
// already-written audio finishes playing. Compare abortStream(), which
// flushes (discards) instead of draining.
8739 void RtApiPulse::stopStream( void )
8741 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8743 if ( stream_.state == STREAM_CLOSED ) {
8744 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8745 error( RtAudioError::INVALID_USE );
8748 if ( stream_.state == STREAM_STOPPED ) {
8749 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8750 error( RtAudioError::WARNING );
8754 stream_.state = STREAM_STOPPED;
8755 MUTEX_LOCK( &stream_.mutex );
// Drain blocks until the server has played everything already written.
8757 if ( pah && pah->s_play ) {
8759 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8760 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8761 pa_strerror( pa_error ) << ".";
8762 errorText_ = errorStream_.str();
// Unlock before raising so the error path cannot leave the mutex held.
8763 MUTEX_UNLOCK( &stream_.mutex );
8764 error( RtAudioError::SYSTEM_ERROR );
8769 stream_.state = STREAM_STOPPED;
8770 MUTEX_UNLOCK( &stream_.mutex );
// Abort a running PulseAudio stream immediately: mark STOPPED and flush
// (discard) any queued output instead of draining it. Mirrors stopStream()
// except pa_simple_flush replaces pa_simple_drain.
8773 void RtApiPulse::abortStream( void )
8775 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8777 if ( stream_.state == STREAM_CLOSED ) {
8778 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8779 error( RtAudioError::INVALID_USE );
8782 if ( stream_.state == STREAM_STOPPED ) {
8783 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8784 error( RtAudioError::WARNING );
8788 stream_.state = STREAM_STOPPED;
8789 MUTEX_LOCK( &stream_.mutex );
8791 if ( pah && pah->s_play ) {
8793 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8794 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8795 pa_strerror( pa_error ) << ".";
8796 errorText_ = errorStream_.str();
// Unlock before raising so the error path cannot leave the mutex held.
8797 MUTEX_UNLOCK( &stream_.mutex );
8798 error( RtAudioError::SYSTEM_ERROR );
8803 stream_.state = STREAM_STOPPED;
8804 MUTEX_UNLOCK( &stream_.mutex );
// Open/configure one direction (INPUT or OUTPUT) of a PulseAudio simple
// stream.  Validates the requested parameters against what the Pulse
// backend supports, allocates user/device buffers, connects to the
// PulseAudio server, and (on first call) spawns the callback thread.
// Returns false on any failure; true on success.
// NOTE(review): interior lines were elided in this extraction (cleanup
// paths, an `error:` label, sample-spec declaration, etc. are missing
// from view); comments cover only the visible logic.
8807 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8808 unsigned int channels, unsigned int firstChannel,
8809 unsigned int sampleRate, RtAudioFormat format,
8810 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8812   PulseAudioHandle *pah = 0;
8813   unsigned long bufferBytes = 0;
// The Pulse backend exposes exactly one device (the server default),
// supports only mono/stereo, and only a fixed set of sample rates.
8816   if ( device != 0 ) return false;
8817   if ( mode != INPUT && mode != OUTPUT ) return false;
8818   if ( channels != 1 && channels != 2 ) {
8819     errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8822   ss.channels = channels;
// Channel offsets are not supported by this backend.
8824   if ( firstChannel != 0 ) return false;
// Accept the sample rate only if it appears in the supported-rate table.
8826   bool sr_found = false;
8827   for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8828     if ( sampleRate == *sr ) {
8830       stream_.sampleRate = sampleRate;
8831       ss.rate = sampleRate;
8836     errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Map the requested RtAudio format onto a native PulseAudio sample
// format if a direct mapping exists.
8841   for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8842         sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8843     if ( format == sf->rtaudio_format ) {
8845       stream_.userFormat = sf->rtaudio_format;
8846       stream_.deviceFormat[mode] = stream_.userFormat;
8847       ss.format = sf->pa_format;
// No native mapping: run the device at float32 and let RtAudio's
// internal buffer conversion translate to/from the user format.
8851   if ( !sf_found ) { // Use internal data format conversion.
8852     stream_.userFormat = format;
8853     stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8854     ss.format = PA_SAMPLE_FLOAT32LE;
8857   // Set other stream parameters.
8858   if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8859   else stream_.userInterleaved = true;
8860   stream_.deviceInterleaved[mode] = true;
8861   stream_.nBuffers = 1;
8862   stream_.doByteSwap[mode] = false;
8863   stream_.nUserChannels[mode] = channels;
// firstChannel is guaranteed 0 here (checked above), so device channel
// count equals the user channel count.
8864   stream_.nDeviceChannels[mode] = channels + firstChannel;
8865   stream_.channelOffset[mode] = 0;
8866   std::string streamName = "RtAudio";
8868   // Set flags for buffer conversion.
8869   stream_.doConvertBuffer[mode] = false;
8870   if ( stream_.userFormat != stream_.deviceFormat[mode] )
8871     stream_.doConvertBuffer[mode] = true;
8872   if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8873     stream_.doConvertBuffer[mode] = true;
8875   // Allocate necessary internal buffers.
8876   bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8877   stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8878   if ( stream_.userBuffer[mode] == NULL ) {
8879     errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8882   stream_.bufferSize = *bufferSize;
// A separate device-format buffer is needed only when converting; for
// duplex it may be shared with the output side if that one is larger.
8884   if ( stream_.doConvertBuffer[mode] ) {
8886     bool makeBuffer = true;
8887     bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8888     if ( mode == INPUT ) {
8889       if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8890         unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8891         if ( bufferBytes <= bytesOut ) makeBuffer = false;
8896       bufferBytes *= *bufferSize;
8897       if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8898       stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8899       if ( stream_.deviceBuffer == NULL ) {
8900         errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8906   stream_.device[mode] = device;
8908   // Setup the buffer conversion information structure.
8909   if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// Lazily create the API handle (shared between the two directions of a
// duplex stream) and its condition variable.
8911   if ( !stream_.apiHandle ) {
8912     PulseAudioHandle *pah = new PulseAudioHandle;
8914       errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8918     stream_.apiHandle = pah;
8919     if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8920       errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8924   pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8927   if ( options && !options->streamName.empty() ) streamName = options->streamName;
// Record connection: pass buffer attributes so capture latency tracks
// the requested buffer size.  maxlength = -1 means "server default"
// (the field is unsigned; -1 wraps to the sentinel Pulse expects).
8930     pa_buffer_attr buffer_attr;
8931     buffer_attr.fragsize = bufferBytes;
8932     buffer_attr.maxlength = -1;
8934     pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8935     if ( !pah->s_rec ) {
8936       errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
// Playback connection uses default (NULL) buffer attributes.
8941     pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8942     if ( !pah->s_play ) {
8943       errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// Track the overall stream mode: first direction sets it; opening the
// second direction promotes the stream to DUPLEX.
8951   if ( stream_.mode == UNINITIALIZED )
8952     stream_.mode = mode;
8953   else if ( stream_.mode == mode )
8956     stream_.mode = DUPLEX;
// Spawn the callback thread once, on the first successful open.
8958   if ( !stream_.callbackInfo.isRunning ) {
8959     stream_.callbackInfo.object = this;
8961     stream_.state = STREAM_STOPPED;
8962     // Set the thread attributes for joinable and realtime scheduling
8963     // priority (optional). The higher priority will only take affect
8964     // if the program is run as root or suid. Note, under Linux
8965     // processes with CAP_SYS_NICE privilege, a user can change
8966     // scheduling policy and priority (thus need not be root). See
8967     // POSIX "capabilities".
8968     pthread_attr_t attr;
8969     pthread_attr_init( &attr );
8970     pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8971 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8972     if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8973       stream_.callbackInfo.doRealtime = true;
8974       struct sched_param param;
8975       int priority = options->priority;
8976       int min = sched_get_priority_min( SCHED_RR );
8977       int max = sched_get_priority_max( SCHED_RR );
// Clamp the requested priority into the valid SCHED_RR range.
8978       if ( priority < min ) priority = min;
8979       else if ( priority > max ) priority = max;
8980       param.sched_priority = priority;
8982       // Set the policy BEFORE the priority. Otherwise it fails.
8983       pthread_attr_setschedpolicy(&attr, SCHED_RR);
8984       pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8986       // This is definitely required. Otherwise it fails.
8986       pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8987       pthread_attr_setschedparam(&attr, &param);
8990       pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8992     pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8995     stream_.callbackInfo.isRunning = true;
8996     int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8997     pthread_attr_destroy(&attr);
// If realtime attributes were refused, retry with default attributes
// before giving up.
8999       // Failed. Try instead with default attributes.
9000       result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
9002         stream_.callbackInfo.isRunning = false;
9003         errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
// Error/cleanup path (label elided from this view): tear down the
// condition variable, handle, and all allocated buffers.
9012   if ( pah && stream_.callbackInfo.isRunning ) {
9013     pthread_cond_destroy( &pah->runnable_cv );
9015     stream_.apiHandle = 0;
9018   for ( int i=0; i<2; i++ ) {
9019     if ( stream_.userBuffer[i] ) {
9020       free( stream_.userBuffer[i] );
9021       stream_.userBuffer[i] = 0;
9025   if ( stream_.deviceBuffer ) {
9026     free( stream_.deviceBuffer );
9027     stream_.deviceBuffer = 0;
9030   stream_.state = STREAM_CLOSED;
9034 //******************** End of __LINUX_PULSE__ *********************//
9037 #if defined(__LINUX_OSS__)
9040 #include <sys/ioctl.h>
9043 #include <sys/soundcard.h>
9047 static void *ossCallbackHandler(void * ptr);
9049 // A structure to hold various information related to the OSS API
// implementation.  (The struct's opening declaration line was elided in
// this extraction; only the members below are visible.)
// Per-direction OSS device file descriptors: [0] = playback, [1] = capture.
9052   int id[2]; // device ids
// Condition variable the callback thread waits on while the stream is
// stopped (signalled from startStream()/closeStream()).
9055   pthread_cond_t runnable;
// Default state: no devices open, duplex trigger not yet issued,
// no xruns recorded in either direction.
9058   :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor: all stream state is initialized by the RtApi
// base class, so the body is empty.
9061 RtApiOss :: RtApiOss()
9063   // Nothing to do here.
// Destructor: make sure any open stream is fully shut down (thread
// joined, descriptors closed, buffers freed) before the object dies.
9066 RtApiOss :: ~RtApiOss()
9068   if ( stream_.state != STREAM_CLOSED ) closeStream();
// Return the number of OSS audio devices, obtained from the system
// mixer via SNDCTL_SYSINFO.  Emits a WARNING (and, per the visible
// code paths, returns early) if /dev/mixer cannot be opened or the
// installed OSS version predates 4.0 (which introduced SNDCTL_SYSINFO).
9071 unsigned int RtApiOss :: getDeviceCount( void )
9073   int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9074   if ( mixerfd == -1 ) {
9075     errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9076     error( RtAudioError::WARNING );
9080   oss_sysinfo sysinfo;
9081   if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9083     errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9084     error( RtAudioError::WARNING );
// numaudios is OSS's count of audio devices on the system.
9089   return sysinfo.numaudios;
// Probe one OSS device and fill in an RtAudio::DeviceInfo: channel
// capabilities, supported native data formats, and supported sample
// rates.  info.probed stays false on any failure path.
// NOTE(review): interior lines were elided in this extraction; the
// visible logic is annotated below.
9092 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9094   RtAudio::DeviceInfo info;
9095   info.probed = false;
// All queries go through the system mixer device.
9097   int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9098   if ( mixerfd == -1 ) {
9099     errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9100     error( RtAudioError::WARNING );
9104   oss_sysinfo sysinfo;
9105   int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9106   if ( result == -1 ) {
9108     errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9109     error( RtAudioError::WARNING );
// Validate the requested device index against the system device count.
9113   unsigned nDevices = sysinfo.numaudios;
9114   if ( nDevices == 0 ) {
9116     errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9117     error( RtAudioError::INVALID_USE );
9121   if ( device >= nDevices ) {
9123     errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9124     error( RtAudioError::INVALID_USE );
// Fetch per-device capabilities (the line setting ainfo.dev was elided
// from this view).
9128   oss_audioinfo ainfo;
9130   result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9132   if ( result == -1 ) {
9133     errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9134     errorText_ = errorStream_.str();
9135     error( RtAudioError::WARNING );
// Channel capabilities come straight from the caps bitmask.
9140   if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9141   if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9142   if ( ainfo.caps & PCM_CAP_DUPLEX ) {
// NOTE(review): the PCM_CAP_DUPLEX test below is redundant — the
// enclosing if already established it.
9143     if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9144       info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9147   // Probe data formats ... do for input
9148   unsigned long mask = ainfo.iformats;
9149   if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9150     info.nativeFormats |= RTAUDIO_SINT16;
9151   if ( mask & AFMT_S8 )
9152     info.nativeFormats |= RTAUDIO_SINT8;
9153   if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9154     info.nativeFormats |= RTAUDIO_SINT32;
// AFMT_FLOAT / AFMT_S24_* are only defined on some OSS versions; the
// guarding #ifdef lines were elided from this view.
9156   if ( mask & AFMT_FLOAT )
9157     info.nativeFormats |= RTAUDIO_FLOAT32;
9159   if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9160     info.nativeFormats |= RTAUDIO_SINT24;
9162   // Check that we have at least one supported format
9163   if ( info.nativeFormats == 0 ) {
9164     errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9165     errorText_ = errorStream_.str();
9166     error( RtAudioError::WARNING );
9170   // Probe the supported sample rates.
9171   info.sampleRates.clear();
// If the driver enumerates discrete rates, intersect them with
// RtAudio's SAMPLE_RATES table; prefer the highest rate <= 48 kHz.
9172   if ( ainfo.nrates ) {
9173     for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9174       for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9175         if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9176           info.sampleRates.push_back( SAMPLE_RATES[k] );
9178           if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9179             info.preferredSampleRate = SAMPLE_RATES[k];
// Otherwise the driver reports a continuous min/max range; accept every
// table rate that falls inside it.
9187     // Check min and max rate values;
9188     for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9189       if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9190         info.sampleRates.push_back( SAMPLE_RATES[k] );
9192         if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9193           info.preferredSampleRate = SAMPLE_RATES[k];
9198   if ( info.sampleRates.size() == 0 ) {
9199     errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9200     errorText_ = errorStream_.str();
9201     error( RtAudioError::WARNING );
// Success path (info.probed = true presumably set on an elided line —
// TODO confirm against the full source).
9205     info.name = ainfo.name;
// Open/configure one direction (INPUT or OUTPUT) of an OSS stream:
// validate the device, open its /dev node, negotiate channels, data
// format, fragment/buffer size, and sample rate, allocate conversion
// buffers, and (when the stream is first set up) spawn the callback
// thread.  Returns false on any failure; true on success.
// NOTE(review): interior lines were elided in this extraction (cleanup
// paths, flag setup, returns, #ifdef guards); comments cover only the
// visible logic.
9212 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9213 unsigned int firstChannel, unsigned int sampleRate,
9214 RtAudioFormat format, unsigned int *bufferSize,
9215 RtAudio::StreamOptions *options )
// Re-validate device existence via the mixer, mirroring getDeviceInfo().
9217   int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9218   if ( mixerfd == -1 ) {
9219     errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9223   oss_sysinfo sysinfo;
9224   int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9225   if ( result == -1 ) {
9227     errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9231   unsigned nDevices = sysinfo.numaudios;
9232   if ( nDevices == 0 ) {
9233     // This should not happen because a check is made before this function is called.
9235     errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9239   if ( device >= nDevices ) {
9240     // This should not happen because a check is made before this function is called.
9242     errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9246   oss_audioinfo ainfo;
9248   result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9250   if ( result == -1 ) {
// NOTE(review): message says "getDeviceInfo" but this is
// probeDeviceOpen — likely a copy/paste slip in the original.
9251     errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9252     errorText_ = errorStream_.str();
9256   // Check if device supports input or output
9257   if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9258        ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9259     if ( mode == OUTPUT )
9260       errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9262       errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9263     errorText_ = errorStream_.str();
// Decide open flags.  OSS has no true shared duplex via two opens of
// the same node: if the same device was already opened for output, it
// is closed here and reopened O_RDWR below (duplex path).
9268   OssHandle *handle = (OssHandle *) stream_.apiHandle;
9269   if ( mode == OUTPUT )
9271   else { // mode == INPUT
9272     if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9273       // We just set the same device for playback ... close and reopen for duplex (OSS only).
9274       close( handle->id[0] );
9276       if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9277         errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9278         errorText_ = errorStream_.str();
9281       // Check that the number previously set channels is the same.
9282       if ( stream_.nUserChannels[0] != channels ) {
9283         errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9284         errorText_ = errorStream_.str();
9293   // Set exclusive access if specified.
9294   if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9296   // Try to open the device.
9298   fd = open( ainfo.devnode, flags, 0 );
9300     if ( errno == EBUSY )
9301       errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9303       errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9304     errorText_ = errorStream_.str();
9308   // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): `flags | O_RDWR` is always non-zero (O_RDWR != 0), so
// this condition is always true — almost certainly intended to be
// `flags & O_RDWR`.  Left as-is; needs a code fix, not a comment.
9310     if ( flags | O_RDWR ) {
9311       result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9312       if ( result == -1) {
9313         errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9314         errorText_ = errorStream_.str();
9320   // Check the device channel support.
9321   stream_.nUserChannels[mode] = channels;
9322   if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9324     errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9325     errorText_ = errorStream_.str();
9329   // Set the number of channels.
9330   int deviceChannels = channels + firstChannel;
9331   result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
// The driver may round the channel count; fewer than requested is fatal.
9332   if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9334     errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9335     errorText_ = errorStream_.str();
9338   stream_.nDeviceChannels[mode] = deviceChannels;
9340   // Get the data format mask
9342   result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9343   if ( result == -1 ) {
9345     errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9346     errorText_ = errorStream_.str();
9350   // Determine how to set the device format.
9351   stream_.userFormat = format;
9352   int deviceFormat = -1;
9353   stream_.doByteSwap[mode] = false;
// First, try the user's exact format, preferring native-endian (NE)
// and falling back to opposite-endian (OE) with byte swapping enabled.
9354   if ( format == RTAUDIO_SINT8 ) {
9355     if ( mask & AFMT_S8 ) {
9356       deviceFormat = AFMT_S8;
9357       stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9360   else if ( format == RTAUDIO_SINT16 ) {
9361     if ( mask & AFMT_S16_NE ) {
9362       deviceFormat = AFMT_S16_NE;
9363       stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9365     else if ( mask & AFMT_S16_OE ) {
9366       deviceFormat = AFMT_S16_OE;
9367       stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9368       stream_.doByteSwap[mode] = true;
9371   else if ( format == RTAUDIO_SINT24 ) {
9372     if ( mask & AFMT_S24_NE ) {
9373       deviceFormat = AFMT_S24_NE;
9374       stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9376     else if ( mask & AFMT_S24_OE ) {
9377       deviceFormat = AFMT_S24_OE;
9378       stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9379       stream_.doByteSwap[mode] = true;
9382   else if ( format == RTAUDIO_SINT32 ) {
9383     if ( mask & AFMT_S32_NE ) {
9384       deviceFormat = AFMT_S32_NE;
9385       stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9387     else if ( mask & AFMT_S32_OE ) {
9388       deviceFormat = AFMT_S32_OE;
9389       stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9390       stream_.doByteSwap[mode] = true;
// The requested format is unavailable: fall back to the "best" device
// format the mask offers (RtAudio's buffer conversion bridges the gap).
9394   if ( deviceFormat == -1 ) {
9395     // The user requested format is not natively supported by the device.
9396     if ( mask & AFMT_S16_NE ) {
9397       deviceFormat = AFMT_S16_NE;
9398       stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9400     else if ( mask & AFMT_S32_NE ) {
9401       deviceFormat = AFMT_S32_NE;
9402       stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9404     else if ( mask & AFMT_S24_NE ) {
9405       deviceFormat = AFMT_S24_NE;
9406       stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9408     else if ( mask & AFMT_S16_OE ) {
9409       deviceFormat = AFMT_S16_OE;
9410       stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9411       stream_.doByteSwap[mode] = true;
9413     else if ( mask & AFMT_S32_OE ) {
9414       deviceFormat = AFMT_S32_OE;
9415       stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9416       stream_.doByteSwap[mode] = true;
9418     else if ( mask & AFMT_S24_OE ) {
9419       deviceFormat = AFMT_S24_OE;
9420       stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9421       stream_.doByteSwap[mode] = true;
9423     else if ( mask & AFMT_S8) {
9424       deviceFormat = AFMT_S8;
9425       stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9429   if ( stream_.deviceFormat[mode] == 0 ) {
9430     // This really shouldn't happen ...
9432     errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9433     errorText_ = errorStream_.str();
9437   // Set the data format.
// The ioctl may silently substitute a format; treat any change as fatal.
9438   int temp = deviceFormat;
9439   result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9440   if ( result == -1 || deviceFormat != temp ) {
9442     errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9443     errorText_ = errorStream_.str();
9447   // Attempt to set the buffer size. According to OSS, the minimum
9448   // number of buffers is two. The supposed minimum buffer size is 16
9449   // bytes, so that will be our lower bound. The argument to this
9450   // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9451   // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9452   // We'll check the actual value used near the end of the setup
9454   int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9455   if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9457   if ( options ) buffers = options->numberOfBuffers;
9458   if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9459   if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) == log2(x): packs floor(log2(bytes)) into the low
// 16 bits and the buffer count into the high 16, per the 0xMMMMSSSS form.
9460   temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9461   result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9462   if ( result == -1 ) {
9464     errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9465     errorText_ = errorStream_.str();
9468   stream_.nBuffers = buffers;
9470   // Save buffer size (in sample frames).
9471   *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9472   stream_.bufferSize = *bufferSize;
9474   // Set the sample rate.
9475   int srate = sampleRate;
9476   result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9477   if ( result == -1 ) {
9479     errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9480     errorText_ = errorStream_.str();
9484   // Verify the sample rate setup worked.
// Allow up to 100 Hz of driver rounding before calling it a failure.
9485   if ( abs( srate - (int)sampleRate ) > 100 ) {
9487     errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9488     errorText_ = errorStream_.str();
9491   stream_.sampleRate = sampleRate;
// Duplex on one device node: both directions share the format and
// channel count just negotiated.
9493   if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9494     // We're doing duplex setup here.
9495     stream_.deviceFormat[0] = stream_.deviceFormat[1];
9496     stream_.nDeviceChannels[0] = deviceChannels;
9499   // Set interleaving parameters.
9500   stream_.userInterleaved = true;
9501   stream_.deviceInterleaved[mode] =  true;
9502   if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9503     stream_.userInterleaved = false;
9505   // Set flags for buffer conversion
9506   stream_.doConvertBuffer[mode] = false;
9507   if ( stream_.userFormat != stream_.deviceFormat[mode] )
9508     stream_.doConvertBuffer[mode] = true;
9509   if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9510     stream_.doConvertBuffer[mode] = true;
9511   if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9512        stream_.nUserChannels[mode] > 1 )
9513     stream_.doConvertBuffer[mode] = true;
9515   // Allocate the stream handles if necessary and then save.
9516   if ( stream_.apiHandle == 0 ) {
9518       handle = new OssHandle;
9520     catch ( std::bad_alloc& ) {
9521       errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9525     if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9526       errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9530     stream_.apiHandle = (void *) handle;
9533     handle = (OssHandle *) stream_.apiHandle;
9535   handle->id[mode] = fd;
9537   // Allocate necessary internal buffers.
9538   unsigned long bufferBytes;
9539   bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9540   stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9541   if ( stream_.userBuffer[mode] == NULL ) {
9542     errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
// Device-format buffer only when converting; for duplex, reuse the
// output-side buffer if it is already large enough.
9546   if ( stream_.doConvertBuffer[mode] ) {
9548     bool makeBuffer = true;
9549     bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9550     if ( mode == INPUT ) {
9551       if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9552         unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9553         if ( bufferBytes <= bytesOut ) makeBuffer = false;
9558       bufferBytes *= *bufferSize;
9559       if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9560       stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9561       if ( stream_.deviceBuffer == NULL ) {
9562         errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9568   stream_.device[mode] = device;
9569   stream_.state = STREAM_STOPPED;
9571   // Setup the buffer conversion information structure.
9572   if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9574   // Setup thread if necessary.
9575   if ( stream_.mode == OUTPUT && mode == INPUT ) {
9576     // We had already set up an output stream.
9577     stream_.mode = DUPLEX;
9578     if ( stream_.device[0] == device ) handle->id[0] = fd;
9581     stream_.mode = mode;
9583     // Setup callback thread.
9584     stream_.callbackInfo.object = (void *) this;
9586     // Set the thread attributes for joinable and realtime scheduling
9587     // priority. The higher priority will only take affect if the
9588     // program is run as root or suid.
9589     pthread_attr_t attr;
9590     pthread_attr_init( &attr );
9591     pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9592 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9593     if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9594       stream_.callbackInfo.doRealtime = true;
9595       struct sched_param param;
9596       int priority = options->priority;
9597       int min = sched_get_priority_min( SCHED_RR );
9598       int max = sched_get_priority_max( SCHED_RR );
// Clamp the requested priority into the valid SCHED_RR range.
9599       if ( priority < min ) priority = min;
9600       else if ( priority > max ) priority = max;
9601       param.sched_priority = priority;
9603       // Set the policy BEFORE the priority. Otherwise it fails.
9604       pthread_attr_setschedpolicy(&attr, SCHED_RR);
9605       pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9606       // This is definitely required. Otherwise it fails.
9607       pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9608       pthread_attr_setschedparam(&attr, &param);
9611       pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9613       pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9616     stream_.callbackInfo.isRunning = true;
9617     result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9618     pthread_attr_destroy( &attr );
// If creation with realtime attributes failed, retry with defaults.
9620       // Failed. Try instead with default attributes.
9621       result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9623         stream_.callbackInfo.isRunning = false;
9624         errorText_ = "RtApiOss::error creating callback thread!";
// Error/cleanup path (label elided from this view): destroy the
// condition variable, close both descriptors, and free all buffers.
9634     pthread_cond_destroy( &handle->runnable );
9635     if ( handle->id[0] ) close( handle->id[0] );
9636     if ( handle->id[1] ) close( handle->id[1] );
9638     stream_.apiHandle = 0;
9641   for ( int i=0; i<2; i++ ) {
9642     if ( stream_.userBuffer[i] ) {
9643       free( stream_.userBuffer[i] );
9644       stream_.userBuffer[i] = 0;
9648   if ( stream_.deviceBuffer ) {
9649     free( stream_.deviceBuffer );
9650     stream_.deviceBuffer = 0;
9653   stream_.state = STREAM_CLOSED;
// Close an open OSS stream: wake and join the callback thread, halt any
// active device I/O, destroy the handle, free all buffers, and reset
// the stream to UNINITIALIZED/CLOSED.
9657 void RtApiOss :: closeStream()
9659   if ( stream_.state == STREAM_CLOSED ) {
9660     errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9661     error( RtAudioError::WARNING );
// Clear isRunning first so the callback loop exits, then signal the
// condition variable in case the thread is parked waiting to run.
9665   OssHandle *handle = (OssHandle *) stream_.apiHandle;
9666   stream_.callbackInfo.isRunning = false;
9667   MUTEX_LOCK( &stream_.mutex );
9668   if ( stream_.state == STREAM_STOPPED )
9669     pthread_cond_signal( &handle->runnable );
9670   MUTEX_UNLOCK( &stream_.mutex );
9671   pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, halt device I/O: id[0] = playback, id[1] = capture.
9673   if ( stream_.state == STREAM_RUNNING ) {
9674     if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9675       ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9677       ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9678     stream_.state = STREAM_STOPPED;
// Tear down the handle: condition variable and any open descriptors.
9682     pthread_cond_destroy( &handle->runnable );
9683     if ( handle->id[0] ) close( handle->id[0] );
9684     if ( handle->id[1] ) close( handle->id[1] );
9686     stream_.apiHandle = 0;
// Release user buffers for both directions and the shared device buffer.
9689   for ( int i=0; i<2; i++ ) {
9690     if ( stream_.userBuffer[i] ) {
9691       free( stream_.userBuffer[i] );
9692       stream_.userBuffer[i] = 0;
9696   if ( stream_.deviceBuffer ) {
9697     free( stream_.deviceBuffer );
9698     stream_.deviceBuffer = 0;
9701   stream_.mode = UNINITIALIZED;
9702   stream_.state = STREAM_CLOSED;
// Start a stopped OSS stream.  OSS begins playing/recording as soon as
// samples are written/read, so this only flips the state to RUNNING and
// wakes the callback thread parked on the handle's condition variable.
9705 void RtApiOss :: startStream()
9708   if ( stream_.state == STREAM_RUNNING ) {
9709     errorText_ = "RtApiOss::startStream(): the stream is already running!";
9710     error( RtAudioError::WARNING );
9714   MUTEX_LOCK( &stream_.mutex );
// Reset the stream-time reference when high-resolution time is available.
9716   #if defined( HAVE_GETTIMEOFDAY )
9717   gettimeofday( &stream_.lastTickTimestamp, NULL );
9720   stream_.state = STREAM_RUNNING;
9722   // No need to do anything else here ... OSS automatically starts
9723   // when fed samples.
9725   MUTEX_UNLOCK( &stream_.mutex );
// Wake the callback thread blocked in callbackEvent()'s cond-wait.
9727   OssHandle *handle = (OssHandle *) stream_.apiHandle;
9728   pthread_cond_signal( &handle->runnable );
// Stop a running OSS stream gracefully: pad the output with a few
// buffers of silence so queued audio plays out, then halt the device(s)
// with SNDCTL_DSP_HALT.  Reports SYSTEM_ERROR if any ioctl/write failed.
// NOTE(review): interior lines were elided in this extraction.
9731 void RtApiOss :: stopStream()
9734   if ( stream_.state == STREAM_STOPPED ) {
9735     errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9736     error( RtAudioError::WARNING );
9740   MUTEX_LOCK( &stream_.mutex );
9742   // The state might change while waiting on a mutex.
9743   if ( stream_.state == STREAM_STOPPED ) {
9744     MUTEX_UNLOCK( &stream_.mutex );
9749   OssHandle *handle = (OssHandle *) stream_.apiHandle;
9750   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9752     // Flush the output with zeros a few times.
// Pick the buffer actually fed to the device: the converted device
// buffer when conversion is active, else the raw user buffer.
9755     RtAudioFormat format;
9757     if ( stream_.doConvertBuffer[0] ) {
9758       buffer = stream_.deviceBuffer;
9759       samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9760       format = stream_.deviceFormat[0];
9763       buffer = stream_.userBuffer[0];
9764       samples = stream_.bufferSize * stream_.nUserChannels[0];
9765       format = stream_.userFormat;
// Write nBuffers+1 buffers of silence so pending audio drains out.
9768     memset( buffer, 0, samples * formatBytes(format) );
9769     for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9770       result = write( handle->id[0], buffer, samples * formatBytes(format) );
9771       if ( result == -1 ) {
9772         errorText_ = "RtApiOss::stopStream: audio write error.";
9773         error( RtAudioError::WARNING );
9777     result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9778     if ( result == -1 ) {
9779       errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9780       errorText_ = errorStream_.str();
// Clear the duplex trigger so a restart re-arms input/output together.
9783     handle->triggered = false;
// Halt capture too, unless duplex shares one descriptor (already halted).
9786   if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9787     result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9788     if ( result == -1 ) {
// NOTE(review): this input-side message reports stream_.device[0];
// stream_.device[1] looks like the intended index — TODO confirm.
9789       errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9790       errorText_ = errorStream_.str();
9796   stream_.state = STREAM_STOPPED;
9797   MUTEX_UNLOCK( &stream_.mutex );
// Only escalate to SYSTEM_ERROR if the last operation actually failed.
9799   if ( result != -1 ) return;
9800   error( RtAudioError::SYSTEM_ERROR );
// Abort a running OSS stream: halt the device(s) immediately with
// SNDCTL_DSP_HALT, without draining queued output (contrast with
// stopStream(), which flushes silence first).
// NOTE(review): interior lines were elided in this extraction.
9803 void RtApiOss :: abortStream()
9806   if ( stream_.state == STREAM_STOPPED ) {
9807     errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9808     error( RtAudioError::WARNING );
9812   MUTEX_LOCK( &stream_.mutex );
9814   // The state might change while waiting on a mutex.
9815   if ( stream_.state == STREAM_STOPPED ) {
9816     MUTEX_UNLOCK( &stream_.mutex );
9821   OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Halt playback (id[0]) first ...
9822   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9823     result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9824     if ( result == -1 ) {
9825       errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9826       errorText_ = errorStream_.str();
// Reset the duplex trigger so a restart re-arms input/output together.
9829     handle->triggered = false;
// ... then capture (id[1]), unless duplex shares one descriptor.
9832   if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9833     result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9834     if ( result == -1 ) {
// NOTE(review): this input-side message reports stream_.device[0];
// stream_.device[1] looks like the intended index — TODO confirm.
9835       errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9836       errorText_ = errorStream_.str();
9842   stream_.state = STREAM_STOPPED;
9843   MUTEX_UNLOCK( &stream_.mutex );
// Only escalate to SYSTEM_ERROR if the last operation actually failed.
9845   if ( result != -1 ) return;
9846   error( RtAudioError::SYSTEM_ERROR );
9849 void RtApiOss :: callbackEvent()
9851 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9852 if ( stream_.state == STREAM_STOPPED ) {
9853 MUTEX_LOCK( &stream_.mutex );
9854 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9855 if ( stream_.state != STREAM_RUNNING ) {
9856 MUTEX_UNLOCK( &stream_.mutex );
9859 MUTEX_UNLOCK( &stream_.mutex );
9862 if ( stream_.state == STREAM_CLOSED ) {
9863 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9864 error( RtAudioError::WARNING );
9868 // Invoke user callback to get fresh output data.
9869 int doStopStream = 0;
9870 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9871 double streamTime = getStreamTime();
9872 RtAudioStreamStatus status = 0;
9873 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9874 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9875 handle->xrun[0] = false;
9877 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9878 status |= RTAUDIO_INPUT_OVERFLOW;
9879 handle->xrun[1] = false;
9881 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9882 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
9883 if ( doStopStream == 2 ) {
9884 this->abortStream();
9888 MUTEX_LOCK( &stream_.mutex );
9890 // The state might change while waiting on a mutex.
9891 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9896 RtAudioFormat format;
9898 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9900 // Setup parameters and do buffer conversion if necessary.
9901 if ( stream_.doConvertBuffer[0] ) {
9902 buffer = stream_.deviceBuffer;
9903 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9904 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9905 format = stream_.deviceFormat[0];
9908 buffer = stream_.userBuffer[0];
9909 samples = stream_.bufferSize * stream_.nUserChannels[0];
9910 format = stream_.userFormat;
9913 // Do byte swapping if necessary.
9914 if ( stream_.doByteSwap[0] )
9915 byteSwapBuffer( buffer, samples, format );
9917 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9919 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9920 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9921 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9922 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9923 handle->triggered = true;
9926 // Write samples to device.
9927 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9929 if ( result == -1 ) {
9930 // We'll assume this is an underrun, though there isn't a
9931 // specific means for determining that.
9932 handle->xrun[0] = true;
9933 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9934 error( RtAudioError::WARNING );
9935 // Continue on to input section.
9939 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9941 // Setup parameters.
9942 if ( stream_.doConvertBuffer[1] ) {
9943 buffer = stream_.deviceBuffer;
9944 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9945 format = stream_.deviceFormat[1];
9948 buffer = stream_.userBuffer[1];
9949 samples = stream_.bufferSize * stream_.nUserChannels[1];
9950 format = stream_.userFormat;
9953 // Read samples from device.
9954 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9956 if ( result == -1 ) {
9957 // We'll assume this is an overrun, though there isn't a
9958 // specific means for determining that.
9959 handle->xrun[1] = true;
9960 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9961 error( RtAudioError::WARNING );
9965 // Do byte swapping if necessary.
9966 if ( stream_.doByteSwap[1] )
9967 byteSwapBuffer( buffer, samples, format );
9969 // Do buffer conversion if necessary.
9970 if ( stream_.doConvertBuffer[1] )
9971 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9975 MUTEX_UNLOCK( &stream_.mutex );
9977 RtApi::tickStreamTime();
9978 if ( doStopStream == 1 ) this->stopStream();
9981 static void *ossCallbackHandler( void *ptr )
9983 CallbackInfo *info = (CallbackInfo *) ptr;
9984 RtApiOss *object = (RtApiOss *) info->object;
9985 bool *isRunning = &info->isRunning;
9987 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9988 if (info->doRealtime) {
9989 std::cerr << "RtAudio oss: " <<
9990 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9991 "running realtime scheduling" << std::endl;
9995 while ( *isRunning == true ) {
9996 pthread_testcancel();
9997 object->callbackEvent();
10000 pthread_exit( NULL );
10003 //******************** End of __LINUX_OSS__ *********************//
10007 // *************************************************** //
10009 // Protected common (OS-independent) RtAudio methods.
10011 // *************************************************** //
10013 // This method can be modified to control the behavior of error
10014 // message printing.
10015 RtAudioErrorType RtApi :: error( RtAudioErrorType type )
10017 errorStream_.str(""); // clear the ostringstream to avoid repeated messages
10019 // Don't output warnings if showWarnings_ is false
10020 if ( type == RTAUDIO_WARNING && showWarnings_ == false ) return type;
10022 if ( errorCallback_ ) {
10023 const std::string errorMessage = errorText_;
10024 errorCallback_( type, errorMessage );
10027 std::cerr << '\n' << errorText_ << "\n\n";
10032 void RtApi :: verifyStream()
10034 if ( stream_.state == STREAM_CLOSED ) {
10035 errorText_ = "RtApi:: a stream is not open!";
10036 error( RtAudioError::INVALID_USE );
10041 void RtApi :: clearStreamInfo()
10043 stream_.mode = UNINITIALIZED;
10044 stream_.state = STREAM_CLOSED;
10045 stream_.sampleRate = 0;
10046 stream_.bufferSize = 0;
10047 stream_.nBuffers = 0;
10048 stream_.userFormat = 0;
10049 stream_.userInterleaved = true;
10050 stream_.streamTime = 0.0;
10051 stream_.apiHandle = 0;
10052 stream_.deviceBuffer = 0;
10053 stream_.callbackInfo.callback = 0;
10054 stream_.callbackInfo.userData = 0;
10055 stream_.callbackInfo.isRunning = false;
10056 stream_.callbackInfo.deviceDisconnected = false;
10057 for ( int i=0; i<2; i++ ) {
10058 stream_.device[i] = 11111;
10059 stream_.doConvertBuffer[i] = false;
10060 stream_.deviceInterleaved[i] = true;
10061 stream_.doByteSwap[i] = false;
10062 stream_.nUserChannels[i] = 0;
10063 stream_.nDeviceChannels[i] = 0;
10064 stream_.channelOffset[i] = 0;
10065 stream_.deviceFormat[i] = 0;
10066 stream_.latency[i] = 0;
10067 stream_.userBuffer[i] = 0;
10068 stream_.convertInfo[i].channels = 0;
10069 stream_.convertInfo[i].inJump = 0;
10070 stream_.convertInfo[i].outJump = 0;
10071 stream_.convertInfo[i].inFormat = 0;
10072 stream_.convertInfo[i].outFormat = 0;
10073 stream_.convertInfo[i].inOffset.clear();
10074 stream_.convertInfo[i].outOffset.clear();
10078 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10080 if ( format == RTAUDIO_SINT16 )
10082 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10084 else if ( format == RTAUDIO_FLOAT64 )
10086 else if ( format == RTAUDIO_SINT24 )
10088 else if ( format == RTAUDIO_SINT8 )
10091 errorText_ = "RtApi::formatBytes: undefined format.";
10092 error( RTAUDIO_WARNING );
10097 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
10099 if ( mode == INPUT ) { // convert device to user buffer
10100 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10101 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10102 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10103 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10105 else { // convert user to device buffer
10106 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10107 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10108 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10109 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
10112 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10113 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10115 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10117 // Set up the interleave/deinterleave offsets.
10118 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10119 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10120 ( mode == INPUT && stream_.userInterleaved ) ) {
10121 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10122 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10123 stream_.convertInfo[mode].outOffset.push_back( k );
10124 stream_.convertInfo[mode].inJump = 1;
10128 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10129 stream_.convertInfo[mode].inOffset.push_back( k );
10130 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10131 stream_.convertInfo[mode].outJump = 1;
10135 else { // no (de)interleaving
10136 if ( stream_.userInterleaved ) {
10137 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10138 stream_.convertInfo[mode].inOffset.push_back( k );
10139 stream_.convertInfo[mode].outOffset.push_back( k );
10143 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10144 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10145 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10146 stream_.convertInfo[mode].inJump = 1;
10147 stream_.convertInfo[mode].outJump = 1;
10152 // Add channel offset.
10153 if ( firstChannel > 0 ) {
10154 if ( stream_.deviceInterleaved[mode] ) {
10155 if ( mode == OUTPUT ) {
10156 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10157 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10160 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10161 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10165 if ( mode == OUTPUT ) {
10166 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10167 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10170 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10171 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10177 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10179 // This function does format conversion, input/output channel compensation, and
10180 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10181 // the lower three bytes of a 32-bit integer.
10183 // Clear our device buffer when in/out duplex device channels are different
10184 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10185 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10186 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10189 if (info.outFormat == RTAUDIO_FLOAT64) {
10191 Float64 *out = (Float64 *)outBuffer;
10193 if (info.inFormat == RTAUDIO_SINT8) {
10194 signed char *in = (signed char *)inBuffer;
10195 scale = 1.0 / 127.5;
10196 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10197 for (j=0; j<info.channels; j++) {
10198 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10199 out[info.outOffset[j]] += 0.5;
10200 out[info.outOffset[j]] *= scale;
10203 out += info.outJump;
10206 else if (info.inFormat == RTAUDIO_SINT16) {
10207 Int16 *in = (Int16 *)inBuffer;
10208 scale = 1.0 / 32767.5;
10209 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10210 for (j=0; j<info.channels; j++) {
10211 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10212 out[info.outOffset[j]] += 0.5;
10213 out[info.outOffset[j]] *= scale;
10216 out += info.outJump;
10219 else if (info.inFormat == RTAUDIO_SINT24) {
10220 Int24 *in = (Int24 *)inBuffer;
10221 scale = 1.0 / 8388607.5;
10222 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10223 for (j=0; j<info.channels; j++) {
10224 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10225 out[info.outOffset[j]] += 0.5;
10226 out[info.outOffset[j]] *= scale;
10229 out += info.outJump;
10232 else if (info.inFormat == RTAUDIO_SINT32) {
10233 Int32 *in = (Int32 *)inBuffer;
10234 scale = 1.0 / 2147483647.5;
10235 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10236 for (j=0; j<info.channels; j++) {
10237 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10238 out[info.outOffset[j]] += 0.5;
10239 out[info.outOffset[j]] *= scale;
10242 out += info.outJump;
10245 else if (info.inFormat == RTAUDIO_FLOAT32) {
10246 Float32 *in = (Float32 *)inBuffer;
10247 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10248 for (j=0; j<info.channels; j++) {
10249 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10252 out += info.outJump;
10255 else if (info.inFormat == RTAUDIO_FLOAT64) {
10256 // Channel compensation and/or (de)interleaving only.
10257 Float64 *in = (Float64 *)inBuffer;
10258 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10259 for (j=0; j<info.channels; j++) {
10260 out[info.outOffset[j]] = in[info.inOffset[j]];
10263 out += info.outJump;
10267 else if (info.outFormat == RTAUDIO_FLOAT32) {
10269 Float32 *out = (Float32 *)outBuffer;
10271 if (info.inFormat == RTAUDIO_SINT8) {
10272 signed char *in = (signed char *)inBuffer;
10273 scale = (Float32) ( 1.0 / 127.5 );
10274 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10275 for (j=0; j<info.channels; j++) {
10276 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10277 out[info.outOffset[j]] += 0.5;
10278 out[info.outOffset[j]] *= scale;
10281 out += info.outJump;
10284 else if (info.inFormat == RTAUDIO_SINT16) {
10285 Int16 *in = (Int16 *)inBuffer;
10286 scale = (Float32) ( 1.0 / 32767.5 );
10287 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10288 for (j=0; j<info.channels; j++) {
10289 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10290 out[info.outOffset[j]] += 0.5;
10291 out[info.outOffset[j]] *= scale;
10294 out += info.outJump;
10297 else if (info.inFormat == RTAUDIO_SINT24) {
10298 Int24 *in = (Int24 *)inBuffer;
10299 scale = (Float32) ( 1.0 / 8388607.5 );
10300 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10301 for (j=0; j<info.channels; j++) {
10302 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10303 out[info.outOffset[j]] += 0.5;
10304 out[info.outOffset[j]] *= scale;
10307 out += info.outJump;
10310 else if (info.inFormat == RTAUDIO_SINT32) {
10311 Int32 *in = (Int32 *)inBuffer;
10312 scale = (Float32) ( 1.0 / 2147483647.5 );
10313 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10314 for (j=0; j<info.channels; j++) {
10315 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10316 out[info.outOffset[j]] += 0.5;
10317 out[info.outOffset[j]] *= scale;
10320 out += info.outJump;
10323 else if (info.inFormat == RTAUDIO_FLOAT32) {
10324 // Channel compensation and/or (de)interleaving only.
10325 Float32 *in = (Float32 *)inBuffer;
10326 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10327 for (j=0; j<info.channels; j++) {
10328 out[info.outOffset[j]] = in[info.inOffset[j]];
10331 out += info.outJump;
10334 else if (info.inFormat == RTAUDIO_FLOAT64) {
10335 Float64 *in = (Float64 *)inBuffer;
10336 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10337 for (j=0; j<info.channels; j++) {
10338 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10341 out += info.outJump;
10345 else if (info.outFormat == RTAUDIO_SINT32) {
10346 Int32 *out = (Int32 *)outBuffer;
10347 if (info.inFormat == RTAUDIO_SINT8) {
10348 signed char *in = (signed char *)inBuffer;
10349 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10350 for (j=0; j<info.channels; j++) {
10351 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10352 out[info.outOffset[j]] <<= 24;
10355 out += info.outJump;
10358 else if (info.inFormat == RTAUDIO_SINT16) {
10359 Int16 *in = (Int16 *)inBuffer;
10360 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10361 for (j=0; j<info.channels; j++) {
10362 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10363 out[info.outOffset[j]] <<= 16;
10366 out += info.outJump;
10369 else if (info.inFormat == RTAUDIO_SINT24) {
10370 Int24 *in = (Int24 *)inBuffer;
10371 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10372 for (j=0; j<info.channels; j++) {
10373 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10374 out[info.outOffset[j]] <<= 8;
10377 out += info.outJump;
10380 else if (info.inFormat == RTAUDIO_SINT32) {
10381 // Channel compensation and/or (de)interleaving only.
10382 Int32 *in = (Int32 *)inBuffer;
10383 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10384 for (j=0; j<info.channels; j++) {
10385 out[info.outOffset[j]] = in[info.inOffset[j]];
10388 out += info.outJump;
10391 else if (info.inFormat == RTAUDIO_FLOAT32) {
10392 Float32 *in = (Float32 *)inBuffer;
10393 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10394 for (j=0; j<info.channels; j++) {
10395 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10398 out += info.outJump;
10401 else if (info.inFormat == RTAUDIO_FLOAT64) {
10402 Float64 *in = (Float64 *)inBuffer;
10403 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10404 for (j=0; j<info.channels; j++) {
10405 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10408 out += info.outJump;
10412 else if (info.outFormat == RTAUDIO_SINT24) {
10413 Int24 *out = (Int24 *)outBuffer;
10414 if (info.inFormat == RTAUDIO_SINT8) {
10415 signed char *in = (signed char *)inBuffer;
10416 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10417 for (j=0; j<info.channels; j++) {
10418 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10419 //out[info.outOffset[j]] <<= 16;
10422 out += info.outJump;
10425 else if (info.inFormat == RTAUDIO_SINT16) {
10426 Int16 *in = (Int16 *)inBuffer;
10427 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10428 for (j=0; j<info.channels; j++) {
10429 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10430 //out[info.outOffset[j]] <<= 8;
10433 out += info.outJump;
10436 else if (info.inFormat == RTAUDIO_SINT24) {
10437 // Channel compensation and/or (de)interleaving only.
10438 Int24 *in = (Int24 *)inBuffer;
10439 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10440 for (j=0; j<info.channels; j++) {
10441 out[info.outOffset[j]] = in[info.inOffset[j]];
10444 out += info.outJump;
10447 else if (info.inFormat == RTAUDIO_SINT32) {
10448 Int32 *in = (Int32 *)inBuffer;
10449 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10450 for (j=0; j<info.channels; j++) {
10451 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10452 //out[info.outOffset[j]] >>= 8;
10455 out += info.outJump;
10458 else if (info.inFormat == RTAUDIO_FLOAT32) {
10459 Float32 *in = (Float32 *)inBuffer;
10460 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10461 for (j=0; j<info.channels; j++) {
10462 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10465 out += info.outJump;
10468 else if (info.inFormat == RTAUDIO_FLOAT64) {
10469 Float64 *in = (Float64 *)inBuffer;
10470 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10471 for (j=0; j<info.channels; j++) {
10472 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10475 out += info.outJump;
10479 else if (info.outFormat == RTAUDIO_SINT16) {
10480 Int16 *out = (Int16 *)outBuffer;
10481 if (info.inFormat == RTAUDIO_SINT8) {
10482 signed char *in = (signed char *)inBuffer;
10483 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10484 for (j=0; j<info.channels; j++) {
10485 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10486 out[info.outOffset[j]] <<= 8;
10489 out += info.outJump;
10492 else if (info.inFormat == RTAUDIO_SINT16) {
10493 // Channel compensation and/or (de)interleaving only.
10494 Int16 *in = (Int16 *)inBuffer;
10495 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10496 for (j=0; j<info.channels; j++) {
10497 out[info.outOffset[j]] = in[info.inOffset[j]];
10500 out += info.outJump;
10503 else if (info.inFormat == RTAUDIO_SINT24) {
10504 Int24 *in = (Int24 *)inBuffer;
10505 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10506 for (j=0; j<info.channels; j++) {
10507 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10510 out += info.outJump;
10513 else if (info.inFormat == RTAUDIO_SINT32) {
10514 Int32 *in = (Int32 *)inBuffer;
10515 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10516 for (j=0; j<info.channels; j++) {
10517 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10520 out += info.outJump;
10523 else if (info.inFormat == RTAUDIO_FLOAT32) {
10524 Float32 *in = (Float32 *)inBuffer;
10525 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10526 for (j=0; j<info.channels; j++) {
10527 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10530 out += info.outJump;
10533 else if (info.inFormat == RTAUDIO_FLOAT64) {
10534 Float64 *in = (Float64 *)inBuffer;
10535 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10536 for (j=0; j<info.channels; j++) {
10537 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10540 out += info.outJump;
10544 else if (info.outFormat == RTAUDIO_SINT8) {
10545 signed char *out = (signed char *)outBuffer;
10546 if (info.inFormat == RTAUDIO_SINT8) {
10547 // Channel compensation and/or (de)interleaving only.
10548 signed char *in = (signed char *)inBuffer;
10549 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10550 for (j=0; j<info.channels; j++) {
10551 out[info.outOffset[j]] = in[info.inOffset[j]];
10554 out += info.outJump;
10557 if (info.inFormat == RTAUDIO_SINT16) {
10558 Int16 *in = (Int16 *)inBuffer;
10559 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10560 for (j=0; j<info.channels; j++) {
10561 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10564 out += info.outJump;
10567 else if (info.inFormat == RTAUDIO_SINT24) {
10568 Int24 *in = (Int24 *)inBuffer;
10569 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10570 for (j=0; j<info.channels; j++) {
10571 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10574 out += info.outJump;
10577 else if (info.inFormat == RTAUDIO_SINT32) {
10578 Int32 *in = (Int32 *)inBuffer;
10579 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10580 for (j=0; j<info.channels; j++) {
10581 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10584 out += info.outJump;
10587 else if (info.inFormat == RTAUDIO_FLOAT32) {
10588 Float32 *in = (Float32 *)inBuffer;
10589 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10590 for (j=0; j<info.channels; j++) {
10591 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10594 out += info.outJump;
10597 else if (info.inFormat == RTAUDIO_FLOAT64) {
10598 Float64 *in = (Float64 *)inBuffer;
10599 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10600 for (j=0; j<info.channels; j++) {
10601 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10604 out += info.outJump;
10610 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10611 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10612 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10614 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10620 if ( format == RTAUDIO_SINT16 ) {
10621 for ( unsigned int i=0; i<samples; i++ ) {
10622 // Swap 1st and 2nd bytes.
10627 // Increment 2 bytes.
10631 else if ( format == RTAUDIO_SINT32 ||
10632 format == RTAUDIO_FLOAT32 ) {
10633 for ( unsigned int i=0; i<samples; i++ ) {
10634 // Swap 1st and 4th bytes.
10639 // Swap 2nd and 3rd bytes.
10645 // Increment 3 more bytes.
10649 else if ( format == RTAUDIO_SINT24 ) {
10650 for ( unsigned int i=0; i<samples; i++ ) {
10651 // Swap 1st and 3rd bytes.
10656 // Increment 2 more bytes.
10660 else if ( format == RTAUDIO_FLOAT64 ) {
10661 for ( unsigned int i=0; i<samples; i++ ) {
10662 // Swap 1st and 8th bytes
10667 // Swap 2nd and 7th bytes
10673 // Swap 3rd and 6th bytes
10679 // Swap 4th and 5th bytes
10685 // Increment 5 more bytes.
10691 // Indentation settings for Vim and Emacs
10693 // Local Variables:
10694 // c-basic-offset: 2
10695 // indent-tabs-mode: nil
10698 // vim: et sts=2 sw=2