1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio GitHub site: https://github.com/thestk/rtaudio
11 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
13 RtAudio: realtime audio i/o C++ classes
14 Copyright (c) 2001-2019 Gary P. Scavone
16 Permission is hereby granted, free of charge, to any person
17 obtaining a copy of this software and associated documentation files
18 (the "Software"), to deal in the Software without restriction,
19 including without limitation the rights to use, copy, modify, merge,
20 publish, distribute, sublicense, and/or sell copies of the Software,
21 and to permit persons to whom the Software is furnished to do so,
22 subject to the following conditions:
24 The above copyright notice and this permission notice shall be
25 included in all copies or substantial portions of the Software.
27 Any person wishing to distribute modifications to the Software is
28 asked to send the modifications to the original developer so that
29 they can be incorporated into the canonical version. This is,
30 however, not a binding provision of this license.
32 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
33 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
34 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
35 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
36 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
37 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
38 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 /************************************************************************/
42 // RtAudio: Version 5.1.0
52 // Static variable definitions.
53 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
54 const unsigned int RtApi::SAMPLE_RATES[] = {
55 4000, 5512, 8000, 9600, 11025, 16000, 22050,
56 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform-specific mutex wrappers and character-conversion helpers.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)

  // Windows: wrap the Win32 critical-section API.
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  // Narrow strings pass through unchanged.
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Wide strings are converted to UTF-8.  The first call computes the
  // required buffer size including the terminating null; the string is
  // sized to length-1 so it holds only the characters, and the second
  // call writes directly into its buffer.
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    if ( length <= 0 ) return std::string(); // conversion failed or empty input
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)

  // POSIX platforms: wrap pthread mutexes.
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)

#else

  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions

#endif
91 // *************************************************** //
93 // RtAudio definitions.
95 // *************************************************** //
97 std::string RtAudio :: getVersion( void )
99 return RTAUDIO_VERSION;
// Define API names and display names.
// Must be in same order as API enum.
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "alsa" , "ALSA" },
  { "pulse" , "Pulse" },
  { "oss" , "OpenSoundSystem" },
  { "jack" , "Jack" },
  { "core" , "CoreAudio" },
  { "wasapi" , "WASAPI" },
  { "asio" , "ASIO" },
  { "ds" , "DirectSound" },
  { "dummy" , "Dummy" },
};

// Number of rows in the name table; checked against RtAudio::NUM_APIS below.
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
121 // The order here will control the order of RtAudio's API search in
123 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
124 #if defined(__UNIX_JACK__)
127 #if defined(__LINUX_PULSE__)
128 RtAudio::LINUX_PULSE,
130 #if defined(__LINUX_ALSA__)
133 #if defined(__LINUX_OSS__)
136 #if defined(__WINDOWS_ASIO__)
137 RtAudio::WINDOWS_ASIO,
139 #if defined(__WINDOWS_WASAPI__)
140 RtAudio::WINDOWS_WASAPI,
142 #if defined(__WINDOWS_DS__)
145 #if defined(__MACOSX_CORE__)
146 RtAudio::MACOSX_CORE,
148 #if defined(__RTAUDIO_DUMMY__)
149 RtAudio::RTAUDIO_DUMMY,
151 RtAudio::UNSPECIFIED,
154 extern "C" const unsigned int rtaudio_num_compiled_apis =
155 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
158 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
159 // If the build breaks here, check that they match.
160 template<bool b> class StaticAssert { private: StaticAssert() {} };
161 template<> class StaticAssert<true>{ public: StaticAssert() {} };
162 class StaticAssertions { StaticAssertions() {
163 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
166 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
168 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
169 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
172 std::string RtAudio :: getApiName( RtAudio::Api api )
174 if (api < 0 || api >= RtAudio::NUM_APIS)
176 return rtaudio_api_names[api][0];
179 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
181 if (api < 0 || api >= RtAudio::NUM_APIS)
183 return rtaudio_api_names[api][1];
186 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
189 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
190 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
191 return rtaudio_compiled_apis[i];
192 return RtAudio::UNSPECIFIED;
195 void RtAudio :: openRtApi( RtAudio::Api api )
201 #if defined(__UNIX_JACK__)
202 if ( api == UNIX_JACK )
203 rtapi_ = new RtApiJack();
205 #if defined(__LINUX_ALSA__)
206 if ( api == LINUX_ALSA )
207 rtapi_ = new RtApiAlsa();
209 #if defined(__LINUX_PULSE__)
210 if ( api == LINUX_PULSE )
211 rtapi_ = new RtApiPulse();
213 #if defined(__LINUX_OSS__)
214 if ( api == LINUX_OSS )
215 rtapi_ = new RtApiOss();
217 #if defined(__WINDOWS_ASIO__)
218 if ( api == WINDOWS_ASIO )
219 rtapi_ = new RtApiAsio();
221 #if defined(__WINDOWS_WASAPI__)
222 if ( api == WINDOWS_WASAPI )
223 rtapi_ = new RtApiWasapi();
225 #if defined(__WINDOWS_DS__)
226 if ( api == WINDOWS_DS )
227 rtapi_ = new RtApiDs();
229 #if defined(__MACOSX_CORE__)
230 if ( api == MACOSX_CORE )
231 rtapi_ = new RtApiCore();
233 #if defined(__RTAUDIO_DUMMY__)
234 if ( api == RTAUDIO_DUMMY )
235 rtapi_ = new RtApiDummy();
239 RtAudio :: RtAudio( RtAudio::Api api )
243 if ( api != UNSPECIFIED ) {
244 // Attempt to open the specified API.
246 if ( rtapi_ ) return;
248 // No compiled support for specified API value. Issue a debug
249 // warning and continue as if no API was specified.
250 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
253 // Iterate through the compiled APIs and return as soon as we find
254 // one with at least one device or we reach the end of the list.
255 std::vector< RtAudio::Api > apis;
256 getCompiledApi( apis );
257 for ( unsigned int i=0; i<apis.size(); i++ ) {
258 openRtApi( apis[i] );
259 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
262 if ( rtapi_ ) return;
264 // It should not be possible to get here because the preprocessor
265 // definition __RTAUDIO_DUMMY__ is automatically defined in RtAudio.h
266 // if no API-specific definitions are passed to the compiler. But just
267 // in case something weird happens, we'll thow an error.
268 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
269 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
272 RtAudio :: ~RtAudio()
278 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
279 RtAudio::StreamParameters *inputParameters,
280 RtAudioFormat format, unsigned int sampleRate,
281 unsigned int *bufferFrames,
282 RtAudioCallback callback, void *userData,
283 RtAudio::StreamOptions *options,
284 RtAudioErrorCallback errorCallback )
286 return rtapi_->openStream( outputParameters, inputParameters, format,
287 sampleRate, bufferFrames, callback,
288 userData, options, errorCallback );
291 // *************************************************** //
293 // Public RtApi definitions (see end of file for
294 // private or protected utility functions).
296 // *************************************************** //
301 MUTEX_INITIALIZE( &stream_.mutex );
302 showWarnings_ = true;
303 firstErrorOccurred_ = false;
308 MUTEX_DESTROY( &stream_.mutex );
311 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
312 RtAudio::StreamParameters *iParams,
313 RtAudioFormat format, unsigned int sampleRate,
314 unsigned int *bufferFrames,
315 RtAudioCallback callback, void *userData,
316 RtAudio::StreamOptions *options,
317 RtAudioErrorCallback errorCallback )
319 if ( stream_.state != STREAM_CLOSED ) {
320 errorText_ = "RtApi::openStream: a stream is already open!";
321 error( RtAudioError::INVALID_USE );
325 // Clear stream information potentially left from a previously open stream.
328 if ( oParams && oParams->nChannels < 1 ) {
329 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
330 error( RtAudioError::INVALID_USE );
334 if ( iParams && iParams->nChannels < 1 ) {
335 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
336 error( RtAudioError::INVALID_USE );
340 if ( oParams == NULL && iParams == NULL ) {
341 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
342 error( RtAudioError::INVALID_USE );
346 if ( formatBytes(format) == 0 ) {
347 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
348 error( RtAudioError::INVALID_USE );
352 unsigned int nDevices = getDeviceCount();
353 unsigned int oChannels = 0;
355 oChannels = oParams->nChannels;
356 if ( oParams->deviceId >= nDevices ) {
357 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
358 error( RtAudioError::INVALID_USE );
363 unsigned int iChannels = 0;
365 iChannels = iParams->nChannels;
366 if ( iParams->deviceId >= nDevices ) {
367 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
368 error( RtAudioError::INVALID_USE );
375 if ( oChannels > 0 ) {
377 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
378 sampleRate, format, bufferFrames, options );
379 if ( result == false ) {
380 error( RtAudioError::SYSTEM_ERROR );
385 if ( iChannels > 0 ) {
387 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
388 sampleRate, format, bufferFrames, options );
389 if ( result == false ) {
390 if ( oChannels > 0 ) closeStream();
391 error( RtAudioError::SYSTEM_ERROR );
396 stream_.callbackInfo.callback = (void *) callback;
397 stream_.callbackInfo.userData = userData;
398 stream_.callbackInfo.errorCallback = (void *) errorCallback;
400 if ( options ) options->numberOfBuffers = stream_.nBuffers;
401 stream_.state = STREAM_STOPPED;
404 unsigned int RtApi :: getDefaultInputDevice( void )
406 // Should be implemented in subclasses if possible.
410 unsigned int RtApi :: getDefaultOutputDevice( void )
412 // Should be implemented in subclasses if possible.
416 void RtApi :: closeStream( void )
418 // MUST be implemented in subclasses!
422 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
423 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
424 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
425 RtAudio::StreamOptions * /*options*/ )
427 // MUST be implemented in subclasses!
431 void RtApi :: tickStreamTime( void )
433 // Subclasses that do not provide their own implementation of
434 // getStreamTime should call this function once per buffer I/O to
435 // provide basic stream time support.
437 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
440 #if defined( HAVE_GETTIMEOFDAY )
441 gettimeofday( &stream_.lastTickTimestamp, NULL );
446 long RtApi :: getStreamLatency( void )
448 long totalLatency = 0;
449 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
450 totalLatency = stream_.latency[0];
451 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
452 totalLatency += stream_.latency[1];
458 double RtApi :: getStreamTime( void )
460 #if defined( HAVE_GETTIMEOFDAY )
461 // Return a very accurate estimate of the stream time by
462 // adding in the elapsed time since the last tick.
466 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
467 return stream_.streamTime;
469 gettimeofday( &now, NULL );
470 then = stream_.lastTickTimestamp;
471 return stream_.streamTime +
472 ((now.tv_sec + 0.000001 * now.tv_usec) -
473 (then.tv_sec + 0.000001 * then.tv_usec));
475 return stream_.streamTime;
480 void RtApi :: setStreamTime( double time )
485 stream_.streamTime = time;
487 #if defined( HAVE_GETTIMEOFDAY )
488 gettimeofday( &stream_.lastTickTimestamp, NULL );
494 unsigned int RtApi :: getStreamSampleRate( void )
498 return stream_.sampleRate;
503 // *************************************************** //
505 // OS/API-specific methods.
507 // *************************************************** //
509 #if defined(__MACOSX_CORE__)
511 // The OS X CoreAudio API is designed to use a separate callback
512 // procedure for each of its audio devices. A single RtAudio duplex
513 // stream using two different devices is supported here, though it
514 // cannot be guaranteed to always behave correctly because we cannot
515 // synchronize these two callbacks.
517 // A property listener is installed for over/underrun information.
518 // However, no functionality is currently provided to allow property
519 // listeners to trigger user handlers because it is unclear what could
520 // be done if a critical stream parameter (buffer size, sample rate,
521 // device disconnect) notification arrived. The listeners entail
522 // quite a bit of extra code and most likely, a user program wouldn't
523 // be prepared for the result anyway. However, we do provide a flag
524 // to the client callback function to inform of an over/underrun.
526 // A structure to hold various information related to the CoreAudio API
529 AudioDeviceID id[2]; // device ids
530 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
531 AudioDeviceIOProcID procId[2];
533 UInt32 iStream[2]; // device stream index (or first if using multiple)
534 UInt32 nStreams[2]; // number of streams to use
537 pthread_cond_t condition;
538 int drainCounter; // Tracks callback counts when draining
539 bool internalDrain; // Indicates if stop is initiated from callback or not.
542 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
545 RtApiCore:: RtApiCore()
547 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
548 // This is a largely undocumented but absolutely necessary
549 // requirement starting with OS-X 10.6. If not called, queries and
550 // updates to various audio device properties are not handled
552 CFRunLoopRef theRunLoop = NULL;
553 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
554 kAudioObjectPropertyScopeGlobal,
555 kAudioObjectPropertyElementMaster };
556 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
557 if ( result != noErr ) {
558 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
559 error( RtAudioError::WARNING );
564 RtApiCore :: ~RtApiCore()
566 // The subclass destructor gets called before the base class
567 // destructor, so close an existing stream before deallocating
568 // apiDeviceId memory.
569 if ( stream_.state != STREAM_CLOSED ) closeStream();
572 unsigned int RtApiCore :: getDeviceCount( void )
574 // Find out how many audio devices there are, if any.
576 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
577 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
578 if ( result != noErr ) {
579 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
580 error( RtAudioError::WARNING );
584 return dataSize / sizeof( AudioDeviceID );
587 unsigned int RtApiCore :: getDefaultInputDevice( void )
589 unsigned int nDevices = getDeviceCount();
590 if ( nDevices <= 1 ) return 0;
593 UInt32 dataSize = sizeof( AudioDeviceID );
594 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
595 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
596 if ( result != noErr ) {
597 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
598 error( RtAudioError::WARNING );
602 dataSize *= nDevices;
603 AudioDeviceID deviceList[ nDevices ];
604 property.mSelector = kAudioHardwarePropertyDevices;
605 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
606 if ( result != noErr ) {
607 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
608 error( RtAudioError::WARNING );
612 for ( unsigned int i=0; i<nDevices; i++ )
613 if ( id == deviceList[i] ) return i;
615 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
616 error( RtAudioError::WARNING );
620 unsigned int RtApiCore :: getDefaultOutputDevice( void )
622 unsigned int nDevices = getDeviceCount();
623 if ( nDevices <= 1 ) return 0;
626 UInt32 dataSize = sizeof( AudioDeviceID );
627 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
628 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
629 if ( result != noErr ) {
630 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
631 error( RtAudioError::WARNING );
635 dataSize = sizeof( AudioDeviceID ) * nDevices;
636 AudioDeviceID deviceList[ nDevices ];
637 property.mSelector = kAudioHardwarePropertyDevices;
638 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
639 if ( result != noErr ) {
640 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
641 error( RtAudioError::WARNING );
645 for ( unsigned int i=0; i<nDevices; i++ )
646 if ( id == deviceList[i] ) return i;
648 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
649 error( RtAudioError::WARNING );
653 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
655 RtAudio::DeviceInfo info;
659 unsigned int nDevices = getDeviceCount();
660 if ( nDevices == 0 ) {
661 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
662 error( RtAudioError::INVALID_USE );
666 if ( device >= nDevices ) {
667 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
668 error( RtAudioError::INVALID_USE );
672 AudioDeviceID deviceList[ nDevices ];
673 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
674 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
675 kAudioObjectPropertyScopeGlobal,
676 kAudioObjectPropertyElementMaster };
677 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
678 0, NULL, &dataSize, (void *) &deviceList );
679 if ( result != noErr ) {
680 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
681 error( RtAudioError::WARNING );
685 AudioDeviceID id = deviceList[ device ];
687 // Get the device name.
690 dataSize = sizeof( CFStringRef );
691 property.mSelector = kAudioObjectPropertyManufacturer;
692 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
693 if ( result != noErr ) {
694 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
695 errorText_ = errorStream_.str();
696 error( RtAudioError::WARNING );
700 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
701 long length = CFStringGetLength(cfname);
702 char *mname = (char *)malloc(length * 3 + 1);
703 #if defined( UNICODE ) || defined( _UNICODE )
704 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
706 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
708 info.name.append( (const char *)mname, strlen(mname) );
709 info.name.append( ": " );
713 property.mSelector = kAudioObjectPropertyName;
714 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
715 if ( result != noErr ) {
716 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
717 errorText_ = errorStream_.str();
718 error( RtAudioError::WARNING );
722 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
723 length = CFStringGetLength(cfname);
724 char *name = (char *)malloc(length * 3 + 1);
725 #if defined( UNICODE ) || defined( _UNICODE )
726 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
728 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
730 info.name.append( (const char *)name, strlen(name) );
734 // Get the output stream "configuration".
735 AudioBufferList *bufferList = nil;
736 property.mSelector = kAudioDevicePropertyStreamConfiguration;
737 property.mScope = kAudioDevicePropertyScopeOutput;
738 // property.mElement = kAudioObjectPropertyElementWildcard;
740 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
741 if ( result != noErr || dataSize == 0 ) {
742 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
743 errorText_ = errorStream_.str();
744 error( RtAudioError::WARNING );
748 // Allocate the AudioBufferList.
749 bufferList = (AudioBufferList *) malloc( dataSize );
750 if ( bufferList == NULL ) {
751 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
752 error( RtAudioError::WARNING );
756 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
757 if ( result != noErr || dataSize == 0 ) {
759 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
760 errorText_ = errorStream_.str();
761 error( RtAudioError::WARNING );
765 // Get output channel information.
766 unsigned int i, nStreams = bufferList->mNumberBuffers;
767 for ( i=0; i<nStreams; i++ )
768 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
771 // Get the input stream "configuration".
772 property.mScope = kAudioDevicePropertyScopeInput;
773 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
774 if ( result != noErr || dataSize == 0 ) {
775 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
776 errorText_ = errorStream_.str();
777 error( RtAudioError::WARNING );
781 // Allocate the AudioBufferList.
782 bufferList = (AudioBufferList *) malloc( dataSize );
783 if ( bufferList == NULL ) {
784 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
785 error( RtAudioError::WARNING );
789 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
790 if (result != noErr || dataSize == 0) {
792 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
793 errorText_ = errorStream_.str();
794 error( RtAudioError::WARNING );
798 // Get input channel information.
799 nStreams = bufferList->mNumberBuffers;
800 for ( i=0; i<nStreams; i++ )
801 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
804 // If device opens for both playback and capture, we determine the channels.
805 if ( info.outputChannels > 0 && info.inputChannels > 0 )
806 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
808 // Probe the device sample rates.
809 bool isInput = false;
810 if ( info.outputChannels == 0 ) isInput = true;
812 // Determine the supported sample rates.
813 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
814 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
815 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
816 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
817 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
818 errorText_ = errorStream_.str();
819 error( RtAudioError::WARNING );
823 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
824 AudioValueRange rangeList[ nRanges ];
825 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
826 if ( result != kAudioHardwareNoError ) {
827 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
828 errorText_ = errorStream_.str();
829 error( RtAudioError::WARNING );
833 // The sample rate reporting mechanism is a bit of a mystery. It
834 // seems that it can either return individual rates or a range of
835 // rates. I assume that if the min / max range values are the same,
836 // then that represents a single supported rate and if the min / max
837 // range values are different, the device supports an arbitrary
838 // range of values (though there might be multiple ranges, so we'll
839 // use the most conservative range).
840 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
841 bool haveValueRange = false;
842 info.sampleRates.clear();
843 for ( UInt32 i=0; i<nRanges; i++ ) {
844 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
845 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
846 info.sampleRates.push_back( tmpSr );
848 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
849 info.preferredSampleRate = tmpSr;
852 haveValueRange = true;
853 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
854 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
858 if ( haveValueRange ) {
859 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
860 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
861 info.sampleRates.push_back( SAMPLE_RATES[k] );
863 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
864 info.preferredSampleRate = SAMPLE_RATES[k];
869 // Sort and remove any redundant values
870 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
871 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
873 if ( info.sampleRates.size() == 0 ) {
874 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
875 errorText_ = errorStream_.str();
876 error( RtAudioError::WARNING );
880 // Probe the currently configured sample rate
882 dataSize = sizeof( Float64 );
883 property.mSelector = kAudioDevicePropertyNominalSampleRate;
884 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
885 if ( result == noErr ) info.currentSampleRate = (unsigned int) nominalRate;
887 // CoreAudio always uses 32-bit floating point data for PCM streams.
888 // Thus, any other "physical" formats supported by the device are of
889 // no interest to the client.
890 info.nativeFormats = RTAUDIO_FLOAT32;
892 if ( info.outputChannels > 0 )
893 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
894 if ( info.inputChannels > 0 )
895 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
901 static OSStatus callbackHandler( AudioDeviceID inDevice,
902 const AudioTimeStamp* /*inNow*/,
903 const AudioBufferList* inInputData,
904 const AudioTimeStamp* /*inInputTime*/,
905 AudioBufferList* outOutputData,
906 const AudioTimeStamp* /*inOutputTime*/,
909 CallbackInfo *info = (CallbackInfo *) infoPointer;
911 RtApiCore *object = (RtApiCore *) info->object;
912 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
913 return kAudioHardwareUnspecifiedError;
915 return kAudioHardwareNoError;
918 static OSStatus disconnectListener( AudioObjectID /*inDevice*/,
920 const AudioObjectPropertyAddress properties[],
923 for ( UInt32 i=0; i<nAddresses; i++ ) {
924 if ( properties[i].mSelector == kAudioDevicePropertyDeviceIsAlive ) {
925 CallbackInfo *info = (CallbackInfo *) infoPointer;
926 RtApiCore *object = (RtApiCore *) info->object;
927 info->deviceDisconnected = true;
928 object->closeStream();
929 return kAudioHardwareUnspecifiedError;
933 return kAudioHardwareNoError;
936 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
938 const AudioObjectPropertyAddress properties[],
939 void* handlePointer )
941 CoreHandle *handle = (CoreHandle *) handlePointer;
942 for ( UInt32 i=0; i<nAddresses; i++ ) {
943 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
944 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
945 handle->xrun[1] = true;
947 handle->xrun[0] = true;
951 return kAudioHardwareNoError;
954 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
955 unsigned int firstChannel, unsigned int sampleRate,
956 RtAudioFormat format, unsigned int *bufferSize,
957 RtAudio::StreamOptions *options )
// Probes and configures a CoreAudio device for one direction (OUTPUT or
// INPUT) of a stream: validates the device index, maps the requested
// channels onto one or more CoreAudio streams, negotiates buffer size,
// sample rate, virtual and physical formats, allocates the CoreHandle and
// conversion buffers, and installs the IOProc plus xrun/disconnect
// listeners. Returns true (SUCCESS) on success; on any failure sets
// errorText_ and falls through to the shared cleanup at the end.
// NOTE(review): this listing is a numbered dump with brace-only lines and
// some declarations (e.g. formatFlags, hog_pid, latency, error-exit lines)
// elided; code tokens below are left byte-identical to the dump.
960 unsigned int nDevices = getDeviceCount();
961 if ( nDevices == 0 ) {
962 // This should not happen because a check is made before this function is called.
963 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
967 if ( device >= nDevices ) {
968 // This should not happen because a check is made before this function is called.
969 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// NOTE(review): variable-length array is a compiler extension, not
// standard C++ — upstream code relies on it compiling under clang/gcc.
973 AudioDeviceID deviceList[ nDevices ];
974 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
975 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
976 kAudioObjectPropertyScopeGlobal,
977 kAudioObjectPropertyElementMaster };
978 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
979 0, NULL, &dataSize, (void *) &deviceList );
980 if ( result != noErr ) {
981 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
985 AudioDeviceID id = deviceList[ device ];
987 // Setup for stream mode.
988 bool isInput = false;
989 if ( mode == INPUT ) {
991 property.mScope = kAudioDevicePropertyScopeInput;
994 property.mScope = kAudioDevicePropertyScopeOutput;
996 // Get the stream "configuration".
997 AudioBufferList *bufferList = nil;
999 property.mSelector = kAudioDevicePropertyStreamConfiguration;
1000 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
1001 if ( result != noErr || dataSize == 0 ) {
1002 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
1003 errorText_ = errorStream_.str();
1007 // Allocate the AudioBufferList.
1008 bufferList = (AudioBufferList *) malloc( dataSize );
1009 if ( bufferList == NULL ) {
1010 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1014 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1015 if (result != noErr || dataSize == 0) {
1017 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1018 errorText_ = errorStream_.str();
1022 // Search for one or more streams that contain the desired number of
1023 // channels. CoreAudio devices can have an arbitrary number of
1024 // streams and each stream can have an arbitrary number of channels.
1025 // For each stream, a single buffer of interleaved samples is
1026 // provided. RtAudio prefers the use of one stream of interleaved
1027 // data or multiple consecutive single-channel streams. However, we
1028 // now support multiple consecutive multi-channel streams of
1029 // interleaved data as well.
1030 UInt32 iStream, offsetCounter = firstChannel;
1031 UInt32 nStreams = bufferList->mNumberBuffers;
1032 bool monoMode = false;
1033 bool foundStream = false;
1035 // First check that the device supports the requested number of
1037 UInt32 deviceChannels = 0;
1038 for ( iStream=0; iStream<nStreams; iStream++ )
1039 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1041 if ( deviceChannels < ( channels + firstChannel ) ) {
1043 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1044 errorText_ = errorStream_.str();
1048 // Look for a single stream meeting our needs.
1049 UInt32 firstStream = 0, streamCount = 1, streamChannels = 0, channelOffset = 0;
1050 for ( iStream=0; iStream<nStreams; iStream++ ) {
1051 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1052 if ( streamChannels >= channels + offsetCounter ) {
1053 firstStream = iStream;
1054 channelOffset = offsetCounter;
1058 if ( streamChannels > offsetCounter ) break;
1059 offsetCounter -= streamChannels;
1062 // If we didn't find a single stream above, then we should be able
1063 // to meet the channel specification with multiple streams.
1064 if ( foundStream == false ) {
// monoMode is presumably set true before this loop in an elided line;
// it is cleared whenever any involved stream carries >1 channel.
1066 offsetCounter = firstChannel;
1067 for ( iStream=0; iStream<nStreams; iStream++ ) {
1068 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1069 if ( streamChannels > offsetCounter ) break;
1070 offsetCounter -= streamChannels;
1073 firstStream = iStream;
1074 channelOffset = offsetCounter;
1075 Int32 channelCounter = channels + offsetCounter - streamChannels;
1077 if ( streamChannels > 1 ) monoMode = false;
1078 while ( channelCounter > 0 ) {
1079 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1080 if ( streamChannels > 1 ) monoMode = false;
1081 channelCounter -= streamChannels;
1088 // Determine the buffer size.
1089 AudioValueRange bufferRange;
1090 dataSize = sizeof( AudioValueRange );
1091 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1092 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1094 if ( result != noErr ) {
1095 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1096 errorText_ = errorStream_.str();
// Clamp the caller's requested buffer size into the device's legal range.
1100 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned int) bufferRange.mMinimum;
1101 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned int) bufferRange.mMaximum;
1102 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned int) bufferRange.mMinimum;
1104 // Set the buffer size. For multiple streams, I'm assuming we only
1105 // need to make this setting for the master channel.
1106 UInt32 theSize = (UInt32) *bufferSize;
1107 dataSize = sizeof( UInt32 );
1108 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1109 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1111 if ( result != noErr ) {
1112 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1113 errorText_ = errorStream_.str();
1117 // If attempting to setup a duplex stream, the bufferSize parameter
1118 // MUST be the same in both directions!
1119 *bufferSize = theSize;
1120 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1121 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1122 errorText_ = errorStream_.str();
1126 stream_.bufferSize = *bufferSize;
1127 stream_.nBuffers = 1;
1129 // Try to set "hog" mode ... it's not clear to me this is working.
1130 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
// hog_pid's declaration (pid_t) is elided in this listing; it is read,
// compared to getpid(), and written back to request exclusive access.
1132 dataSize = sizeof( hog_pid );
1133 property.mSelector = kAudioDevicePropertyHogMode;
1134 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1135 if ( result != noErr ) {
1136 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1137 errorText_ = errorStream_.str();
1141 if ( hog_pid != getpid() ) {
1143 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1144 if ( result != noErr ) {
1145 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1146 errorText_ = errorStream_.str();
1152 // Check and if necessary, change the sample rate for the device.
1153 Float64 nominalRate;
1154 dataSize = sizeof( Float64 );
1155 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1156 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1157 if ( result != noErr ) {
1158 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1159 errorText_ = errorStream_.str();
1163 // Only try to change the sample rate if off by more than 1 Hz.
1164 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1166 nominalRate = (Float64) sampleRate;
1167 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1168 if ( result != noErr ) {
1169 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1170 errorText_ = errorStream_.str();
1174 // Now wait until the reported nominal rate is what we just set.
// Polls every 5 ms (the usleep is elided in this listing) with a 2 s
// timeout, since the rate change is applied asynchronously by CoreAudio.
1175 UInt32 microCounter = 0;
1176 Float64 reportedRate = 0.0;
1177 while ( reportedRate != nominalRate ) {
1178 microCounter += 5000;
1179 if ( microCounter > 2000000 ) break;
1181 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &reportedRate );
1184 if ( microCounter > 2000000 ) {
1185 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1186 errorText_ = errorStream_.str();
1191 // Now set the stream format for all streams. Also, check the
1192 // physical format of the device and change that if necessary.
1193 AudioStreamBasicDescription description;
1194 dataSize = sizeof( AudioStreamBasicDescription );
1195 property.mSelector = kAudioStreamPropertyVirtualFormat;
1196 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1197 if ( result != noErr ) {
1198 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1199 errorText_ = errorStream_.str();
1203 // Set the sample rate and data format id. However, only make the
1204 // change if the sample rate is not within 1.0 of the desired
1205 // rate and the format is not linear pcm.
1206 bool updateFormat = false;
1207 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1208 description.mSampleRate = (Float64) sampleRate;
1209 updateFormat = true;
1212 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1213 description.mFormatID = kAudioFormatLinearPCM;
1214 updateFormat = true;
1217 if ( updateFormat ) {
1218 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1219 if ( result != noErr ) {
1220 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1221 errorText_ = errorStream_.str();
1226 // Now check the physical format.
1227 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1228 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1229 if ( result != noErr ) {
1230 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1231 errorText_ = errorStream_.str();
1235 //std::cout << "Current physical stream format:" << std::endl;
1236 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1237 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1238 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1239 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1241 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1242 description.mFormatID = kAudioFormatLinearPCM;
1243 //description.mSampleRate = (Float64) sampleRate;
1244 AudioStreamBasicDescription testDescription = description;
1247 // We'll try higher bit rates first and then work our way down.
// NOTE(review): this vector is declared with a UInt32 first element but
// is populated with std::pair<Float32, UInt32> below; the fractional
// keys 24.2 / 24.4 (used to distinguish the two unpacked 24-bit layouts)
// require Float32. Upstream RtAudio declares
// std::vector< std::pair<Float32, UInt32> > here — likely a
// transcription error in this listing; confirm against upstream.
1248 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1249 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1250 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1251 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1252 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1253 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1254 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1255 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1256 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1257 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1258 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1259 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1260 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1262 bool setPhysicalFormat = false;
1263 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1264 testDescription = description;
1265 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1266 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): '~' is bitwise NOT, so this subexpression is nonzero
// (truthy) for every flag value — '!' was presumably intended; as
// written, every 24-bit entry takes the 4-bytes-per-sample branch.
1267 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1268 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1270 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1271 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1272 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1273 if ( result == noErr ) {
1274 setPhysicalFormat = true;
1275 //std::cout << "Updated physical stream format:" << std::endl;
1276 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1277 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1278 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1279 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1284 if ( !setPhysicalFormat ) {
1285 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1286 errorText_ = errorStream_.str();
1289 } // done setting virtual/physical formats.
1291 // Get the stream / device latency.
// The UInt32 'latency' declaration is elided in this listing.
1293 dataSize = sizeof( UInt32 );
1294 property.mSelector = kAudioDevicePropertyLatency;
1295 if ( AudioObjectHasProperty( id, &property ) == true ) {
1296 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1297 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1299 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1300 errorText_ = errorStream_.str();
// Latency lookup failure is non-fatal: warn and keep going.
1301 error( RtAudioError::WARNING );
1305 // Byte-swapping: According to AudioHardware.h, the stream data will
1306 // always be presented in native-endian format, so we should never
1307 // need to byte swap.
1308 stream_.doByteSwap[mode] = false;
1310 // From the CoreAudio documentation, PCM data must be supplied as
1312 stream_.userFormat = format;
1313 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1315 if ( streamCount == 1 )
1316 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1317 else // multiple streams
1318 stream_.nDeviceChannels[mode] = channels;
1319 stream_.nUserChannels[mode] = channels;
1320 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1321 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1322 else stream_.userInterleaved = true;
1323 stream_.deviceInterleaved[mode] = true;
1324 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1326 // Set flags for buffer conversion.
1327 stream_.doConvertBuffer[mode] = false;
1328 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1329 stream_.doConvertBuffer[mode] = true;
1330 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1331 stream_.doConvertBuffer[mode] = true;
1332 if ( streamCount == 1 ) {
1333 if ( stream_.nUserChannels[mode] > 1 &&
1334 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1335 stream_.doConvertBuffer[mode] = true;
1337 else if ( monoMode && stream_.userInterleaved )
1338 stream_.doConvertBuffer[mode] = true;
1340 // Allocate our CoreHandle structure for the stream.
// Created only for the first direction opened; a duplex open of the
// second direction reuses the handle stored in stream_.apiHandle.
1341 CoreHandle *handle = 0;
1342 if ( stream_.apiHandle == 0 ) {
1344 handle = new CoreHandle;
1346 catch ( std::bad_alloc& ) {
1347 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1351 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1352 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1355 stream_.apiHandle = (void *) handle;
1358 handle = (CoreHandle *) stream_.apiHandle;
1359 handle->iStream[mode] = firstStream;
1360 handle->nStreams[mode] = streamCount;
1361 handle->id[mode] = id;
1363 // Allocate necessary internal buffers.
1364 unsigned long bufferBytes;
1365 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1366 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1367 if ( stream_.userBuffer[mode] == NULL ) {
1368 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1372 // If possible, we will make use of the CoreAudio stream buffers as
1373 // "device buffers". However, we can't do this if using multiple
1375 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1377 bool makeBuffer = true;
1378 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1379 if ( mode == INPUT ) {
// For duplex on one device, reuse the output-side device buffer when it
// is already at least as large as the input side needs.
1380 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1381 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1382 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1387 bufferBytes *= *bufferSize;
1388 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1389 stream_.deviceBuffer = (char *) calloc( bufferBytes, sizeof(char) );
1390 if ( stream_.deviceBuffer == NULL ) {
1391 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1397 stream_.sampleRate = sampleRate;
1398 stream_.device[mode] = device;
1399 stream_.state = STREAM_STOPPED;
1400 stream_.callbackInfo.object = (void *) this;
1402 // Setup the buffer conversion information structure.
1403 if ( stream_.doConvertBuffer[mode] ) {
1404 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1405 else setConvertInfo( mode, channelOffset );
1408 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1409 // Only one callback procedure per device.
1410 stream_.mode = DUPLEX;
1412 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1413 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1415 // deprecated in favor of AudioDeviceCreateIOProcID()
1416 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1418 if ( result != noErr ) {
1419 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1420 errorText_ = errorStream_.str();
1423 if ( stream_.mode == OUTPUT && mode == INPUT )
1424 stream_.mode = DUPLEX;
1426 stream_.mode = mode;
1429 // Setup the device property listener for over/underload.
1430 property.mSelector = kAudioDeviceProcessorOverload;
1431 property.mScope = kAudioObjectPropertyScopeGlobal;
1432 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1433 if ( result != noErr ) {
1434 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting xrun listener for device (" << device << ").";
1435 errorText_ = errorStream_.str();
1439 // Setup a listener to detect a possible device disconnect.
1440 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1441 result = AudioObjectAddPropertyListener( id , &property, disconnectListener, (void *) &stream_.callbackInfo );
1442 if ( result != noErr ) {
// Roll back the xrun listener registered just above before failing.
1443 AudioObjectRemovePropertyListener( id, &property, xrunListener, (void *) handle );
1444 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting disconnect listener for device (" << device << ").";
1445 errorText_ = errorStream_.str();
// Shared error-exit cleanup: release the CoreHandle, buffers, and any
// partially-initialized state before returning failure (the error label
// and return are elided in this listing).
1453 pthread_cond_destroy( &handle->condition );
1455 stream_.apiHandle = 0;
1458 for ( int i=0; i<2; i++ ) {
1459 if ( stream_.userBuffer[i] ) {
1460 free( stream_.userBuffer[i] );
1461 stream_.userBuffer[i] = 0;
1465 if ( stream_.deviceBuffer ) {
1466 free( stream_.deviceBuffer );
1467 stream_.deviceBuffer = 0;
1471 //stream_.state = STREAM_CLOSED;
1475 void RtApiCore :: closeStream( void )
// Tears down an open stream: removes the xrun and disconnect property
// listeners, stops the device(s) if running, destroys the IOProc(s),
// frees user/device buffers, destroys the condition variable, and
// releases the CoreHandle. Each direction (output = index 0, input =
// index 1) is cleaned up independently; the input side is only touched
// when it uses a different device than the output side.
1477 if ( stream_.state == STREAM_CLOSED ) {
1478 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1479 error( RtAudioError::WARNING );
1483 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1484 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1486 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1487 kAudioObjectPropertyScopeGlobal,
1488 kAudioObjectPropertyElementMaster };
1490 property.mSelector = kAudioDeviceProcessorOverload;
1491 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1492 errorText_ = "RtApiCore::closeStream(): error removing xrun property listener!";
1493 error( RtAudioError::WARNING );
1495 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1496 if (AudioObjectRemovePropertyListener( handle->id[0], &property, disconnectListener, (void *) &stream_.callbackInfo ) != noErr) {
1497 errorText_ = "RtApiCore::closeStream(): error removing disconnect property listener!";
1498 error( RtAudioError::WARNING );
1501 if ( stream_.state == STREAM_RUNNING )
1502 AudioDeviceStop( handle->id[0], callbackHandler );
1503 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1504 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1506 // deprecated in favor of AudioDeviceDestroyIOProcID()
1507 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input side: skipped for DUPLEX on a single device, since the one
// IOProc/listener registration above covers both directions.
1511 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1513 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1514 kAudioObjectPropertyScopeGlobal,
1515 kAudioObjectPropertyElementMaster };
1517 property.mSelector = kAudioDeviceProcessorOverload;
1518 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1519 errorText_ = "RtApiCore::closeStream(): error removing xrun property listener!";
1520 error( RtAudioError::WARNING );
1522 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1523 if (AudioObjectRemovePropertyListener( handle->id[1], &property, disconnectListener, (void *) &stream_.callbackInfo ) != noErr) {
1524 errorText_ = "RtApiCore::closeStream(): error removing disconnect property listener!";
1525 error( RtAudioError::WARNING );
1528 if ( stream_.state == STREAM_RUNNING )
1529 AudioDeviceStop( handle->id[1], callbackHandler );
1530 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1531 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1533 // deprecated in favor of AudioDeviceDestroyIOProcID()
1534 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
1538 for ( int i=0; i<2; i++ ) {
1539 if ( stream_.userBuffer[i] ) {
1540 free( stream_.userBuffer[i] );
1541 stream_.userBuffer[i] = 0;
1545 if ( stream_.deviceBuffer ) {
1546 free( stream_.deviceBuffer );
1547 stream_.deviceBuffer = 0;
1550 // Destroy pthread condition variable.
1551 pthread_cond_signal( &handle->condition ); // signal condition variable in case stopStream is blocked
1552 pthread_cond_destroy( &handle->condition );
1554 stream_.apiHandle = 0;
// Report a device disconnect (detected by disconnectListener) to the
// client as a DEVICE_DISCONNECT error after teardown completes.
1556 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1557 if ( info->deviceDisconnected ) {
1558 errorText_ = "RtApiCore: the stream device was disconnected (and closed)!";
1559 error( RtAudioError::DEVICE_DISCONNECT );
1563 //stream_.mode = UNINITIALIZED;
1564 //stream_.state = STREAM_CLOSED;
1567 void RtApiCore :: startStream( void )
// Starts the CoreAudio device(s) for an open, stopped stream. Issues a
// WARNING (and returns early, via an elided return) if the stream is not
// in the STOPPED state. On any AudioDeviceStart failure the function
// falls through to error( SYSTEM_ERROR ) at the end.
1570 if ( stream_.state != STREAM_STOPPED ) {
1571 if ( stream_.state == STREAM_RUNNING )
1572 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1573 else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
1574 errorText_ = "RtApiCore::startStream(): the stream is stopping or closed!";
1575 error( RtAudioError::WARNING );
// Record the start time so getStreamTime() can interpolate between ticks.
1580 #if defined( HAVE_GETTIMEOFDAY )
1581 gettimeofday( &stream_.lastTickTimestamp, NULL );
1585 OSStatus result = noErr;
1586 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1587 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1589 result = AudioDeviceStart( handle->id[0], callbackHandler );
1590 if ( result != noErr ) {
1591 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1592 errorText_ = errorStream_.str();
// Start the input device separately only when it differs from the
// output device (single-device duplex uses one IOProc for both).
1597 if ( stream_.mode == INPUT ||
1598 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1600 // Clear user input buffer
1601 unsigned long bufferBytes;
1602 bufferBytes = stream_.nUserChannels[1] * stream_.bufferSize * formatBytes( stream_.userFormat );
1603 memset( stream_.userBuffer[1], 0, bufferBytes * sizeof(char) );
1605 result = AudioDeviceStart( handle->id[1], callbackHandler );
1606 if ( result != noErr ) {
1607 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1608 errorText_ = errorStream_.str();
1613 // set stream time to zero?
1614 handle->drainCounter = 0;
1615 handle->internalDrain = false;
1616 stream_.state = STREAM_RUNNING;
1619 if ( result == noErr ) return;
1620 error( RtAudioError::SYSTEM_ERROR );
1623 void RtApiCore :: stopStream( void )
// Stops a running (or stopping) stream. For output, it first lets the
// stream drain: drainCounter is set to 2 and this thread blocks on the
// handle's condition variable until callbackEvent() has pushed out the
// remaining audio and signals it. Then AudioDeviceStop() is called for
// each active device. Falls through to error( SYSTEM_ERROR ) on failure.
1626 if ( stream_.state != STREAM_RUNNING && stream_.state != STREAM_STOPPING ) {
1627 if ( stream_.state == STREAM_STOPPED )
1628 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1629 else if ( stream_.state == STREAM_CLOSED )
1630 errorText_ = "RtApiCore::stopStream(): the stream is closed!";
1631 error( RtAudioError::WARNING );
1635 OSStatus result = noErr;
1636 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1637 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain has started yet; the callback-driven
// drain path (coreStopStream) enters here with drainCounter already > 0.
1639 if ( handle->drainCounter == 0 ) {
1640 handle->drainCounter = 2;
// NOTE(review): pthread_cond_wait requires stream_.mutex to be locked by
// this thread; the surrounding lock/unlock lines appear to be elided
// from this listing — confirm against the full source.
1641 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1644 result = AudioDeviceStop( handle->id[0], callbackHandler );
1645 if ( result != noErr ) {
1646 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1647 errorText_ = errorStream_.str();
1652 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1654 result = AudioDeviceStop( handle->id[1], callbackHandler );
1655 if ( result != noErr ) {
1656 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1657 errorText_ = errorStream_.str();
1662 stream_.state = STREAM_STOPPED;
1665 if ( result == noErr ) return;
1666 error( RtAudioError::SYSTEM_ERROR );
1669 void RtApiCore :: abortStream( void )
// Aborts a running stream without draining pending output: setting
// drainCounter to 2 makes callbackEvent() write silence instead of
// invoking the user callback, and the state change to STREAM_STOPPING
// triggers the actual stop (via stopStream, called from an elided line).
1672 if ( stream_.state != STREAM_RUNNING ) {
1673 if ( stream_.state == STREAM_STOPPED )
1674 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1675 else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
1676 errorText_ = "RtApiCore::abortStream(): the stream is stopping or closed!";
1677 error( RtAudioError::WARNING );
1681 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1682 handle->drainCounter = 2;
1684 stream_.state = STREAM_STOPPING;
1688 // This function will be called by a spawned thread when the user
1689 // callback function signals that the stream should be stopped or
1690 // aborted. It is better to handle it this way because the
1691 // callbackEvent() function probably should return before the AudioDeviceStop()
1692 // function is called.
// Thread entry point spawned by callbackEvent() after an internal drain
// completes: calls stopStream() from outside the audio callback so that
// the callback can return before AudioDeviceStop() runs.
1693 static void *coreStopStream( void *ptr )
// ptr is the stream's CallbackInfo; its 'object' field holds the RtApiCore
// instance that owns the stream being stopped.
1695 CallbackInfo *info = (CallbackInfo *) ptr;
1696 RtApiCore *object = (RtApiCore *) info->object;
1698 object->stopStream();
1699 pthread_exit( NULL );
1702 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1703 const AudioBufferList *inBufferList,
1704 const AudioBufferList *outBufferList )
1706 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1707 if ( stream_.state == STREAM_CLOSED ) {
1708 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1709 error( RtAudioError::WARNING );
1713 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1714 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1716 // Check if we were draining the stream and signal is finished.
1717 if ( handle->drainCounter > 3 ) {
1718 ThreadHandle threadId;
1720 stream_.state = STREAM_STOPPING;
1721 if ( handle->internalDrain == true )
1722 pthread_create( &threadId, NULL, coreStopStream, info );
1723 else // external call to stopStream()
1724 pthread_cond_signal( &handle->condition );
1728 AudioDeviceID outputDevice = handle->id[0];
1730 // Invoke user callback to get fresh output data UNLESS we are
1731 // draining stream or duplex mode AND the input/output devices are
1732 // different AND this function is called for the input device.
1733 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1734 RtAudioCallback callback = (RtAudioCallback) info->callback;
1735 double streamTime = getStreamTime();
1736 RtAudioStreamStatus status = 0;
1737 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1738 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1739 handle->xrun[0] = false;
1741 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1742 status |= RTAUDIO_INPUT_OVERFLOW;
1743 handle->xrun[1] = false;
1746 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1747 stream_.bufferSize, streamTime, status, info->userData );
1748 if ( cbReturnValue == 2 ) {
1752 else if ( cbReturnValue == 1 ) {
1753 handle->drainCounter = 1;
1754 handle->internalDrain = true;
1758 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1760 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1762 if ( handle->nStreams[0] == 1 ) {
1763 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1765 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1767 else { // fill multiple streams with zeros
1768 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1769 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1771 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1775 else if ( handle->nStreams[0] == 1 ) {
1776 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1777 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1778 stream_.userBuffer[0], stream_.convertInfo[0] );
1780 else { // copy from user buffer
1781 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1782 stream_.userBuffer[0],
1783 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1786 else { // fill multiple streams
1787 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1788 if ( stream_.doConvertBuffer[0] ) {
1789 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1790 inBuffer = (Float32 *) stream_.deviceBuffer;
1793 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1794 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1795 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1796 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1797 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1800 else { // fill multiple multi-channel streams with interleaved data
1801 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1804 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1805 UInt32 inChannels = stream_.nUserChannels[0];
1806 if ( stream_.doConvertBuffer[0] ) {
1807 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1808 inChannels = stream_.nDeviceChannels[0];
1811 if ( inInterleaved ) inOffset = 1;
1812 else inOffset = stream_.bufferSize;
1814 channelsLeft = inChannels;
1815 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1817 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1818 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1821 // Account for possible channel offset in first stream
1822 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1823 streamChannels -= stream_.channelOffset[0];
1824 outJump = stream_.channelOffset[0];
1828 // Account for possible unfilled channels at end of the last stream
1829 if ( streamChannels > channelsLeft ) {
1830 outJump = streamChannels - channelsLeft;
1831 streamChannels = channelsLeft;
1834 // Determine input buffer offsets and skips
1835 if ( inInterleaved ) {
1836 inJump = inChannels;
1837 in += inChannels - channelsLeft;
1841 in += (inChannels - channelsLeft) * inOffset;
1844 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1845 for ( unsigned int j=0; j<streamChannels; j++ ) {
1846 *out++ = in[j*inOffset];
1851 channelsLeft -= streamChannels;
1857 // Don't bother draining input
1858 if ( handle->drainCounter ) {
1859 handle->drainCounter++;
1863 AudioDeviceID inputDevice;
1864 inputDevice = handle->id[1];
1865 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1867 if ( handle->nStreams[1] == 1 ) {
1868 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1869 convertBuffer( stream_.userBuffer[1],
1870 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1871 stream_.convertInfo[1] );
1873 else { // copy to user buffer
1874 memcpy( stream_.userBuffer[1],
1875 inBufferList->mBuffers[handle->iStream[1]].mData,
1876 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1879 else { // read from multiple streams
1880 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1881 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1883 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1884 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1885 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1886 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1887 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1890 else { // read from multiple multi-channel streams
1891 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1894 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1895 UInt32 outChannels = stream_.nUserChannels[1];
1896 if ( stream_.doConvertBuffer[1] ) {
1897 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1898 outChannels = stream_.nDeviceChannels[1];
1901 if ( outInterleaved ) outOffset = 1;
1902 else outOffset = stream_.bufferSize;
1904 channelsLeft = outChannels;
1905 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1907 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1908 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1911 // Account for possible channel offset in first stream
1912 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1913 streamChannels -= stream_.channelOffset[1];
1914 inJump = stream_.channelOffset[1];
1918 // Account for possible unread channels at end of the last stream
1919 if ( streamChannels > channelsLeft ) {
1920 inJump = streamChannels - channelsLeft;
1921 streamChannels = channelsLeft;
1924 // Determine output buffer offsets and skips
1925 if ( outInterleaved ) {
1926 outJump = outChannels;
1927 out += outChannels - channelsLeft;
1931 out += (outChannels - channelsLeft) * outOffset;
1934 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1935 for ( unsigned int j=0; j<streamChannels; j++ ) {
1936 out[j*outOffset] = *in++;
1941 channelsLeft -= streamChannels;
1945 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1946 convertBuffer( stream_.userBuffer[1],
1947 stream_.deviceBuffer,
1948 stream_.convertInfo[1] );
1955 // Make sure to only tick duplex stream time once if using two devices
1956 if ( stream_.mode != DUPLEX || (stream_.mode == DUPLEX && handle->id[0] != handle->id[1] && deviceId == handle->id[0] ) )
1957 RtApi::tickStreamTime();
1962 const char* RtApiCore :: getErrorCode( OSStatus code )
1966 case kAudioHardwareNotRunningError:
1967 return "kAudioHardwareNotRunningError";
1969 case kAudioHardwareUnspecifiedError:
1970 return "kAudioHardwareUnspecifiedError";
1972 case kAudioHardwareUnknownPropertyError:
1973 return "kAudioHardwareUnknownPropertyError";
1975 case kAudioHardwareBadPropertySizeError:
1976 return "kAudioHardwareBadPropertySizeError";
1978 case kAudioHardwareIllegalOperationError:
1979 return "kAudioHardwareIllegalOperationError";
1981 case kAudioHardwareBadObjectError:
1982 return "kAudioHardwareBadObjectError";
1984 case kAudioHardwareBadDeviceError:
1985 return "kAudioHardwareBadDeviceError";
1987 case kAudioHardwareBadStreamError:
1988 return "kAudioHardwareBadStreamError";
1990 case kAudioHardwareUnsupportedOperationError:
1991 return "kAudioHardwareUnsupportedOperationError";
1993 case kAudioDeviceUnsupportedFormatError:
1994 return "kAudioDeviceUnsupportedFormatError";
1996 case kAudioDevicePermissionsError:
1997 return "kAudioDevicePermissionsError";
2000 return "CoreAudio unknown error";
2004 //******************** End of __MACOSX_CORE__ *********************//
2007 #if defined(__UNIX_JACK__)
2009 // JACK is a low-latency audio server, originally written for the
2010 // GNU/Linux operating system and now also ported to OS-X. It can
2011 // connect a number of different applications to an audio device, as
2012 // well as allowing them to share audio between themselves.
2014 // When using JACK with RtAudio, "devices" refer to JACK clients that
2015 // have ports connected to the server. The JACK server is typically
2016 // started in a terminal as follows:
2018 // .jackd -d alsa -d hw:0
2020 // or through an interface program such as qjackctl. Many of the
2021 // parameters normally set for a stream are fixed by the JACK server
2022 // and can be specified when the JACK server is started. For example:
2025 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2027 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2028 // frames, and number of buffers = 4. Once the server is running, it
2029 // is not possible to override these values. If the values are not
2030 // specified in the command-line, the JACK server uses default values.
2032 // The JACK server does not have to be running when an instance of
2033 // RtApiJack is created, though the function getDeviceCount() will
2034 // report 0 devices found until JACK has been started. When no
2035 // devices are available (i.e., the JACK server is not running), a
2036 // stream cannot be opened.
2038 #include <jack/jack.h>
2042 // A structure to hold various information related to the Jack API
// Connection to the JACK server for this stream.
2045 jack_client_t *client;
// Registered JACK ports, one array per direction ([0] = playback,
// [1] = capture; see the port registration in probeDeviceOpen()).
2046 jack_port_t **ports[2];
// Name prefix of the JACK "device" (client) used for each direction.
2047 std::string deviceName[2];
// Signaled by callbackEvent() when draining completes; stopStream()
// blocks on it.
2049 pthread_cond_t condition;
2050 int drainCounter; // Tracks callback counts when draining
2051 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Default constructor: zero the pointers and clear the xrun flags.
// NOTE(review): xrun[0]/xrun[1] are initialized here but the
// `bool xrun[2];` member declaration is not visible in this excerpt —
// confirm it exists in the struct.
2054 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
2057 #if !defined(__RTAUDIO_DEBUG__)
// No-op error handler installed via jack_set_error_function() in the
// RtApiJack constructor to suppress JACK's console error output in
// non-debug builds.
2058 static void jackSilentError( const char * ) {};
// Constructor: autoconnect defaults to enabled; in non-debug builds
// JACK's internal error reporting is routed to a silent handler.
2061 RtApiJack :: RtApiJack()
2062 :shouldAutoconnect_(true) {
2063 // Nothing to do here.
2064 #if !defined(__RTAUDIO_DEBUG__)
2065 // Turn off Jack's internal error reporting.
2066 jack_set_error_function( &jackSilentError );
// Destructor: close any stream that is still open so the JACK client
// and stream buffers are released.
2070 RtApiJack :: ~RtApiJack()
2072 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices" by opening a temporary client and scanning all
// audio port names; each distinct name prefix (text before the first
// colon) counts as one device.  Returns 0 if no JACK server is
// reachable.
2075 unsigned int RtApiJack :: getDeviceCount( void )
2077 // See if we can become a jack client.
2078 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2079 jack_status_t *status = NULL;
2080 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2081 if ( client == 0 ) return 0;
2084 std::string port, previousPort;
2085 unsigned int nChannels = 0, nDevices = 0;
2086 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2088 // Parse the port names up to the first colon (:).
2091 port = (char *) ports[ nChannels ];
2092 iColon = port.find(":");
2093 if ( iColon != std::string::npos ) {
// NOTE(review): the colon is kept in the compared prefix here
// (iColon + 1), whereas getDeviceInfo()/probeDeviceOpen() compare the
// prefix without it — verify the counting stays consistent.
2094 port = port.substr( 0, iColon + 1 );
2095 if ( port != previousPort ) {
2097 previousPort = port;
2100 } while ( ports[++nChannels] );
// Release the temporary client before returning the device count.
2104 jack_client_close( client );
// Probe one JACK "device" (client-name prefix): fills in its name,
// the server's (only) sample rate, and the channel counts implied by
// the client's registered ports.  Warnings are raised (not thrown)
// when the server is unreachable or channels cannot be determined.
2108 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2110 RtAudio::DeviceInfo info;
2111 info.probed = false;
// Open a throwaway client just for probing; never auto-start a server.
2113 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2114 jack_status_t *status = NULL;
2115 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2116 if ( client == 0 ) {
2117 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2118 error( RtAudioError::WARNING );
// Walk all port names and resolve the requested device index to a
// client-name prefix.
2123 std::string port, previousPort;
2124 unsigned int nPorts = 0, nDevices = 0;
2125 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2127 // Parse the port names up to the first colon (:).
2130 port = (char *) ports[ nPorts ];
2131 iColon = port.find(":");
2132 if ( iColon != std::string::npos ) {
2133 port = port.substr( 0, iColon );
2134 if ( port != previousPort ) {
2135 if ( nDevices == device ) info.name = port;
2137 previousPort = port;
2140 } while ( ports[++nPorts] );
2144 if ( device >= nDevices ) {
2145 jack_client_close( client );
2146 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2147 error( RtAudioError::INVALID_USE );
2151 // Get the current jack server sample rate.
// JACK fixes the rate server-wide, so it is the sole supported rate.
2152 info.sampleRates.clear();
2154 info.preferredSampleRate = jack_get_sample_rate( client );
2155 info.sampleRates.push_back( info.preferredSampleRate );
2157 // Count the available ports containing the client name as device
2158 // channels. Jack "input ports" equal RtAudio output channels.
2159 unsigned int nChannels = 0;
2160 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2162 while ( ports[ nChannels ] ) nChannels++;
2164 info.outputChannels = nChannels;
2167 // Jack "output ports" equal RtAudio input channels.
2169 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2171 while ( ports[ nChannels ] ) nChannels++;
2173 info.inputChannels = nChannels;
2176 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2177 jack_client_close(client);
2178 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2179 error( RtAudioError::WARNING );
2183 // If device opens for both playback and capture, we determine the channels.
2184 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2185 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2187 // Jack always uses 32-bit floats.
2188 info.nativeFormats = RTAUDIO_FLOAT32;
2190 // Jack doesn't provide default devices so we'll use the first available one.
2191 if ( device == 0 && info.outputChannels > 0 )
2192 info.isDefaultOutput = true;
2193 if ( device == 0 && info.inputChannels > 0 )
2194 info.isDefaultInput = true;
2196 jack_client_close(client);
// JACK process callback: forwards each audio cycle to
// RtApiJack::callbackEvent().  Returning non-zero (on a false return
// from callbackEvent) tells JACK to stop calling this client.
2201 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2203 CallbackInfo *info = (CallbackInfo *) infoPointer;
2205 RtApiJack *object = (RtApiJack *) info->object;
2206 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2211 // This function will be called by a spawned thread when the Jack
2212 // server signals that it is shutting down. It is necessary to handle
2213 // it this way because the jackShutdown() function must return before
2214 // the jack_deactivate() function (in closeStream()) will return.
// Thread entry point: closes the stream on behalf of jackShutdown().
2215 static void *jackCloseStream( void *ptr )
2217 CallbackInfo *info = (CallbackInfo *) ptr;
2218 RtApiJack *object = (RtApiJack *) info->object;
2220 object->closeStream();
2222 pthread_exit( NULL );
// JACK shutdown callback: if the stream is still running (i.e. this
// is a real server shutdown rather than our own deactivate), spawn a
// helper thread to close the stream and notify the user on stderr.
2224 static void jackShutdown( void *infoPointer )
2226 CallbackInfo *info = (CallbackInfo *) infoPointer;
2227 RtApiJack *object = (RtApiJack *) info->object;
2229 // Check current stream state. If stopped, then we'll assume this
2230 // was called as a result of a call to RtApiJack::stopStream (the
2231 // deactivation of a client handle causes this function to be called).
2232 // If not, we'll assume the Jack server is shutting down or some
2233 // other problem occurred and we should close the stream.
2234 if ( object->isStreamRunning() == false ) return;
2236 ThreadHandle threadId;
2237 pthread_create( &threadId, NULL, jackCloseStream, info );
2238 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
// JACK xrun callback: latch an over/underrun flag for each direction
// that has registered ports, so callbackEvent() can report the
// condition in the stream status passed to the user callback.
2241 static int jackXrun( void *infoPointer )
2243 JackHandle *handle = *((JackHandle **) infoPointer);
2245 if ( handle->ports[0] ) handle->xrun[0] = true;
2246 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open (or extend to duplex) a stream on the given JACK device.
// device:       index of the client-name prefix (see getDeviceCount).
// mode:         OUTPUT or INPUT; a second call with the other mode on
//               an already-open stream promotes it to DUPLEX.
// channels/firstChannel: requested channel count and offset.
// sampleRate:   must match the JACK server rate (JACK fixes it).
// format:       user sample format; device format is always FLOAT32.
// bufferSize:   in/out — overwritten with the JACK server buffer size.
// Returns true on success; error paths set errorText_ and fail.
2251 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2252 unsigned int firstChannel, unsigned int sampleRate,
2253 RtAudioFormat format, unsigned int *bufferSize,
2254 RtAudio::StreamOptions *options )
2256 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2258 // Look for jack server and try to become a client (only do once per stream).
2259 jack_client_t *client = 0;
2260 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2261 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2262 jack_status_t *status = NULL;
// Honor a user-supplied client name when one was given in options.
2263 if ( options && !options->streamName.empty() )
2264 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2266 client = jack_client_open( "RtApiJack", jackoptions, status );
2267 if ( client == 0 ) {
2268 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2269 error( RtAudioError::WARNING );
2274 // The handle must have been created on an earlier pass.
2275 client = handle->client;
// Resolve the device index to its client-name prefix, as in
// getDeviceInfo().
2279 std::string port, previousPort, deviceName;
2280 unsigned int nPorts = 0, nDevices = 0;
2281 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2283 // Parse the port names up to the first colon (:).
2286 port = (char *) ports[ nPorts ];
2287 iColon = port.find(":");
2288 if ( iColon != std::string::npos ) {
2289 port = port.substr( 0, iColon );
2290 if ( port != previousPort ) {
2291 if ( nDevices == device ) deviceName = port;
2293 previousPort = port;
2296 } while ( ports[++nPorts] );
2300 if ( device >= nDevices ) {
2301 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
// Jack "input ports" correspond to RtAudio playback channels, and
// vice versa for capture.
2305 unsigned long flag = JackPortIsInput;
2306 if ( mode == INPUT ) flag = JackPortIsOutput;
// Channel validation is skipped when auto-connection is disabled.
2308 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2309 // Count the available ports containing the client name as device
2310 // channels. Jack "input ports" equal RtAudio output channels.
2311 unsigned int nChannels = 0;
2312 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2314 while ( ports[ nChannels ] ) nChannels++;
2317 // Compare the jack ports for specified client to the requested number of channels.
2318 if ( nChannels < (channels + firstChannel) ) {
2319 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2320 errorText_ = errorStream_.str();
2325 // Check the jack server sample rate.
2326 unsigned int jackRate = jack_get_sample_rate( client );
2327 if ( sampleRate != jackRate ) {
2328 jack_client_close( client );
2329 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2330 errorText_ = errorStream_.str();
2333 stream_.sampleRate = jackRate;
2335 // Get the latency of the JACK port.
2336 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2337 if ( ports[ firstChannel ] ) {
2339 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2340 // the range (usually the min and max are equal)
2341 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2342 // get the latency range
2343 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2344 // be optimistic, use the min!
2345 stream_.latency[mode] = latrange.min;
2346 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2350 // The jack server always uses 32-bit floating-point data.
2351 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2352 stream_.userFormat = format;
2354 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2355 else stream_.userInterleaved = true;
2357 // Jack always uses non-interleaved buffers.
2358 stream_.deviceInterleaved[mode] = false;
2360 // Jack always provides host byte-ordered data.
2361 stream_.doByteSwap[mode] = false;
2363 // Get the buffer size. The buffer size and number of buffers
2364 // (periods) is set when the jack server is started.
2365 stream_.bufferSize = (int) jack_get_buffer_size( client );
2366 *bufferSize = stream_.bufferSize;
2368 stream_.nDeviceChannels[mode] = channels;
2369 stream_.nUserChannels[mode] = channels;
2371 // Set flags for buffer conversion.
// Conversion is needed on a format mismatch, or an interleaving
// mismatch with more than one channel.
2372 stream_.doConvertBuffer[mode] = false;
2373 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2374 stream_.doConvertBuffer[mode] = true;
2375 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2376 stream_.nUserChannels[mode] > 1 )
2377 stream_.doConvertBuffer[mode] = true;
2379 // Allocate our JackHandle structure for the stream.
2380 if ( handle == 0 ) {
2382 handle = new JackHandle;
2384 catch ( std::bad_alloc& ) {
2385 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2389 if ( pthread_cond_init(&handle->condition, NULL) ) {
2390 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2393 stream_.apiHandle = (void *) handle;
2394 handle->client = client;
2396 handle->deviceName[mode] = deviceName;
2398 // Allocate necessary internal buffers.
2399 unsigned long bufferBytes;
2400 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2401 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2402 if ( stream_.userBuffer[mode] == NULL ) {
2403 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2407 if ( stream_.doConvertBuffer[mode] ) {
// The shared device buffer is (re)allocated only when this mode
// needs more space than an existing duplex partner already provides.
2409 bool makeBuffer = true;
2410 if ( mode == OUTPUT )
2411 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2412 else { // mode == INPUT
2413 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2414 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2415 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2416 if ( bufferBytes < bytesOut ) makeBuffer = false;
2421 bufferBytes *= *bufferSize;
2422 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2423 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2424 if ( stream_.deviceBuffer == NULL ) {
2425 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2431 // Allocate memory for the Jack ports (channels) identifiers.
2432 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2433 if ( handle->ports[mode] == NULL ) {
2434 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2438 stream_.device[mode] = device;
2439 stream_.channelOffset[mode] = firstChannel;
2440 stream_.state = STREAM_STOPPED;
2441 stream_.callbackInfo.object = (void *) this;
2443 if ( stream_.mode == OUTPUT && mode == INPUT )
2444 // We had already set up the stream for output.
2445 stream_.mode = DUPLEX;
2447 stream_.mode = mode;
// Wire up the JACK callbacks for processing, xruns and server
// shutdown.
2448 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2449 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2450 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2453 // Register our ports.
2455 if ( mode == OUTPUT ) {
2456 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2457 snprintf( label, 64, "outport %d", i );
2458 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2459 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2463 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2464 snprintf( label, 64, "inport %d", i );
2465 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2466 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2470 // Setup the buffer conversion information structure. We don't use
2471 // buffers to do channel offsets, so we override that parameter
2473 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2475 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// Shared error-exit cleanup: tear down the handle, free buffers and
// reset the stream state before reporting failure.
2481 pthread_cond_destroy( &handle->condition );
2482 jack_client_close( handle->client );
2484 if ( handle->ports[0] ) free( handle->ports[0] );
2485 if ( handle->ports[1] ) free( handle->ports[1] );
2488 stream_.apiHandle = 0;
2491 for ( int i=0; i<2; i++ ) {
2492 if ( stream_.userBuffer[i] ) {
2493 free( stream_.userBuffer[i] );
2494 stream_.userBuffer[i] = 0;
2498 if ( stream_.deviceBuffer ) {
2499 free( stream_.deviceBuffer );
2500 stream_.deviceBuffer = 0;
// Close the open stream: deactivate and close the JACK client, free
// the port arrays, condition variable and audio buffers, and reset
// the stream state to CLOSED.
2506 void RtApiJack :: closeStream( void )
2508 if ( stream_.state == STREAM_CLOSED ) {
2509 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2510 error( RtAudioError::WARNING );
2514 JackHandle *handle = (JackHandle *) stream_.apiHandle;
// A running stream must be deactivated before the client is closed.
2517 if ( stream_.state == STREAM_RUNNING )
2518 jack_deactivate( handle->client );
2520 jack_client_close( handle->client );
2524 if ( handle->ports[0] ) free( handle->ports[0] );
2525 if ( handle->ports[1] ) free( handle->ports[1] );
2526 pthread_cond_destroy( &handle->condition );
2528 stream_.apiHandle = 0;
2531 for ( int i=0; i<2; i++ ) {
2532 if ( stream_.userBuffer[i] ) {
2533 free( stream_.userBuffer[i] );
2534 stream_.userBuffer[i] = 0;
2538 if ( stream_.deviceBuffer ) {
2539 free( stream_.deviceBuffer );
2540 stream_.deviceBuffer = 0;
2543 stream_.mode = UNINITIALIZED;
2544 stream_.state = STREAM_CLOSED;
// Start the stream: activate the JACK client and, unless autoconnect
// was disabled, connect our registered ports to the target device's
// ports (with the configured channel offset).  On any failure the
// accumulated error is reported via error() at the end.
2547 void RtApiJack :: startStream( void )
2550 if ( stream_.state == STREAM_RUNNING ) {
2551 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2552 error( RtAudioError::WARNING );
// Reset the stream-time reference for tickStreamTime().
2556 #if defined( HAVE_GETTIMEOFDAY )
2557 gettimeofday( &stream_.lastTickTimestamp, NULL );
2560 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2561 int result = jack_activate( handle->client );
2563 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2569 // Get the list of available ports.
2570 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2572 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2573 if ( ports == NULL) {
2574 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2578 // Now make the port connections. Since RtAudio wasn't designed to
2579 // allow the user to select particular channels of a device, we'll
2580 // just open the first "nChannels" ports with offset.
2581 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2583 if ( ports[ stream_.channelOffset[0] + i ] )
2584 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2587 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
// Same procedure for the capture direction of the stream.
2594 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2596 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2597 if ( ports == NULL) {
2598 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2602 // Now make the port connections. See note above.
2603 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2605 if ( ports[ stream_.channelOffset[1] + i ] )
2606 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2609 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Reset the drain bookkeeping and mark the stream as running.
2616 handle->drainCounter = 0;
2617 handle->internalDrain = false;
2618 stream_.state = STREAM_RUNNING;
2621 if ( result == 0 ) return;
2622 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: if output is involved, request a drain
// (drainCounter = 2) and block on the condition variable until
// callbackEvent() signals that output has been flushed, then
// deactivate the JACK client.
2625 void RtApiJack :: stopStream( void )
2628 if ( stream_.state == STREAM_STOPPED ) {
2629 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2630 error( RtAudioError::WARNING );
2634 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2635 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Only initiate a drain if one is not already in progress.
2637 if ( handle->drainCounter == 0 ) {
2638 handle->drainCounter = 2;
2639 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2643 jack_deactivate( handle->client );
2644 stream_.state = STREAM_STOPPED;
// Abort the stream: set the drain counter so the callback zeroes its
// output instead of draining user data.
// NOTE(review): the original presumably delegates to stopStream()
// after setting drainCounter — that call is not visible in this
// excerpt; confirm against the full source.
2647 void RtApiJack :: abortStream( void )
2650 if ( stream_.state == STREAM_STOPPED ) {
2651 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2652 error( RtAudioError::WARNING );
2656 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2657 handle->drainCounter = 2;
2662 // This function will be called by a spawned thread when the user
2663 // callback function signals that the stream should be stopped or
2664 // aborted. It is necessary to handle it this way because the
2665 // callbackEvent() function must return before the jack_deactivate()
2666 // function will return.
// Thread entry point: stops the stream on behalf of callbackEvent().
2667 static void *jackStopStream( void *ptr )
2669 CallbackInfo *info = (CallbackInfo *) ptr;
2670 RtApiJack *object = (RtApiJack *) info->object;
2672 object->stopStream();
2673 pthread_exit( NULL );
// Per-cycle JACK processing: invokes the user callback, then moves
// audio between the user/device buffers and the per-channel JACK port
// buffers (converting format/interleaving as configured).  Also
// implements the drain handshake used by stopStream()/abortStream().
// Returns SUCCESS to keep processing, FAILURE otherwise.
2676 bool RtApiJack :: callbackEvent( unsigned long nframes )
2678 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2679 if ( stream_.state == STREAM_CLOSED ) {
// NOTE(review): these two messages say "RtApiCore::" — apparently
// copied from the CoreAudio section; the text should likely read
// "RtApiJack::".  Left unchanged here (runtime string).
2680 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2681 error( RtAudioError::WARNING );
2684 if ( stream_.bufferSize != nframes ) {
2685 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2686 error( RtAudioError::WARNING );
2690 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2691 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2693 // Check if we were draining the stream and signal is finished.
2694 if ( handle->drainCounter > 3 ) {
2695 ThreadHandle threadId;
2697 stream_.state = STREAM_STOPPING;
// An internal drain (callback returned 1) stops the stream from a
// helper thread; an external one unblocks the waiting stopStream().
2698 if ( handle->internalDrain == true )
2699 pthread_create( &threadId, NULL, jackStopStream, info );
2701 pthread_cond_signal( &handle->condition );
2705 // Invoke user callback first, to get fresh output data.
2706 if ( handle->drainCounter == 0 ) {
2707 RtAudioCallback callback = (RtAudioCallback) info->callback;
2708 double streamTime = getStreamTime();
2709 RtAudioStreamStatus status = 0;
// Report and clear any xrun flags latched by jackXrun().
2710 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2711 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2712 handle->xrun[0] = false;
2714 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2715 status |= RTAUDIO_INPUT_OVERFLOW;
2716 handle->xrun[1] = false;
2718 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2719 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort immediately; 1 = drain then stop.
2720 if ( cbReturnValue == 2 ) {
2721 stream_.state = STREAM_STOPPING;
2722 handle->drainCounter = 2;
2724 pthread_create( &id, NULL, jackStopStream, info );
2727 else if ( cbReturnValue == 1 ) {
2728 handle->drainCounter = 1;
2729 handle->internalDrain = true;
// bufferBytes is the size of one channel's worth of sample frames.
2733 jack_default_audio_sample_t *jackbuffer;
2734 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2735 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2737 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2739 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2740 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2741 memset( jackbuffer, 0, bufferBytes );
2745 else if ( stream_.doConvertBuffer[0] ) {
// Convert the user buffer into the non-interleaved device buffer,
// then scatter one channel to each JACK port.
2747 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2749 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2750 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2751 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2754 else { // no buffer conversion
2755 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2756 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2757 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2762 // Don't bother draining input
2763 if ( handle->drainCounter ) {
2764 handle->drainCounter++;
2768 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2770 if ( stream_.doConvertBuffer[1] ) {
// Gather each JACK port into the device buffer, then convert to the
// user's format/interleaving.
2771 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2772 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2773 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2775 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2777 else { // no buffer conversion
2778 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2779 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2780 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2786 RtApi::tickStreamTime();
2789 //******************** End of __UNIX_JACK__ *********************//
2792 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2794 // The ASIO API is designed around a callback scheme, so this
2795 // implementation is similar to that used for OS-X CoreAudio and Linux
2796 // Jack. The primary constraint with ASIO is that it only allows
2797 // access to a single driver at a time. Thus, it is not possible to
2798 // have more than one simultaneous RtAudio stream.
2800 // This implementation also requires a number of external ASIO files
2801 // and a few global variables. The ASIO callback scheme does not
2802 // allow for the passing of user data, so we must create a global
2803 // pointer to our callbackInfo structure.
2805 // On unix systems, we make use of a pthread condition variable.
2806 // Since there is no equivalent in Windows, I hacked something based
2807 // on information found in
2808 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2810 #include "asiosys.h"
2812 #include "iasiothiscallresolver.h"
2813 #include "asiodrivers.h"
// File-scope state for the ASIO backend.  Globals are required here
// because the ASIO callback scheme cannot carry user data (see the
// section comment above), and ASIO permits only one driver at a time.
2816 static AsioDrivers drivers;
2817 static ASIOCallbacks asioCallbacks;
2818 static ASIODriverInfo driverInfo;
2819 static CallbackInfo *asioCallbackInfo;
2820 static bool asioXRun;
// Members of the per-stream ASIO handle structure.
// NOTE(review): the struct's declaration line is not visible in this
// excerpt — confirm these belong to AsioHandle in the full source.
2823 int drainCounter; // Tracks callback counts when draining
2824 bool internalDrain; // Indicates if stop is initiated from callback or not.
2825 ASIOBufferInfo *bufferInfos;
// Default constructor: zeroed drain state, no buffer info array yet.
2829 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2832 // Function declarations (definitions at end of section)
// Maps an ASIOError code to a printable message.
2833 static const char* getAsioErrorString( ASIOError result );
// ASIO driver notification: the device sample rate changed.
2834 static void sampleRateChanged( ASIOSampleRate sRate );
// ASIO driver message dispatcher (selector-based protocol).
2835 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM for this thread (ASIO requires a
// single-threaded apartment), reset the current driver and prepare
// the shared driver-info structure.
2837 RtApiAsio :: RtApiAsio()
2839 // ASIO cannot run on a multi-threaded apartment. You can call
2840 // CoInitialize beforehand, but it must be for apartment threading
2841 // (in which case, CoInitialize will return S_FALSE here).
2842 coInitialized_ = false;
2843 HRESULT hr = CoInitialize( NULL );
2845 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2846 error( RtAudioError::WARNING );
// Remember that we own the CoInitialize so the destructor can
// balance it with CoUninitialize.
2848 coInitialized_ = true;
2850 drivers.removeCurrentDriver();
2851 driverInfo.asioVersion = 2;
2853 // See note in DirectSound implementation about GetDesktopWindow().
2854 driverInfo.sysRef = GetForegroundWindow();
2857 RtApiAsio :: ~RtApiAsio()
2859 if ( stream_.state != STREAM_CLOSED ) closeStream();
2860 if ( coInitialized_ ) CoUninitialize();
2863 unsigned int RtApiAsio :: getDeviceCount( void )
2865 return (unsigned int) drivers.asioGetNumDev();
// Probe a single ASIO driver and return its capabilities (channel counts,
// sample rates, native format).  Loads the driver, queries it, then unloads
// it again — which is why this cannot run while a stream is open.
// NOTE(review): the opening/closing braces and the early "return info;"
// statements of the error branches fall in lines not visible in this chunk.
RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
RtAudio::DeviceInfo info;
info.probed = false;  // set true only after a full successful probe (in elided lines)

unsigned int nDevices = getDeviceCount();
if ( nDevices == 0 ) {
errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
error( RtAudioError::INVALID_USE );

if ( device >= nDevices ) {
errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
error( RtAudioError::INVALID_USE );

// If a stream is already open, we cannot probe other devices. Thus, use the saved results.
if ( stream_.state != STREAM_CLOSED ) {
if ( device >= devices_.size() ) {
errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
error( RtAudioError::WARNING );
return devices_[ device ];  // cached by saveDeviceInfo() before the stream opened

char driverName[32];
ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
if ( result != ASE_OK ) {
errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );

info.name = driverName;

if ( !drivers.loadDriver( driverName ) ) {
errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );

result = ASIOInit( &driverInfo );
if ( result != ASE_OK ) {
errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );

// Determine the device channel information.
long inputChannels, outputChannels;
result = ASIOGetChannels( &inputChannels, &outputChannels );
if ( result != ASE_OK ) {
drivers.removeCurrentDriver();  // unload before bailing out
errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );

info.outputChannels = outputChannels;
info.inputChannels = inputChannels;
if ( info.outputChannels > 0 && info.inputChannels > 0 )
info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

// Determine the supported sample rates.
info.sampleRates.clear();
for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
if ( result == ASE_OK ) {
info.sampleRates.push_back( SAMPLE_RATES[i] );

// Prefer the highest supported rate that does not exceed 48 kHz.
if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
info.preferredSampleRate = SAMPLE_RATES[i];

// Determine supported data types ... just check first channel and assume rest are the same.
ASIOChannelInfo channelInfo;
channelInfo.channel = 0;
channelInfo.isInput = true;
if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
result = ASIOGetChannelInfo( &channelInfo );
if ( result != ASE_OK ) {
drivers.removeCurrentDriver();
errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );

// Map the ASIO sample type to an RtAudio format flag (byte order handled
// at stream-open time; only one native format is reported).
info.nativeFormats = 0;
if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
info.nativeFormats |= RTAUDIO_SINT16;
else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
info.nativeFormats |= RTAUDIO_SINT32;
else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
info.nativeFormats |= RTAUDIO_FLOAT32;
else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
info.nativeFormats |= RTAUDIO_FLOAT64;
else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
info.nativeFormats |= RTAUDIO_SINT24;

if ( info.outputChannels > 0 )
if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
if ( info.inputChannels > 0 )
if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

drivers.removeCurrentDriver();  // probing done; unload the driver again
2987 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2989 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2990 object->callbackEvent( index );
2993 void RtApiAsio :: saveDeviceInfo( void )
2997 unsigned int nDevices = getDeviceCount();
2998 devices_.resize( nDevices );
2999 for ( unsigned int i=0; i<nDevices; i++ )
3000 devices_[i] = getDeviceInfo( i );
// Open (or extend to duplex) a stream on the given ASIO driver: load/init
// the driver (output side only), validate channels/rate/format, negotiate a
// buffer size, create the ASIO buffers, and allocate RtAudio's user/device
// conversion buffers.  Returns SUCCESS/FAILURE to RtApi::openStream().
// NOTE(review): the "goto error" / "return FAILURE" statements, the error:
// label, and several closing braces of this function fall in lines not
// visible in this chunk — the error-cleanup tail near the end is only
// reached via those elided gotos.
bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
unsigned int firstChannel, unsigned int sampleRate,
RtAudioFormat format, unsigned int *bufferSize,
RtAudio::StreamOptions *options )
{
// True when the output half is already open and we are now adding input.
bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;

// For ASIO, a duplex stream MUST use the same driver.
if ( isDuplexInput && stream_.device[0] != device ) {
errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";

char driverName[32];
ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
if ( result != ASE_OK ) {
errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
errorText_ = errorStream_.str();

// Only load the driver once for duplex stream.
if ( !isDuplexInput ) {
// The getDeviceInfo() function will not work when a stream is open
// because ASIO does not allow multiple devices to run at the same
// time. Thus, we'll probe the system before opening a stream and
// save the results for use by getDeviceInfo().
this->saveDeviceInfo();

if ( !drivers.loadDriver( driverName ) ) {
errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
errorText_ = errorStream_.str();

result = ASIOInit( &driverInfo );
if ( result != ASE_OK ) {
errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
errorText_ = errorStream_.str();

// keep them before any "goto error", they are used for error cleanup + goto device boundary checks
bool buffersAllocated = false;
AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
unsigned int nChannels;

// Check the device channel count.
long inputChannels, outputChannels;
result = ASIOGetChannels( &inputChannels, &outputChannels );
if ( result != ASE_OK ) {
errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
errorText_ = errorStream_.str();

if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
errorText_ = errorStream_.str();

stream_.nDeviceChannels[mode] = channels;
stream_.nUserChannels[mode] = channels;
stream_.channelOffset[mode] = firstChannel;

// Verify the sample rate is supported.
result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
if ( result != ASE_OK ) {
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
errorText_ = errorStream_.str();

// Get the current sample rate
ASIOSampleRate currentRate;
// NOTE(review): "¤tRate" below is a mojibake of "&currentRate" (the
// sequence "&curren" was decoded as the HTML entity for '¤') — the call
// must read ASIOGetSampleRate( &currentRate ); fix the encoding damage.
result = ASIOGetSampleRate( ¤tRate );
if ( result != ASE_OK ) {
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
errorText_ = errorStream_.str();

// Set the sample rate only if necessary
if ( currentRate != sampleRate ) {
result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
if ( result != ASE_OK ) {
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
errorText_ = errorStream_.str();

// Determine the driver data type.
ASIOChannelInfo channelInfo;
channelInfo.channel = 0;
if ( mode == OUTPUT ) channelInfo.isInput = false;
else channelInfo.isInput = true;
result = ASIOGetChannelInfo( &channelInfo );
if ( result != ASE_OK ) {
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
errorText_ = errorStream_.str();

// Assuming WINDOWS host is always little-endian.
stream_.doByteSwap[mode] = false;
stream_.userFormat = format;
stream_.deviceFormat[mode] = 0;
if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
stream_.deviceFormat[mode] = RTAUDIO_SINT16;
if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;

else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
stream_.deviceFormat[mode] = RTAUDIO_SINT32;
if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;

else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;

else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;

else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
stream_.deviceFormat[mode] = RTAUDIO_SINT24;
if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;

if ( stream_.deviceFormat[mode] == 0 ) {
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
errorText_ = errorStream_.str();

// Set the buffer size. For a duplex stream, this will end up
// setting the buffer size based on the input constraints, which
long minSize, maxSize, preferSize, granularity;
result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
if ( result != ASE_OK ) {
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
errorText_ = errorStream_.str();

if ( isDuplexInput ) {
// When this is the duplex input (output was opened before), then we have to use the same
// buffersize as the output, because it might use the preferred buffer size, which most
// likely wasn't passed as input to this. The buffer sizes have to be identical anyway,
// So instead of throwing an error, make them equal. The caller uses the reference
// to the "bufferSize" param as usual to set up processing buffers.
*bufferSize = stream_.bufferSize;

if ( *bufferSize == 0 ) *bufferSize = preferSize;
else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
else if ( granularity == -1 ) {
// Make sure bufferSize is a power of two.
int log2_of_min_size = 0;
int log2_of_max_size = 0;

// Find the highest set bit of each bound (i.e. floor(log2)).
for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;

// Choose the power of two closest to the requested size.
// NOTE(review): the "min_delta_num = i;" update inside the loop falls in
// lines not visible in this chunk.
long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
int min_delta_num = log2_of_min_size;

for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
if (current_delta < min_delta) {
min_delta = current_delta;

*bufferSize = ( (unsigned int)1 << min_delta_num );
if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;

else if ( granularity != 0 ) {
// Set to an even multiple of granularity, rounding up.
*bufferSize = (*bufferSize + granularity-1) / granularity * granularity;

// we don't use it anymore, see above!
// Just left it here for the case...
if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";

stream_.bufferSize = *bufferSize;
stream_.nBuffers = 2;  // ASIO double-buffers internally

if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
else stream_.userInterleaved = true;

// ASIO always uses non-interleaved buffers.
stream_.deviceInterleaved[mode] = false;

// Allocate, if necessary, our AsioHandle structure for the stream.
if ( handle == 0 ) {
handle = new AsioHandle;
catch ( std::bad_alloc& ) {
errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
handle->bufferInfos = 0;

// Create a manual-reset event.
handle->condition = CreateEvent( NULL, // no security
TRUE, // manual-reset
FALSE, // non-signaled initially
stream_.apiHandle = (void *) handle;

// Create the ASIO internal buffers. Since RtAudio sets up input
// and output separately, we'll have to dispose of previously
// created output buffers for a duplex stream.
if ( mode == INPUT && stream_.mode == OUTPUT ) {
ASIODisposeBuffers();
if ( handle->bufferInfos ) free( handle->bufferInfos );

// Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
if ( handle->bufferInfos == NULL ) {
errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
errorText_ = errorStream_.str();

// Output channels first, then input channels, matching the callback's scan order.
ASIOBufferInfo *infos;
infos = handle->bufferInfos;
for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
infos->isInput = ASIOFalse;
infos->channelNum = i + stream_.channelOffset[0];
infos->buffers[0] = infos->buffers[1] = 0;
for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
infos->isInput = ASIOTrue;
infos->channelNum = i + stream_.channelOffset[1];
infos->buffers[0] = infos->buffers[1] = 0;

// prepare for callbacks
stream_.sampleRate = sampleRate;
stream_.device[mode] = device;
stream_.mode = isDuplexInput ? DUPLEX : mode;

// store this class instance before registering callbacks, that are going to use it
asioCallbackInfo = &stream_.callbackInfo;
stream_.callbackInfo.object = (void *) this;

// Set up the ASIO callback structure and create the ASIO data buffers.
asioCallbacks.bufferSwitch = &bufferSwitch;
asioCallbacks.sampleRateDidChange = &sampleRateChanged;
asioCallbacks.asioMessage = &asioMessages;
asioCallbacks.bufferSwitchTimeInfo = NULL;
result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
if ( result != ASE_OK ) {
// Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
// but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
// In that case, let's be naïve and try that instead.
*bufferSize = preferSize;
stream_.bufferSize = *bufferSize;
result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );

if ( result != ASE_OK ) {
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
errorText_ = errorStream_.str();
buffersAllocated = true;
stream_.state = STREAM_STOPPED;

// Set flags for buffer conversion.
stream_.doConvertBuffer[mode] = false;
if ( stream_.userFormat != stream_.deviceFormat[mode] )
stream_.doConvertBuffer[mode] = true;
if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
stream_.nUserChannels[mode] > 1 )
stream_.doConvertBuffer[mode] = true;

// Allocate necessary internal buffers
unsigned long bufferBytes;
bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
if ( stream_.userBuffer[mode] == NULL ) {
errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";

if ( stream_.doConvertBuffer[mode] ) {
// Reuse the duplex output's device buffer when it is already big enough.
bool makeBuffer = true;
bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
if ( isDuplexInput && stream_.deviceBuffer ) {
unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
if ( bufferBytes <= bytesOut ) makeBuffer = false;

bufferBytes *= *bufferSize;
if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
if ( stream_.deviceBuffer == NULL ) {
errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";

// Determine device latencies
long inputLatency, outputLatency;
result = ASIOGetLatencies( &inputLatency, &outputLatency );
if ( result != ASE_OK ) {
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING); // warn but don't fail
stream_.latency[0] = outputLatency;
stream_.latency[1] = inputLatency;

// Setup the buffer conversion information structure. We don't use
// buffers to do channel offsets, so we override that parameter
if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );

// --- error-cleanup tail: reached only via the elided "goto error" jumps ---
if ( !isDuplexInput ) {
// the cleanup for error in the duplex input, is done by RtApi::openStream
// So we clean up for single channel only
if ( buffersAllocated )
ASIODisposeBuffers();
drivers.removeCurrentDriver();

CloseHandle( handle->condition );
if ( handle->bufferInfos )
free( handle->bufferInfos );
stream_.apiHandle = 0;

if ( stream_.userBuffer[mode] ) {
free( stream_.userBuffer[mode] );
stream_.userBuffer[mode] = 0;

if ( stream_.deviceBuffer ) {
free( stream_.deviceBuffer );
stream_.deviceBuffer = 0;
}
// Close the open stream: stop it if running, dispose the ASIO buffers,
// unload the driver, and free all handle and conversion buffers.
// NOTE(review): the ASIOStop() call for the running case, the
// "if ( handle )" guard, the delete of the handle, and several braces fall
// in lines not visible in this chunk.
void RtApiAsio :: closeStream()
if ( stream_.state == STREAM_CLOSED ) {
errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
error( RtAudioError::WARNING );

if ( stream_.state == STREAM_RUNNING ) {
stream_.state = STREAM_STOPPED;

ASIODisposeBuffers();
drivers.removeCurrentDriver();

AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
CloseHandle( handle->condition );
if ( handle->bufferInfos )
free( handle->bufferInfos );
stream_.apiHandle = 0;

for ( int i=0; i<2; i++ ) {
if ( stream_.userBuffer[i] ) {
free( stream_.userBuffer[i] );
stream_.userBuffer[i] = 0;

if ( stream_.deviceBuffer ) {
free( stream_.deviceBuffer );
stream_.deviceBuffer = 0;

stream_.mode = UNINITIALIZED;
stream_.state = STREAM_CLOSED;
// File-scope guard used by the start/stop logic of this API section.
bool stopThreadCalled = false;

// Start the ASIO device and mark the stream running.
// NOTE(review): the opening brace, the #endif matching HAVE_GETTIMEOFDAY,
// the "goto unlock"-style error path, and the closing brace fall in lines
// not visible in this chunk.
void RtApiAsio :: startStream()
if ( stream_.state == STREAM_RUNNING ) {
errorText_ = "RtApiAsio::startStream(): the stream is already running!";
error( RtAudioError::WARNING );

#if defined( HAVE_GETTIMEOFDAY )
gettimeofday( &stream_.lastTickTimestamp, NULL );

AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
ASIOError result = ASIOStart();
if ( result != ASE_OK ) {
errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
errorText_ = errorStream_.str();

handle->drainCounter = 0;
handle->internalDrain = false;
ResetEvent( handle->condition );  // manual-reset event used to signal drain completion
stream_.state = STREAM_RUNNING;

stopThreadCalled = false;

if ( result == ASE_OK ) return;
error( RtAudioError::SYSTEM_ERROR );
// Stop the stream, first letting any output drain: set drainCounter and
// block on the handle's event until callbackEvent() signals completion,
// then call ASIOStop().
// NOTE(review): the braces around this body and the early return of the
// already-stopped branch fall in lines not visible in this chunk.
void RtApiAsio :: stopStream()
if ( stream_.state == STREAM_STOPPED ) {
errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
error( RtAudioError::WARNING );

AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
if ( handle->drainCounter == 0 ) {
handle->drainCounter = 2;  // tells callbackEvent() to output silence and count down
WaitForSingleObject( handle->condition, INFINITE ); // block until signaled

stream_.state = STREAM_STOPPED;

ASIOError result = ASIOStop();
if ( result != ASE_OK ) {
errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
errorText_ = errorStream_.str();

if ( result == ASE_OK ) return;
error( RtAudioError::SYSTEM_ERROR );
// Abort is implemented as a plain stop for ASIO (see comment below).
// NOTE(review): the braces and the final "stopStream();" call fall in
// lines not visible in this chunk.
void RtApiAsio :: abortStream()
if ( stream_.state == STREAM_STOPPED ) {
errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
error( RtAudioError::WARNING );

// The following lines were commented-out because some behavior was
// noted where the device buffers need to be zeroed to avoid
// continuing sound, even when the device buffers are completely
// disposed. So now, calling abort is the same as calling stop.
// AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
// handle->drainCounter = 2;
3511 // This function will be called by a spawned thread when the user
3512 // callback function signals that the stream should be stopped or
3513 // aborted. It is necessary to handle it this way because the
3514 // callbackEvent() function must return before the ASIOStop()
3515 // function will return.
3516 static unsigned __stdcall asioStopStream( void *ptr )
3518 CallbackInfo *info = (CallbackInfo *) ptr;
3519 RtApiAsio *object = (RtApiAsio *) info->object;
3521 object->stopStream();
// Per-buffer worker invoked from bufferSwitch(): handles drain signaling,
// invokes the user callback, and copies/converts data between the user
// buffers and the per-channel ASIO half-buffers selected by bufferIndex.
// NOTE(review): several elided lines are not visible in this chunk — the
// "unsigned int threadId" declarations before each _beginthreadex call, the
// asioXRun=false resets, the "goto unlock" jumps, the unlock: label with
// ASIOOutputReady(), and the final "return SUCCESS;".
bool RtApiAsio :: callbackEvent( long bufferIndex )
if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
if ( stream_.state == STREAM_CLOSED ) {
errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
error( RtAudioError::WARNING );

CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

// Check if we were draining the stream and signal if finished.
if ( handle->drainCounter > 3 ) {
stream_.state = STREAM_STOPPING;
if ( handle->internalDrain == false )
SetEvent( handle->condition );  // wake the thread blocked in stopStream()
else { // spawn a thread to stop the stream
stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
&stream_.callbackInfo, 0, &threadId );

// Invoke user callback to get fresh output data UNLESS we are
if ( handle->drainCounter == 0 ) {
RtAudioCallback callback = (RtAudioCallback) info->callback;
double streamTime = getStreamTime();
RtAudioStreamStatus status = 0;
if ( stream_.mode != INPUT && asioXRun == true ) {
status |= RTAUDIO_OUTPUT_UNDERFLOW;
if ( stream_.mode != OUTPUT && asioXRun == true ) {
status |= RTAUDIO_INPUT_OVERFLOW;
int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
stream_.bufferSize, streamTime, status, info->userData );
if ( cbReturnValue == 2 ) {  // callback requested abort
stream_.state = STREAM_STOPPING;
handle->drainCounter = 2;
stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
&stream_.callbackInfo, 0, &threadId );
else if ( cbReturnValue == 1 ) {  // callback requested stop after drain
handle->drainCounter = 1;
handle->internalDrain = true;

unsigned int nChannels, bufferBytes, i, j;
nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

if ( handle->drainCounter > 1 ) { // write zeros to the output stream
for ( i=0, j=0; i<nChannels; i++ ) {
if ( handle->bufferInfos[i].isInput != ASIOTrue )
memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );

else if ( stream_.doConvertBuffer[0] ) {
// Convert user format/interleaving to the device layout, then scatter
// each channel into its ASIO buffer.
convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
if ( stream_.doByteSwap[0] )
byteSwapBuffer( stream_.deviceBuffer,
stream_.bufferSize * stream_.nDeviceChannels[0],
stream_.deviceFormat[0] );
for ( i=0, j=0; i<nChannels; i++ ) {
if ( handle->bufferInfos[i].isInput != ASIOTrue )
memcpy( handle->bufferInfos[i].buffers[bufferIndex],
&stream_.deviceBuffer[j++*bufferBytes], bufferBytes );

// No conversion needed: copy user channels straight out.
if ( stream_.doByteSwap[0] )
byteSwapBuffer( stream_.userBuffer[0],
stream_.bufferSize * stream_.nUserChannels[0],
stream_.userFormat );
for ( i=0, j=0; i<nChannels; i++ ) {
if ( handle->bufferInfos[i].isInput != ASIOTrue )
memcpy( handle->bufferInfos[i].buffers[bufferIndex],
&stream_.userBuffer[0][bufferBytes*j++], bufferBytes );

// Don't bother draining input
if ( handle->drainCounter ) {
handle->drainCounter++;

if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

if (stream_.doConvertBuffer[1]) {

// Always interleave ASIO input data.
for ( i=0, j=0; i<nChannels; i++ ) {
if ( handle->bufferInfos[i].isInput == ASIOTrue )
memcpy( &stream_.deviceBuffer[j++*bufferBytes],
handle->bufferInfos[i].buffers[bufferIndex],

if ( stream_.doByteSwap[1] )
byteSwapBuffer( stream_.deviceBuffer,
stream_.bufferSize * stream_.nDeviceChannels[1],
stream_.deviceFormat[1] );
convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

// No conversion needed: copy each ASIO input channel straight in.
for ( i=0, j=0; i<nChannels; i++ ) {
if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
memcpy( &stream_.userBuffer[1][bufferBytes*j++],
handle->bufferInfos[i].buffers[bufferIndex],

if ( stream_.doByteSwap[1] )
byteSwapBuffer( stream_.userBuffer[1],
stream_.bufferSize * stream_.nUserChannels[1],
stream_.userFormat );

// The following call was suggested by Malte Clasen. While the API
// documentation indicates it should not be required, some device
// drivers apparently do not function correctly without it.

RtApi::tickStreamTime();
3680 static void sampleRateChanged( ASIOSampleRate sRate )
3682 // The ASIO documentation says that this usually only happens during
3683 // external sync. Audio processing is not stopped by the driver,
3684 // actual sample rate might not have even changed, maybe only the
3685 // sample rate status of an AES/EBU or S/PDIF digital input at the
3688 RtApi *object = (RtApi *) asioCallbackInfo->object;
3690 object->stopStream();
3692 catch ( RtAudioError &exception ) {
3693 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3697 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO host-message callback: answers driver queries (supported selectors,
// engine version, timeInfo/timeCode support) and reacts to reset/resync/
// latency notifications.
// NOTE(review): the "long ret = 0;" declaration, the per-case "ret = ...;"
// assignments / break statements, and the final "return ret;" fall in lines
// not visible in this chunk.
static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
switch( selector ) {
case kAsioSelectorSupported:
if ( value == kAsioResetRequest
|| value == kAsioEngineVersion
|| value == kAsioResyncRequest
|| value == kAsioLatenciesChanged
// The following three were added for ASIO 2.0, you don't
// necessarily have to support them.
|| value == kAsioSupportsTimeInfo
|| value == kAsioSupportsTimeCode
|| value == kAsioSupportsInputMonitor)
case kAsioResetRequest:
// Defer the task and perform the reset of the driver during the
// next "safe" situation. You cannot reset the driver right now,
// as this code is called from the driver. Resetting the driver is
// done by completely destructing it, i.e. ASIOStop(),
// ASIODisposeBuffers(), destruction; afterwards you re-initialize.
std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
case kAsioResyncRequest:
// This informs the application that the driver encountered some
// non-fatal data loss. It is used for synchronization purposes
// of different media. Added mainly to work around the Win16Mutex
// problems in Windows 95/98 with the Windows Multimedia system,
// which could lose data because the Mutex was held too long by
// another thread. However a driver can issue it in other
// situations as well.  (Sets the asioXRun flag in elided lines.)
// std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
case kAsioLatenciesChanged:
// This will inform the host application that the driver's
// latencies changed. Beware, this does not mean that the
// buffer sizes have changed! You might need to update internal
// delay data.
std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
case kAsioEngineVersion:
// Return the supported ASIO version of the host application. If
// a host application does not implement this selector, ASIO 1.0
// is assumed by the driver.
case kAsioSupportsTimeInfo:
// Informs the driver whether the
// asioCallbacks.bufferSwitchTimeInfo() callback is supported.
// For compatibility with ASIO 1.0 drivers the host application
// should always support the "old" bufferSwitch method, too.
case kAsioSupportsTimeCode:
// Informs the driver whether application is interested in time
// code info. If an application does not need to know about time
// code, the driver has less work to do.
3770 static const char* getAsioErrorString( ASIOError result )
3778 static const Messages m[] =
3780 { ASE_NotPresent, "Hardware input or output is not present or available." },
3781 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3782 { ASE_InvalidParameter, "Invalid input parameter." },
3783 { ASE_InvalidMode, "Invalid mode." },
3784 { ASE_SPNotAdvancing, "Sample position not advancing." },
3785 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3786 { ASE_NoMemory, "Not enough memory to complete the request." }
3789 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3790 if ( m[i].value == result ) return m[i].message;
3792 return "Unknown error.";
3795 //******************** End of __WINDOWS_ASIO__ *********************//
3799 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3801 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3802 // - Introduces support for the Windows WASAPI API
3803 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3804 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3805 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3812 #include <mferror.h>
3814 #include <mftransform.h>
3815 #include <wmcodecdsp.h>
3817 #include <audioclient.h>
3819 #include <mmdeviceapi.h>
3820 #include <functiondiscoverykeys_devpkey.h>
3822 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3823 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3826 #ifndef MFSTARTUP_NOSOCKET
3827 #define MFSTARTUP_NOSOCKET 0x1
3831 #pragma comment( lib, "ksuser" )
3832 #pragma comment( lib, "mfplat.lib" )
3833 #pragma comment( lib, "mfuuid.lib" )
3834 #pragma comment( lib, "wmcodecdspuuid" )
3837 //=============================================================================
// Release a COM interface pointer and null it out.
// Guards against NULL so it is safe to call on never-acquired or
// already-released pointers (the chunk as shown dereferenced
// unconditionally). Note: expands to an if-statement, so callers
// may follow it with or without a semicolon.
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
3846 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3848 //-----------------------------------------------------------------------------
3850 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3851 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3852 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3853 // provide intermediate storage for read / write synchronization.
3867 // sets the length of the internal ring buffer
3868 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3871 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3873 bufferSize_ = bufferSize;
3878 // attempt to push a buffer into the ring buffer at the current "in" index
3879 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3881 if ( !buffer || // incoming buffer is NULL
3882 bufferSize == 0 || // incoming buffer has no data
3883 bufferSize > bufferSize_ ) // incoming buffer too large
3888 unsigned int relOutIndex = outIndex_;
3889 unsigned int inIndexEnd = inIndex_ + bufferSize;
3890 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3891 relOutIndex += bufferSize_;
3894 // the "IN" index CAN BEGIN at the "OUT" index
3895 // the "IN" index CANNOT END at the "OUT" index
3896 if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
3897 return false; // not enough space between "in" index and "out" index
3900 // copy buffer from external to internal
3901 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3902 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3903 int fromInSize = bufferSize - fromZeroSize;
3908 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3909 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3911 case RTAUDIO_SINT16:
3912 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3913 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3915 case RTAUDIO_SINT24:
3916 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3917 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3919 case RTAUDIO_SINT32:
3920 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3921 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3923 case RTAUDIO_FLOAT32:
3924 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3925 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3927 case RTAUDIO_FLOAT64:
3928 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3929 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3933 // update "in" index
3934 inIndex_ += bufferSize;
3935 inIndex_ %= bufferSize_;
3940 // attempt to pull a buffer from the ring buffer from the current "out" index
3941 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3943 if ( !buffer || // incoming buffer is NULL
3944 bufferSize == 0 || // incoming buffer has no data
3945 bufferSize > bufferSize_ ) // incoming buffer too large
3950 unsigned int relInIndex = inIndex_;
3951 unsigned int outIndexEnd = outIndex_ + bufferSize;
3952 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3953 relInIndex += bufferSize_;
3956 // the "OUT" index CANNOT BEGIN at the "IN" index
3957 // the "OUT" index CAN END at the "IN" index
3958 if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3959 return false; // not enough space between "out" index and "in" index
3962 // copy buffer from internal to external
3963 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3964 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3965 int fromOutSize = bufferSize - fromZeroSize;
3970 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3971 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3973 case RTAUDIO_SINT16:
3974 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3975 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3977 case RTAUDIO_SINT24:
3978 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3979 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3981 case RTAUDIO_SINT32:
3982 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3983 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3985 case RTAUDIO_FLOAT32:
3986 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3987 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3989 case RTAUDIO_FLOAT64:
3990 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3991 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3995 // update "out" index
3996 outIndex_ += bufferSize;
3997 outIndex_ %= bufferSize_;
4004 unsigned int bufferSize_;
4005 unsigned int inIndex_;
4006 unsigned int outIndex_;
4009 //-----------------------------------------------------------------------------
4011 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
4012 // between HW and the user. The WasapiResampler class is used to perform this conversion between
4013 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
4014 class WasapiResampler
4017 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
4018 unsigned int inSampleRate, unsigned int outSampleRate )
4019 : _bytesPerSample( bitsPerSample / 8 )
4020 , _channelCount( channelCount )
4021 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
4022 , _transformUnk( NULL )
4023 , _transform( NULL )
4024 , _mediaType( NULL )
4025 , _inputMediaType( NULL )
4026 , _outputMediaType( NULL )
4028 #ifdef __IWMResamplerProps_FWD_DEFINED__
4029 , _resamplerProps( NULL )
4032 // 1. Initialization
4034 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
4036 // 2. Create Resampler Transform Object
4038 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
4039 IID_IUnknown, ( void** ) &_transformUnk );
4041 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
4043 #ifdef __IWMResamplerProps_FWD_DEFINED__
4044 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
4045 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
4048 // 3. Specify input / output format
4050 MFCreateMediaType( &_mediaType );
4051 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4052 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4053 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4054 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4055 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4056 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4057 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4058 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
4060 MFCreateMediaType( &_inputMediaType );
4061 _mediaType->CopyAllItems( _inputMediaType );
4063 _transform->SetInputType( 0, _inputMediaType, 0 );
4065 MFCreateMediaType( &_outputMediaType );
4066 _mediaType->CopyAllItems( _outputMediaType );
4068 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4069 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4071 _transform->SetOutputType( 0, _outputMediaType, 0 );
4073 // 4. Send stream start messages to Resampler
4075 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4076 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4077 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
4082 // 8. Send stream stop messages to Resampler
4084 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4085 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
4091 SAFE_RELEASE( _transformUnk );
4092 SAFE_RELEASE( _transform );
4093 SAFE_RELEASE( _mediaType );
4094 SAFE_RELEASE( _inputMediaType );
4095 SAFE_RELEASE( _outputMediaType );
4097 #ifdef __IWMResamplerProps_FWD_DEFINED__
4098 SAFE_RELEASE( _resamplerProps );
4102 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
4104 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4105 if ( _sampleRatio == 1 )
4107 // no sample rate conversion required
4108 memcpy( outBuffer, inBuffer, inputBufferSize );
4109 outSampleCount = inSampleCount;
4113 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4115 IMFMediaBuffer* rInBuffer;
4116 IMFSample* rInSample;
4117 BYTE* rInByteBuffer = NULL;
4119 // 5. Create Sample object from input data
4121 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4123 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4124 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4125 rInBuffer->Unlock();
4126 rInByteBuffer = NULL;
4128 rInBuffer->SetCurrentLength( inputBufferSize );
4130 MFCreateSample( &rInSample );
4131 rInSample->AddBuffer( rInBuffer );
4133 // 6. Pass input data to Resampler
4135 _transform->ProcessInput( 0, rInSample, 0 );
4137 SAFE_RELEASE( rInBuffer );
4138 SAFE_RELEASE( rInSample );
4140 // 7. Perform sample rate conversion
4142 IMFMediaBuffer* rOutBuffer = NULL;
4143 BYTE* rOutByteBuffer = NULL;
4145 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4147 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4149 // 7.1 Create Sample object for output data
4151 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4152 MFCreateSample( &( rOutDataBuffer.pSample ) );
4153 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4154 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4155 rOutDataBuffer.dwStreamID = 0;
4156 rOutDataBuffer.dwStatus = 0;
4157 rOutDataBuffer.pEvents = NULL;
4159 // 7.2 Get output data from Resampler
4161 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4164 SAFE_RELEASE( rOutBuffer );
4165 SAFE_RELEASE( rOutDataBuffer.pSample );
4169 // 7.3 Write output data to outBuffer
4171 SAFE_RELEASE( rOutBuffer );
4172 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4173 rOutBuffer->GetCurrentLength( &rBytes );
4175 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4176 memcpy( outBuffer, rOutByteBuffer, rBytes );
4177 rOutBuffer->Unlock();
4178 rOutByteBuffer = NULL;
4180 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4181 SAFE_RELEASE( rOutBuffer );
4182 SAFE_RELEASE( rOutDataBuffer.pSample );
4186 unsigned int _bytesPerSample;
4187 unsigned int _channelCount;
4190 IUnknown* _transformUnk;
4191 IMFTransform* _transform;
4192 IMFMediaType* _mediaType;
4193 IMFMediaType* _inputMediaType;
4194 IMFMediaType* _outputMediaType;
4196 #ifdef __IWMResamplerProps_FWD_DEFINED__
4197 IWMResamplerProps* _resamplerProps;
4201 //-----------------------------------------------------------------------------
4203 // A structure to hold various information related to the WASAPI implementation.
4206 IAudioClient* captureAudioClient;
4207 IAudioClient* renderAudioClient;
4208 IAudioCaptureClient* captureClient;
4209 IAudioRenderClient* renderClient;
4210 HANDLE captureEvent;
4214 : captureAudioClient( NULL ),
4215 renderAudioClient( NULL ),
4216 captureClient( NULL ),
4217 renderClient( NULL ),
4218 captureEvent( NULL ),
4219 renderEvent( NULL ) {}
4222 //=============================================================================
4224 RtApiWasapi::RtApiWasapi()
4225 : coInitialized_( false ), deviceEnumerator_( NULL )
4227 // WASAPI can run either apartment or multi-threaded
4228 HRESULT hr = CoInitialize( NULL );
4229 if ( !FAILED( hr ) )
4230 coInitialized_ = true;
4232 // Instantiate device enumerator
4233 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4234 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4235 ( void** ) &deviceEnumerator_ );
4237 // If this runs on an old Windows, it will fail. Ignore and proceed.
4239 deviceEnumerator_ = NULL;
4242 //-----------------------------------------------------------------------------
4244 RtApiWasapi::~RtApiWasapi()
4246 if ( stream_.state != STREAM_CLOSED )
4249 SAFE_RELEASE( deviceEnumerator_ );
4251 // If this object previously called CoInitialize()
4252 if ( coInitialized_ )
4256 //=============================================================================
4258 unsigned int RtApiWasapi::getDeviceCount( void )
4260 unsigned int captureDeviceCount = 0;
4261 unsigned int renderDeviceCount = 0;
4263 IMMDeviceCollection* captureDevices = NULL;
4264 IMMDeviceCollection* renderDevices = NULL;
4266 if ( !deviceEnumerator_ )
4269 // Count capture devices
4271 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4272 if ( FAILED( hr ) ) {
4273 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4277 hr = captureDevices->GetCount( &captureDeviceCount );
4278 if ( FAILED( hr ) ) {
4279 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4283 // Count render devices
4284 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4285 if ( FAILED( hr ) ) {
4286 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4290 hr = renderDevices->GetCount( &renderDeviceCount );
4291 if ( FAILED( hr ) ) {
4292 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4297 // release all references
4298 SAFE_RELEASE( captureDevices );
4299 SAFE_RELEASE( renderDevices );
4301 if ( errorText_.empty() )
4302 return captureDeviceCount + renderDeviceCount;
4304 error( RtAudioError::DRIVER_ERROR );
4308 //-----------------------------------------------------------------------------
4310 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4312 RtAudio::DeviceInfo info;
4313 unsigned int captureDeviceCount = 0;
4314 unsigned int renderDeviceCount = 0;
4315 std::string defaultDeviceName;
4316 bool isCaptureDevice = false;
4318 PROPVARIANT deviceNameProp;
4319 PROPVARIANT defaultDeviceNameProp;
4321 IMMDeviceCollection* captureDevices = NULL;
4322 IMMDeviceCollection* renderDevices = NULL;
4323 IMMDevice* devicePtr = NULL;
4324 IMMDevice* defaultDevicePtr = NULL;
4325 IAudioClient* audioClient = NULL;
4326 IPropertyStore* devicePropStore = NULL;
4327 IPropertyStore* defaultDevicePropStore = NULL;
4329 WAVEFORMATEX* deviceFormat = NULL;
4330 WAVEFORMATEX* closestMatchFormat = NULL;
4333 info.probed = false;
4335 // Count capture devices
4337 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4338 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4339 if ( FAILED( hr ) ) {
4340 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4344 hr = captureDevices->GetCount( &captureDeviceCount );
4345 if ( FAILED( hr ) ) {
4346 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4350 // Count render devices
4351 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4352 if ( FAILED( hr ) ) {
4353 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4357 hr = renderDevices->GetCount( &renderDeviceCount );
4358 if ( FAILED( hr ) ) {
4359 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4363 // validate device index
4364 if ( device >= captureDeviceCount + renderDeviceCount ) {
4365 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4366 errorType = RtAudioError::INVALID_USE;
4370 // determine whether index falls within capture or render devices
4371 if ( device >= renderDeviceCount ) {
4372 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4373 if ( FAILED( hr ) ) {
4374 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4377 isCaptureDevice = true;
4380 hr = renderDevices->Item( device, &devicePtr );
4381 if ( FAILED( hr ) ) {
4382 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4385 isCaptureDevice = false;
4388 // get default device name
4389 if ( isCaptureDevice ) {
4390 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4391 if ( FAILED( hr ) ) {
4392 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4397 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4398 if ( FAILED( hr ) ) {
4399 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4404 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4405 if ( FAILED( hr ) ) {
4406 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4409 PropVariantInit( &defaultDeviceNameProp );
4411 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4412 if ( FAILED( hr ) ) {
4413 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4417 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4420 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4421 if ( FAILED( hr ) ) {
4422 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4426 PropVariantInit( &deviceNameProp );
4428 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4429 if ( FAILED( hr ) ) {
4430 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4434 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4437 if ( isCaptureDevice ) {
4438 info.isDefaultInput = info.name == defaultDeviceName;
4439 info.isDefaultOutput = false;
4442 info.isDefaultInput = false;
4443 info.isDefaultOutput = info.name == defaultDeviceName;
4447 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4448 if ( FAILED( hr ) ) {
4449 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4453 hr = audioClient->GetMixFormat( &deviceFormat );
4454 if ( FAILED( hr ) ) {
4455 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4459 if ( isCaptureDevice ) {
4460 info.inputChannels = deviceFormat->nChannels;
4461 info.outputChannels = 0;
4462 info.duplexChannels = 0;
4465 info.inputChannels = 0;
4466 info.outputChannels = deviceFormat->nChannels;
4467 info.duplexChannels = 0;
4471 info.sampleRates.clear();
4473 // allow support for all sample rates as we have a built-in sample rate converter
4474 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4475 info.sampleRates.push_back( SAMPLE_RATES[i] );
4477 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4480 info.nativeFormats = 0;
4482 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4483 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4484 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4486 if ( deviceFormat->wBitsPerSample == 32 ) {
4487 info.nativeFormats |= RTAUDIO_FLOAT32;
4489 else if ( deviceFormat->wBitsPerSample == 64 ) {
4490 info.nativeFormats |= RTAUDIO_FLOAT64;
4493 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4494 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4495 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4497 if ( deviceFormat->wBitsPerSample == 8 ) {
4498 info.nativeFormats |= RTAUDIO_SINT8;
4500 else if ( deviceFormat->wBitsPerSample == 16 ) {
4501 info.nativeFormats |= RTAUDIO_SINT16;
4503 else if ( deviceFormat->wBitsPerSample == 24 ) {
4504 info.nativeFormats |= RTAUDIO_SINT24;
4506 else if ( deviceFormat->wBitsPerSample == 32 ) {
4507 info.nativeFormats |= RTAUDIO_SINT32;
4515 // release all references
4516 PropVariantClear( &deviceNameProp );
4517 PropVariantClear( &defaultDeviceNameProp );
4519 SAFE_RELEASE( captureDevices );
4520 SAFE_RELEASE( renderDevices );
4521 SAFE_RELEASE( devicePtr );
4522 SAFE_RELEASE( defaultDevicePtr );
4523 SAFE_RELEASE( audioClient );
4524 SAFE_RELEASE( devicePropStore );
4525 SAFE_RELEASE( defaultDevicePropStore );
4527 CoTaskMemFree( deviceFormat );
4528 CoTaskMemFree( closestMatchFormat );
4530 if ( !errorText_.empty() )
4535 //-----------------------------------------------------------------------------
4537 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4539 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4540 if ( getDeviceInfo( i ).isDefaultOutput ) {
4548 //-----------------------------------------------------------------------------
4550 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4552 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4553 if ( getDeviceInfo( i ).isDefaultInput ) {
4561 //-----------------------------------------------------------------------------
4563 void RtApiWasapi::closeStream( void )
4565 if ( stream_.state == STREAM_CLOSED ) {
4566 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4567 error( RtAudioError::WARNING );
4571 if ( stream_.state != STREAM_STOPPED )
4574 // clean up stream memory
4575 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4576 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4578 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4579 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4581 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4582 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4584 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4585 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4587 delete ( WasapiHandle* ) stream_.apiHandle;
4588 stream_.apiHandle = NULL;
4590 for ( int i = 0; i < 2; i++ ) {
4591 if ( stream_.userBuffer[i] ) {
4592 free( stream_.userBuffer[i] );
4593 stream_.userBuffer[i] = 0;
4597 if ( stream_.deviceBuffer ) {
4598 free( stream_.deviceBuffer );
4599 stream_.deviceBuffer = 0;
4602 // update stream state
4603 stream_.state = STREAM_CLOSED;
4606 //-----------------------------------------------------------------------------
4608 void RtApiWasapi::startStream( void )
4612 if ( stream_.state == STREAM_RUNNING ) {
4613 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4614 error( RtAudioError::WARNING );
4618 #if defined( HAVE_GETTIMEOFDAY )
4619 gettimeofday( &stream_.lastTickTimestamp, NULL );
4622 // update stream state
4623 stream_.state = STREAM_RUNNING;
4625 // create WASAPI stream thread
4626 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4628 if ( !stream_.callbackInfo.thread ) {
4629 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4630 error( RtAudioError::THREAD_ERROR );
4633 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4634 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4638 //-----------------------------------------------------------------------------
4640 void RtApiWasapi::stopStream( void )
4644 if ( stream_.state == STREAM_STOPPED ) {
4645 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4646 error( RtAudioError::WARNING );
4650 // inform stream thread by setting stream state to STREAM_STOPPING
4651 stream_.state = STREAM_STOPPING;
4653 // wait until stream thread is stopped
4654 while( stream_.state != STREAM_STOPPED ) {
4658 // Wait for the last buffer to play before stopping.
4659 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4661 // close thread handle
4662 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4663 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4664 error( RtAudioError::THREAD_ERROR );
4668 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4671 //-----------------------------------------------------------------------------
4673 void RtApiWasapi::abortStream( void )
4677 if ( stream_.state == STREAM_STOPPED ) {
4678 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4679 error( RtAudioError::WARNING );
4683 // inform stream thread by setting stream state to STREAM_STOPPING
4684 stream_.state = STREAM_STOPPING;
4686 // wait until stream thread is stopped
4687 while ( stream_.state != STREAM_STOPPED ) {
4691 // close thread handle
4692 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4693 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4694 error( RtAudioError::THREAD_ERROR );
4698 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4701 //-----------------------------------------------------------------------------
4703 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4704 unsigned int firstChannel, unsigned int sampleRate,
4705 RtAudioFormat format, unsigned int* bufferSize,
4706 RtAudio::StreamOptions* options )
4708 bool methodResult = FAILURE;
4709 unsigned int captureDeviceCount = 0;
4710 unsigned int renderDeviceCount = 0;
4712 IMMDeviceCollection* captureDevices = NULL;
4713 IMMDeviceCollection* renderDevices = NULL;
4714 IMMDevice* devicePtr = NULL;
4715 WAVEFORMATEX* deviceFormat = NULL;
4716 unsigned int bufferBytes;
4717 stream_.state = STREAM_STOPPED;
4719 // create API Handle if not already created
4720 if ( !stream_.apiHandle )
4721 stream_.apiHandle = ( void* ) new WasapiHandle();
4723 // Count capture devices
4725 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4726 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4727 if ( FAILED( hr ) ) {
4728 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4732 hr = captureDevices->GetCount( &captureDeviceCount );
4733 if ( FAILED( hr ) ) {
4734 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4738 // Count render devices
4739 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4740 if ( FAILED( hr ) ) {
4741 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4745 hr = renderDevices->GetCount( &renderDeviceCount );
4746 if ( FAILED( hr ) ) {
4747 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4751 // validate device index
4752 if ( device >= captureDeviceCount + renderDeviceCount ) {
4753 errorType = RtAudioError::INVALID_USE;
4754 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4758 // if device index falls within capture devices
4759 if ( device >= renderDeviceCount ) {
4760 if ( mode != INPUT ) {
4761 errorType = RtAudioError::INVALID_USE;
4762 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4766 // retrieve captureAudioClient from devicePtr
4767 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4769 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4770 if ( FAILED( hr ) ) {
4771 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4775 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4776 NULL, ( void** ) &captureAudioClient );
4777 if ( FAILED( hr ) ) {
4778 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4782 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4783 if ( FAILED( hr ) ) {
4784 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4788 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4789 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4792 // if device index falls within render devices and is configured for loopback
4793 if ( device < renderDeviceCount && mode == INPUT )
4795 // if renderAudioClient is not initialised, initialise it now
4796 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4797 if ( !renderAudioClient )
4799 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4802 // retrieve captureAudioClient from devicePtr
4803 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4805 hr = renderDevices->Item( device, &devicePtr );
4806 if ( FAILED( hr ) ) {
4807 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4811 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4812 NULL, ( void** ) &captureAudioClient );
4813 if ( FAILED( hr ) ) {
4814 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4818 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4819 if ( FAILED( hr ) ) {
4820 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4824 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4825 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4828 // if device index falls within render devices and is configured for output
4829 if ( device < renderDeviceCount && mode == OUTPUT )
4831 // if renderAudioClient is already initialised, don't initialise it again
4832 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4833 if ( renderAudioClient )
4835 methodResult = SUCCESS;
4839 hr = renderDevices->Item( device, &devicePtr );
4840 if ( FAILED( hr ) ) {
4841 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4845 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4846 NULL, ( void** ) &renderAudioClient );
4847 if ( FAILED( hr ) ) {
4848 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4852 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4853 if ( FAILED( hr ) ) {
4854 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4858 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4859 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4863 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4864 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4865 stream_.mode = DUPLEX;
4868 stream_.mode = mode;
4871 stream_.device[mode] = device;
4872 stream_.doByteSwap[mode] = false;
4873 stream_.sampleRate = sampleRate;
4874 stream_.bufferSize = *bufferSize;
4875 stream_.nBuffers = 1;
4876 stream_.nUserChannels[mode] = channels;
4877 stream_.channelOffset[mode] = firstChannel;
4878 stream_.userFormat = format;
4879 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4881 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4882 stream_.userInterleaved = false;
4884 stream_.userInterleaved = true;
4885 stream_.deviceInterleaved[mode] = true;
4887 // Set flags for buffer conversion.
4888 stream_.doConvertBuffer[mode] = false;
4889 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4890 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4891 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4892 stream_.doConvertBuffer[mode] = true;
4893 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4894 stream_.nUserChannels[mode] > 1 )
4895 stream_.doConvertBuffer[mode] = true;
4897 if ( stream_.doConvertBuffer[mode] )
4898 setConvertInfo( mode, 0 );
4900 // Allocate necessary internal buffers
4901 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4903 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4904 if ( !stream_.userBuffer[mode] ) {
4905 errorType = RtAudioError::MEMORY_ERROR;
4906 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4910 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4911 stream_.callbackInfo.priority = 15;
4913 stream_.callbackInfo.priority = 0;
4915 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4916 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4918 methodResult = SUCCESS;
4922 SAFE_RELEASE( captureDevices );
4923 SAFE_RELEASE( renderDevices );
4924 SAFE_RELEASE( devicePtr );
4925 CoTaskMemFree( deviceFormat );
4927 // if method failed, close the stream
4928 if ( methodResult == FAILURE )
4931 if ( !errorText_.empty() )
4933 return methodResult;
4936 //=============================================================================
// Static thread entry point handed to CreateThread: unpacks the
// RtApiWasapi instance from the opaque void* and runs its
// stream-servicing loop (wasapiThread below).
4938 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4941 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
// Static thread routine used to stop a running stream from outside the
// audio thread: invokes stopStream() on the passed RtApiWasapi
// instance.  Spawned by wasapiThread when the user callback returns 1.
4946 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4949 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
// Static thread routine used to abort a running stream: invokes
// abortStream() on the passed RtApiWasapi instance.  Spawned by
// wasapiThread when the user callback returns 2.
4954 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4957 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4962 //-----------------------------------------------------------------------------
// Core WASAPI stream-servicing routine, run on its own thread (see
// runWasapiThread above).  Lazily initializes the capture/render audio
// clients and resamplers, then loops: pull captured frames, resample
// and convert to the user format, invoke the user callback, convert
// and resample the callback output, and push frames to the render
// device — until stream_.state becomes STREAM_STOPPING.
4964 void RtApiWasapi::wasapiThread()
4966 // as this is a new thread, we must CoInitialize it
4967 CoInitialize( NULL );
// Local aliases for the WASAPI interface pointers and events stored in
// the stream's WasapiHandle.
4971 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4972 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4973 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4974 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4975 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4976 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4978 WAVEFORMATEX* captureFormat = NULL;
4979 WAVEFORMATEX* renderFormat = NULL;
4980 float captureSrRatio = 0.0f;
4981 float renderSrRatio = 0.0f;
4982 WasapiBuffer captureBuffer;
4983 WasapiBuffer renderBuffer;
4984 WasapiResampler* captureResampler = NULL;
4985 WasapiResampler* renderResampler = NULL;
4987 // declare local stream variables
4988 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4989 BYTE* streamBuffer = NULL;
4990 unsigned long captureFlags = 0;
4991 unsigned int bufferFrameCount = 0;
4992 unsigned int numFramesPadding = 0;
4993 unsigned int convBufferSize = 0;
4994 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
// callbackPulled/callbackPushed implement the hand-off between the
// user callback and the device buffers inside the process loop below.
4995 bool callbackPushed = true;
4996 bool callbackPulled = false;
4997 bool callbackStopped = false;
4998 int callbackResult = 0;
5000 // convBuffer is used to store converted buffers between WASAPI and the user
5001 char* convBuffer = NULL;
5002 unsigned int convBuffSize = 0;
5003 unsigned int deviceBuffSize = 0;
5005 std::string errorText;
5006 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
5008 // Attempt to assign "Pro Audio" characteristic to thread
5009 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
// NOTE(review): the lines below presumably execute only when AvrtDll
// loaded successfully (the guard is not visible here) — confirm.
5011 DWORD taskIndex = 0;
5012 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
5013 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
5014 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
5015 FreeLibrary( AvrtDll );
5018 // start capture stream if applicable
5019 if ( captureAudioClient ) {
5020 hr = captureAudioClient->GetMixFormat( &captureFormat );
5021 if ( FAILED( hr ) ) {
5022 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5026 // init captureResampler
5027 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
5028 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
5029 captureFormat->nSamplesPerSec, stream_.sampleRate );
// Ratio of device mix rate to user stream rate; used to size buffers
// and to decide how many device frames make up one user buffer.
5031 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
5033 if ( !captureClient ) {
5034 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5035 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5040 if ( FAILED( hr ) ) {
5041 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5045 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5046 ( void** ) &captureClient );
5047 if ( FAILED( hr ) ) {
5048 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5052 // don't configure captureEvent if in loopback mode
5053 if ( !loopbackEnabled )
5055 // configure captureEvent to trigger on every available capture buffer
5056 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5057 if ( !captureEvent ) {
5058 errorType = RtAudioError::SYSTEM_ERROR;
5059 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5063 hr = captureAudioClient->SetEventHandle( captureEvent );
5064 if ( FAILED( hr ) ) {
5065 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Persist the newly created objects back into the shared handle so
// other methods (e.g. close/stop paths) can release them.
5069 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5072 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5074 // reset the capture stream
5075 hr = captureAudioClient->Reset();
5076 if ( FAILED( hr ) ) {
5077 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5081 // start the capture stream
5082 hr = captureAudioClient->Start();
5083 if ( FAILED( hr ) ) {
5084 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5089 unsigned int inBufferSize = 0;
5090 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5091 if ( FAILED( hr ) ) {
5092 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5096 // scale outBufferSize according to stream->user sample rate ratio
5097 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5098 inBufferSize *= stream_.nDeviceChannels[INPUT];
5100 // set captureBuffer size
5101 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5104 // start render stream if applicable
5105 if ( renderAudioClient ) {
5106 hr = renderAudioClient->GetMixFormat( &renderFormat );
5107 if ( FAILED( hr ) ) {
5108 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5112 // init renderResampler
5113 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5114 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5115 stream_.sampleRate, renderFormat->nSamplesPerSec );
5117 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5119 if ( !renderClient ) {
5120 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5121 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5126 if ( FAILED( hr ) ) {
5127 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5131 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5132 ( void** ) &renderClient );
5133 if ( FAILED( hr ) ) {
5134 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5138 // configure renderEvent to trigger on every available render buffer
5139 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5140 if ( !renderEvent ) {
5141 errorType = RtAudioError::SYSTEM_ERROR;
5142 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5146 hr = renderAudioClient->SetEventHandle( renderEvent );
5147 if ( FAILED( hr ) ) {
5148 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5152 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5153 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5155 // reset the render stream
5156 hr = renderAudioClient->Reset();
5157 if ( FAILED( hr ) ) {
5158 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5162 // start the render stream
5163 hr = renderAudioClient->Start();
5164 if ( FAILED( hr ) ) {
5165 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5170 unsigned int outBufferSize = 0;
5171 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5172 if ( FAILED( hr ) ) {
5173 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5177 // scale inBufferSize according to user->stream sample rate ratio
5178 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5179 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5181 // set renderBuffer size
5182 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5185 // malloc buffer memory
// convBuffSize is the resampler scratch size (scaled by the worst-case
// sample-rate ratio); deviceBuffSize is one user buffer in the device
// format.  DUPLEX takes the max of the two directions.
5186 if ( stream_.mode == INPUT )
5188 using namespace std; // for ceilf
5189 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5190 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5192 else if ( stream_.mode == OUTPUT )
5194 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5195 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5197 else if ( stream_.mode == DUPLEX )
5199 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5200 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5201 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5202 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5205 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5206 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5207 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5208 if ( !convBuffer || !stream_.deviceBuffer ) {
5209 errorType = RtAudioError::MEMORY_ERROR;
5210 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5214 // stream process loop
5215 while ( stream_.state != STREAM_STOPPING ) {
5216 if ( !callbackPulled ) {
5219 // 1. Pull callback buffer from inputBuffer
5220 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5221 // Convert callback buffer to user format
5223 if ( captureAudioClient )
5225 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5226 if ( captureSrRatio != 1 )
5228 // account for remainders
// Accumulate resampled frames until a full user buffer is available.
5233 while ( convBufferSize < stream_.bufferSize )
5235 // Pull callback buffer from inputBuffer
5236 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5237 samplesToPull * stream_.nDeviceChannels[INPUT],
5238 stream_.deviceFormat[INPUT] );
5240 if ( !callbackPulled )
5245 // Convert callback buffer to user sample rate
5246 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5247 unsigned int convSamples = 0;
5249 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5254 convBufferSize += convSamples;
5255 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5258 if ( callbackPulled )
5260 if ( stream_.doConvertBuffer[INPUT] ) {
5261 // Convert callback buffer to user format
5262 convertBuffer( stream_.userBuffer[INPUT],
5263 stream_.deviceBuffer,
5264 stream_.convertInfo[INPUT] );
5267 // no further conversion, simple copy deviceBuffer to userBuffer
5268 memcpy( stream_.userBuffer[INPUT],
5269 stream_.deviceBuffer,
5270 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5275 // if there is no capture stream, set callbackPulled flag
5276 callbackPulled = true;
5281 // 1. Execute user callback method
5282 // 2. Handle return value from callback
5284 // if callback has not requested the stream to stop
5285 if ( callbackPulled && !callbackStopped ) {
5286 // Execute user callback method
5287 callbackResult = callback( stream_.userBuffer[OUTPUT],
5288 stream_.userBuffer[INPUT],
// Report an input overflow to the user when WASAPI flagged a data
// discontinuity in the captured stream.
5291 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5292 stream_.callbackInfo.userData );
5295 RtApi::tickStreamTime();
5297 // Handle return value from callback
// Return value 1 => drain and stop; 2 => abort immediately.  Either
// way the actual stop/abort runs on a separate helper thread because
// stopStream()/abortStream() must join this thread.
5298 if ( callbackResult == 1 ) {
5299 // instantiate a thread to stop this thread
5300 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5301 if ( !threadHandle ) {
5302 errorType = RtAudioError::THREAD_ERROR;
5303 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5306 else if ( !CloseHandle( threadHandle ) ) {
5307 errorType = RtAudioError::THREAD_ERROR;
5308 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5312 callbackStopped = true;
5314 else if ( callbackResult == 2 ) {
5315 // instantiate a thread to stop this thread
5316 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5317 if ( !threadHandle ) {
5318 errorType = RtAudioError::THREAD_ERROR;
5319 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5322 else if ( !CloseHandle( threadHandle ) ) {
5323 errorType = RtAudioError::THREAD_ERROR;
5324 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5328 callbackStopped = true;
5335 // 1. Convert callback buffer to stream format
5336 // 2. Convert callback buffer to stream sample rate and channel count
5337 // 3. Push callback buffer into outputBuffer
5339 if ( renderAudioClient && callbackPulled )
5341 // if the last call to renderBuffer.PushBuffer() was successful
5342 if ( callbackPushed || convBufferSize == 0 )
5344 if ( stream_.doConvertBuffer[OUTPUT] )
5346 // Convert callback buffer to stream format
5347 convertBuffer( stream_.deviceBuffer,
5348 stream_.userBuffer[OUTPUT],
5349 stream_.convertInfo[OUTPUT] );
5353 // no further conversion, simple copy userBuffer to deviceBuffer
5354 memcpy( stream_.deviceBuffer,
5355 stream_.userBuffer[OUTPUT],
5356 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5359 // Convert callback buffer to stream sample rate
5360 renderResampler->Convert( convBuffer,
5361 stream_.deviceBuffer,
5366 // Push callback buffer into outputBuffer
5367 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5368 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5369 stream_.deviceFormat[OUTPUT] );
5372 // if there is no render stream, set callbackPushed flag
5373 callbackPushed = true;
5378 // 1. Get capture buffer from stream
5379 // 2. Push capture buffer into inputBuffer
5380 // 3. If 2. was successful: Release capture buffer
5382 if ( captureAudioClient ) {
5383 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5384 if ( !callbackPulled ) {
// In loopback mode no capture event exists, so pace against the
// render event instead.
5385 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5388 // Get capture buffer from stream
5389 hr = captureClient->GetBuffer( &streamBuffer,
5391 &captureFlags, NULL, NULL );
5392 if ( FAILED( hr ) ) {
5393 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5397 if ( bufferFrameCount != 0 ) {
5398 // Push capture buffer into inputBuffer
5399 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5400 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5401 stream_.deviceFormat[INPUT] ) )
5403 // Release capture buffer
5404 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5405 if ( FAILED( hr ) ) {
5406 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5412 // Inform WASAPI that capture was unsuccessful
5413 hr = captureClient->ReleaseBuffer( 0 );
5414 if ( FAILED( hr ) ) {
5415 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5422 // Inform WASAPI that capture was unsuccessful
5423 hr = captureClient->ReleaseBuffer( 0 );
5424 if ( FAILED( hr ) ) {
5425 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5433 // 1. Get render buffer from stream
5434 // 2. Pull next buffer from outputBuffer
5435 // 3. If 2. was successful: Fill render buffer with next buffer
5436 // Release render buffer
5438 if ( renderAudioClient ) {
5439 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5440 if ( callbackPulled && !callbackPushed ) {
5441 WaitForSingleObject( renderEvent, INFINITE );
5444 // Get render buffer from stream
5445 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5446 if ( FAILED( hr ) ) {
5447 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5451 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5452 if ( FAILED( hr ) ) {
5453 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable space = total endpoint buffer minus frames still queued.
5457 bufferFrameCount -= numFramesPadding;
5459 if ( bufferFrameCount != 0 ) {
5460 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5461 if ( FAILED( hr ) ) {
5462 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5466 // Pull next buffer from outputBuffer
5467 // Fill render buffer with next buffer
5468 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5469 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5470 stream_.deviceFormat[OUTPUT] ) )
5472 // Release render buffer
5473 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5474 if ( FAILED( hr ) ) {
5475 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5481 // Inform WASAPI that render was unsuccessful
5482 hr = renderClient->ReleaseBuffer( 0, 0 );
5483 if ( FAILED( hr ) ) {
5484 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5491 // Inform WASAPI that render was unsuccessful
5492 hr = renderClient->ReleaseBuffer( 0, 0 );
5493 if ( FAILED( hr ) ) {
5494 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5500 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5501 if ( callbackPushed ) {
5502 // unsetting the callbackPulled flag lets the stream know that
5503 // the audio device is ready for another callback output buffer.
5504 callbackPulled = false;
// Cleanup: the mix formats were allocated by GetMixFormat (via
// CoTaskMemAlloc) and must be freed with CoTaskMemFree; convBuffer and
// the resamplers are locally owned scratch objects.
5511 CoTaskMemFree( captureFormat );
5512 CoTaskMemFree( renderFormat );
5514 free ( convBuffer );
5515 delete renderResampler;
5516 delete captureResampler;
5520 // update stream state
5521 stream_.state = STREAM_STOPPED;
// Propagate any recorded error text to the API-level error string.
5523 if ( !errorText.empty() )
5525 errorText_ = errorText;
5530 //******************** End of __WINDOWS_WASAPI__ *********************//
5534 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5536 // Modified by Robin Davies, October 2005
5537 // - Improvements to DirectX pointer chasing.
5538 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5539 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5540 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5541 // Changed device query structure for RtAudio 4.0.7, January 2010
5543 #include <windows.h>
5544 #include <process.h>
5545 #include <mmsystem.h>
5549 #include <algorithm>
5551 #if defined(__MINGW32__)
5552 // missing from latest mingw winapi
5553 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5554 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5555 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5556 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5559 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5561 #ifdef _MSC_VER // if Microsoft Visual C++
5562 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
// Returns nonzero when `pointer` lies in the half-open circular-buffer
// interval [earlierPointer, laterPointer), with all positions taken
// modulo bufferSize (DirectSound buffer positions wrap).
5565 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
// Unwrap the three positions onto a single linear number line so the
// plain range comparison below is valid.
5567 if ( pointer > bufferSize ) pointer -= bufferSize;
5568 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5569 if ( pointer < earlierPointer ) pointer += bufferSize;
5570 return pointer >= earlierPointer && pointer < laterPointer;
5573 // A structure to hold various information related to the DirectSound
5574 // API implementation.
5576 unsigned int drainCounter; // Tracks callback counts when draining
5577 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Two-element arrays hold per-direction state — presumably indexed by
// the OUTPUT/INPUT stream modes used elsewhere in this file (confirm).
5581 UINT bufferPointer[2];
5582 DWORD dsBufferSize[2];
5583 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero/false-initialize all per-direction state.
5587 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5590 // Declarations for utility functions, callbacks, and structures
5591 // specific to the DirectSound implementation.
// Enumeration callback handed to DirectSound(Capture)Enumerate (see
// getDeviceCount below); records discovered devices via DsProbeData.
5592 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5593 LPCTSTR description,
// Maps a DirectSound result code to a human-readable message.
5597 static const char* getErrorString( int code );
// Thread routine for the DirectSound callback loop (defined elsewhere
// in this file).
5599 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice default ctor: device not yet (re)found during enumeration;
// neither the output (validId[0]) nor input (validId[1]) id is valid.
5608 : found(false) { validId[0] = false; validId[1] = false; }
// Carrier passed to deviceQueryCallback: which direction is being
// probed, plus the device list to record results into.
5611 struct DsProbeData {
5613 std::vector<struct DsDevice>* dsDevices;
// Constructor: initialize COM for this thread.  A failed CoInitialize
// (e.g. the host already chose an incompatible threading model) is
// tolerated; coInitialized_ records whether the destructor must
// balance the call with CoUninitialize.
5616 RtApiDs :: RtApiDs()
5618 // Dsound will run both-threaded. If CoInitialize fails, then just
5619 // accept whatever the mainline chose for a threading model.
5620 coInitialized_ = false;
5621 HRESULT hr = CoInitialize( NULL );
5622 if ( !FAILED( hr ) ) coInitialized_ = true;
// Destructor: close any still-open stream, then balance the
// constructor's CoInitialize only if it succeeded there.
5625 RtApiDs :: ~RtApiDs()
5627 if ( stream_.state != STREAM_CLOSED ) closeStream();
5628 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5631 // The DirectSound default output is always the first device.
5632 unsigned int RtApiDs :: getDefaultOutputDevice( void )
// (Per the comment above, the body returns index 0.)
5637 // The DirectSound default input is always the first input device,
5638 // which is the first capture device enumerated.
5639 unsigned int RtApiDs :: getDefaultInputDevice( void )
// (Per the comment above, the body returns index 0.)
// Re-enumerate DirectSound playback and capture endpoints, prune
// devices that have disappeared since the last query, and return the
// resulting device count.  Enumeration failures are reported as
// warnings and the query continues.
5644 unsigned int RtApiDs :: getDeviceCount( void )
5646 // Set query flag for previously found devices to false, so that we
5647 // can check for any devices that have disappeared.
5648 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5649 dsDevices[i].found = false;
5651 // Query DirectSound devices.
5652 struct DsProbeData probeInfo;
5653 probeInfo.isInput = false;
5654 probeInfo.dsDevices = &dsDevices;
5655 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5656 if ( FAILED( result ) ) {
5657 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5658 errorText_ = errorStream_.str();
5659 error( RtAudioError::WARNING );
5662 // Query DirectSoundCapture devices.
// probeInfo is reused: same device list, now flagged as an input scan.
5663 probeInfo.isInput = true;
5664 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5665 if ( FAILED( result ) ) {
5666 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5667 errorText_ = errorStream_.str();
5668 error( RtAudioError::WARNING );
5671 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// Note the index only advances when no erase occurs, so consecutive
// stale entries are all removed.
5672 for ( unsigned int i=0; i<dsDevices.size(); ) {
5673 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5677 return static_cast<unsigned int>(dsDevices.size());
// Probe a single DirectSound device: open it for playback and/or
// capture, query channel counts, supported sample rates and native
// formats, and fill in an RtAudio::DeviceInfo.  Probe failures are
// reported as warnings; info.probed starts out false.
5680 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5682 RtAudio::DeviceInfo info;
5683 info.probed = false;
5685 if ( dsDevices.size() == 0 ) {
5686 // Force a query of all devices
5688 if ( dsDevices.size() == 0 ) {
5689 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5690 error( RtAudioError::INVALID_USE );
5695 if ( device >= dsDevices.size() ) {
5696 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5697 error( RtAudioError::INVALID_USE );
// Skip playback probing entirely when the device has no valid output
// GUID; jump straight to the capture-side probe.
5702 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5704 LPDIRECTSOUND output;
5706 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5707 if ( FAILED( result ) ) {
5708 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5709 errorText_ = errorStream_.str();
5710 error( RtAudioError::WARNING );
5714 outCaps.dwSize = sizeof( outCaps );
5715 result = output->GetCaps( &outCaps );
5716 if ( FAILED( result ) ) {
5718 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5719 errorText_ = errorStream_.str();
5720 error( RtAudioError::WARNING );
5724 // Get output channel information.
5725 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5727 // Get sample rate information.
5728 info.sampleRates.clear();
5729 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5730 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5731 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5732 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Preferred rate: the highest supported rate not exceeding 48 kHz.
5734 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5735 info.preferredSampleRate = SAMPLE_RATES[k];
5739 // Get format information.
5740 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5741 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5745 if ( getDefaultOutputDevice() == device )
5746 info.isDefaultOutput = true;
// No capture GUID: report the playback-only results now.
5748 if ( dsDevices[ device ].validId[1] == false ) {
5749 info.name = dsDevices[ device ].name;
// Capture-side probing begins here (the probeInput label sits just
// above in the full source).
5756 LPDIRECTSOUNDCAPTURE input;
5757 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5758 if ( FAILED( result ) ) {
5759 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5760 errorText_ = errorStream_.str();
5761 error( RtAudioError::WARNING );
5766 inCaps.dwSize = sizeof( inCaps );
5767 result = input->GetCaps( &inCaps );
5768 if ( FAILED( result ) ) {
5770 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5771 errorText_ = errorStream_.str();
5772 error( RtAudioError::WARNING );
5776 // Get input channel information.
5777 info.inputChannels = inCaps.dwChannels;
5779 // Get sample rate and format information.
// DirectSoundCapture reports capability as WAVE_FORMAT_* bit flags:
// one bit per (rate, channels, width) combination.  Stereo and mono
// devices are decoded separately below.
5780 std::vector<unsigned int> rates;
5781 if ( inCaps.dwChannels >= 2 ) {
5782 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5783 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5784 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5785 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5786 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5787 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5788 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5789 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
// Collect rates for the preferred (16-bit first) native format.
5791 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5792 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5793 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5794 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5795 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5797 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5798 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5799 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5800 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5801 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5804 else if ( inCaps.dwChannels == 1 ) {
5805 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5806 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5807 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5808 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5809 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5810 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5811 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5812 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5814 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5815 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5816 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5817 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5818 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5820 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5821 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5822 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5823 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5824 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5827 else info.inputChannels = 0; // technically, this would be an error
5831 if ( info.inputChannels == 0 ) return info;
5833 // Copy the supported rates to the info structure but avoid duplication.
5835 for ( unsigned int i=0; i<rates.size(); i++ ) {
5837 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5838 if ( rates[i] == info.sampleRates[j] ) {
5843 if ( found == false ) info.sampleRates.push_back( rates[i] );
5845 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5847 // If device opens for both playback and capture, we determine the channels.
5848 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5849 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// DirectSound's first device is the default input (see
// getDefaultInputDevice above).
5851 if ( device == 0 ) info.isDefaultInput = true;
5853 // Copy name and return.
5854 info.name = dsDevices[ device ].name;
// Open one direction (OUTPUT or INPUT) of a DirectSound stream on `device`.
// Called once per direction; for duplex streams it runs twice and the second
// call promotes stream_.mode to DUPLEX.  On success the DS objects are stashed
// in a DsHandle and the shared callback thread is started (first call only).
// On any failure it sets errorText_ and falls through to the cleanup code at
// the bottom, which releases partially-created DS objects and buffers.
//
// NOTE(review): the embedded original line numbers (5859, 5860, ...) jump in
// many places, so this chunk has lines elided from the canonical file —
// closing braces, "return FAILURE;" statements, the "HRESULT result;" and
// "unsigned int threadId;" declarations, and the bodies of the two
// buffer-growth while-loops ("dsBufferSize *= 2;") all appear to be missing.
// Verify against the upstream RtAudio source before editing.
5859 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5860 unsigned int firstChannel, unsigned int sampleRate,
5861 RtAudioFormat format, unsigned int *bufferSize,
5862 RtAudio::StreamOptions *options )
// DirectSound buffers here are at most stereo, and firstChannel offsets into
// that stereo pair, so the sum may not exceed 2.
5864 if ( channels + firstChannel > 2 ) {
5865 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5869 size_t nDevices = dsDevices.size();
5870 if ( nDevices == 0 ) {
5871 // This should not happen because a check is made before this function is called.
5872 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5876 if ( device >= nDevices ) {
5877 // This should not happen because a check is made before this function is called.
5878 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
// validId[0] marks render capability, validId[1] capture capability.
5882 if ( mode == OUTPUT ) {
5883 if ( dsDevices[ device ].validId[0] == false ) {
5884 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5885 errorText_ = errorStream_.str();
5889 else { // mode == INPUT
5890 if ( dsDevices[ device ].validId[1] == false ) {
5891 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5892 errorText_ = errorStream_.str();
5897 // According to a note in PortAudio, using GetDesktopWindow()
5898 // instead of GetForegroundWindow() is supposed to avoid problems
5899 // that occur when the application's window is not the foreground
5900 // window. Also, if the application window closes before the
5901 // DirectSound buffer, DirectSound can crash. In the past, I had
5902 // problems when using GetDesktopWindow() but it seems fine now
5903 // (January 2010). I'll leave it commented here.
5904 // HWND hWnd = GetForegroundWindow();
5905 HWND hWnd = GetDesktopWindow();
5907 // Check the numberOfBuffers parameter and limit the lowest value to
5908 // two. This is a judgement call and a value of two is probably too
5909 // low for capture, but it should work for playback.
5911 if ( options ) nBuffers = options->numberOfBuffers;
5912 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
// Note: values below 2 are bumped to 3 (not 2) — only an explicit
// MINIMIZE_LATENCY request gets the riskier 2-buffer configuration.
5913 if ( nBuffers < 2 ) nBuffers = 3;
5915 // Check the lower range of the user-specified buffer size and set
5916 // (arbitrarily) to a lower bound of 32.
5917 if ( *bufferSize < 32 ) *bufferSize = 32;
5919 // Create the wave format structure. The data format setting will
5920 // be determined later.
5921 WAVEFORMATEX waveFormat;
5922 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5923 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5924 waveFormat.nChannels = channels + firstChannel;
5925 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5927 // Determine the device buffer size. By default, we'll use the value
5928 // defined above (32K), but we will grow it to make allowances for
5929 // very large software buffer sizes.
5930 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5931 DWORD dsPointerLeadTime = 0;
// ohandle/bhandle carry the DS device object and DS buffer object out of the
// mode-specific branches below into the shared DsHandle bookkeeping code.
5933 void *ohandle = 0, *bhandle = 0;
5935 if ( mode == OUTPUT ) {
5937 LPDIRECTSOUND output;
5938 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5939 if ( FAILED( result ) ) {
5940 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5941 errorText_ = errorStream_.str();
5946 outCaps.dwSize = sizeof( outCaps );
5947 result = output->GetCaps( &outCaps );
5948 if ( FAILED( result ) ) {
5950 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5951 errorText_ = errorStream_.str();
5955 // Check channel information.
5956 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5957 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5958 errorText_ = errorStream_.str();
5962 // Check format information. Use 16-bit format unless not
5963 // supported or user requests 8-bit.
5964 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5965 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5966 waveFormat.wBitsPerSample = 16;
5967 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5970 waveFormat.wBitsPerSample = 8;
5971 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5973 stream_.userFormat = format;
5975 // Update wave format structure and buffer information.
5976 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5977 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
// Lead time in BYTES the write cursor is kept ahead of the safe-write cursor.
5978 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5980 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
// NOTE(review): the loop body ("dsBufferSize *= 2;") appears elided from this
// chunk (line-number gap 5981 -> 5984); as shown this loop would not terminate.
5981 while ( dsPointerLeadTime * 2U > dsBufferSize )
5984 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5985 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5986 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5987 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5988 if ( FAILED( result ) ) {
5990 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5991 errorText_ = errorStream_.str();
5995 // Even though we will write to the secondary buffer, we need to
5996 // access the primary buffer to set the correct output format
5997 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5998 // buffer description.
5999 DSBUFFERDESC bufferDescription;
6000 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6001 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6002 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
6004 // Obtain the primary buffer
6005 LPDIRECTSOUNDBUFFER buffer;
6006 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6007 if ( FAILED( result ) ) {
6009 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
6010 errorText_ = errorStream_.str();
6014 // Set the primary DS buffer sound format.
6015 result = buffer->SetFormat( &waveFormat );
6016 if ( FAILED( result ) ) {
6018 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
6019 errorText_ = errorStream_.str();
6023 // Setup the secondary DS buffer description.
// `buffer` is reused: after this call it points at the SECONDARY buffer; the
// primary buffer interface obtained above is not released here (presumably
// handled elsewhere / by DirectSound — TODO confirm against upstream).
6024 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6025 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6026 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6027 DSBCAPS_GLOBALFOCUS |
6028 DSBCAPS_GETCURRENTPOSITION2 |
6029 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
6030 bufferDescription.dwBufferBytes = dsBufferSize;
6031 bufferDescription.lpwfxFormat = &waveFormat;
6033 // Try to create the secondary DS buffer. If that doesn't work,
6034 // try to use software mixing. Otherwise, there's a problem.
6035 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6036 if ( FAILED( result ) ) {
6037 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6038 DSBCAPS_GLOBALFOCUS |
6039 DSBCAPS_GETCURRENTPOSITION2 |
6040 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6041 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6042 if ( FAILED( result ) ) {
6044 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6045 errorText_ = errorStream_.str();
6050 // Get the buffer size ... might be different from what we specified.
6052 dsbcaps.dwSize = sizeof( DSBCAPS );
6053 result = buffer->GetCaps( &dsbcaps );
6054 if ( FAILED( result ) ) {
6057 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6058 errorText_ = errorStream_.str();
6062 dsBufferSize = dsbcaps.dwBufferBytes;
6064 // Lock the DS buffer
6067 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6068 if ( FAILED( result ) ) {
6071 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6072 errorText_ = errorStream_.str();
6076 // Zero the DS buffer
// Pre-zero so the device plays silence (not stale memory) before the first
// callback fills it.
6077 ZeroMemory( audioPtr, dataLen );
6079 // Unlock the DS buffer
6080 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6081 if ( FAILED( result ) ) {
6084 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6085 errorText_ = errorStream_.str();
6089 ohandle = (void *) output;
6090 bhandle = (void *) buffer;
6093 if ( mode == INPUT ) {
6095 LPDIRECTSOUNDCAPTURE input;
6096 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6097 if ( FAILED( result ) ) {
6098 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6099 errorText_ = errorStream_.str();
6104 inCaps.dwSize = sizeof( inCaps );
6105 result = input->GetCaps( &inCaps );
6106 if ( FAILED( result ) ) {
6108 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6109 errorText_ = errorStream_.str();
6113 // Check channel information.
6114 if ( inCaps.dwChannels < channels + firstChannel ) {
// NOTE(review): message says "getDeviceInfo" but this is probeDeviceOpen —
// looks like a copy/paste slip in the original (string left unchanged here).
6115 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6119 // Check format information. Use 16-bit format unless user
// requests 8-bit AND the device reports an 8-bit capture format for the
// requested channel count (stereo "S" vs mono "M" WAVE_FORMAT_* masks).
6121 DWORD deviceFormats;
6122 if ( channels + firstChannel == 2 ) {
6123 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6124 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6125 waveFormat.wBitsPerSample = 8;
6126 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6128 else { // assume 16-bit is supported
6129 waveFormat.wBitsPerSample = 16;
6130 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6133 else { // channel == 1
6134 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6135 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6136 waveFormat.wBitsPerSample = 8;
6137 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6139 else { // assume 16-bit is supported
6140 waveFormat.wBitsPerSample = 16;
6141 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6144 stream_.userFormat = format;
6146 // Update wave format structure and buffer information.
6147 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6148 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6149 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6151 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
// NOTE(review): loop body ("dsBufferSize *= 2;") elided here too (gap 6152 -> 6155).
6152 while ( dsPointerLeadTime * 2U > dsBufferSize )
6155 // Setup the secondary DS buffer description.
6156 DSCBUFFERDESC bufferDescription;
6157 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6158 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6159 bufferDescription.dwFlags = 0;
6160 bufferDescription.dwReserved = 0;
6161 bufferDescription.dwBufferBytes = dsBufferSize;
6162 bufferDescription.lpwfxFormat = &waveFormat;
6164 // Create the capture buffer.
6165 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6166 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6167 if ( FAILED( result ) ) {
6169 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6170 errorText_ = errorStream_.str();
6174 // Get the buffer size ... might be different from what we specified.
6176 dscbcaps.dwSize = sizeof( DSCBCAPS );
6177 result = buffer->GetCaps( &dscbcaps );
6178 if ( FAILED( result ) ) {
6181 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6182 errorText_ = errorStream_.str();
6186 dsBufferSize = dscbcaps.dwBufferBytes;
6188 // NOTE: We could have a problem here if this is a duplex stream
6189 // and the play and capture hardware buffer sizes are different
6190 // (I'm actually not sure if that is a problem or not).
6191 // Currently, we are not verifying that.
6193 // Lock the capture buffer
6196 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6197 if ( FAILED( result ) ) {
6200 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6201 errorText_ = errorStream_.str();
// Zero the capture buffer so the first reads return silence.
6206 ZeroMemory( audioPtr, dataLen );
6208 // Unlock the buffer
6209 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6210 if ( FAILED( result ) ) {
6213 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6214 errorText_ = errorStream_.str();
6218 ohandle = (void *) input;
6219 bhandle = (void *) buffer;
6222 // Set various stream parameters
6223 DsHandle *handle = 0;
// Device side always uses channels + firstChannel; the offset channels are
// skipped during buffer conversion (channelOffset below).
6224 stream_.nDeviceChannels[mode] = channels + firstChannel;
6225 stream_.nUserChannels[mode] = channels;
6226 stream_.bufferSize = *bufferSize;
6227 stream_.channelOffset[mode] = firstChannel;
6228 stream_.deviceInterleaved[mode] = true;
6229 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6230 else stream_.userInterleaved = true;
6232 // Set flag for buffer conversion
// Conversion is needed whenever user and device differ in channel count,
// sample format, or (for multi-channel data) interleaving.
6233 stream_.doConvertBuffer[mode] = false;
6234 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6235 stream_.doConvertBuffer[mode] = true;
6236 if (stream_.userFormat != stream_.deviceFormat[mode])
6237 stream_.doConvertBuffer[mode] = true;
6238 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6239 stream_.nUserChannels[mode] > 1 )
6240 stream_.doConvertBuffer[mode] = true;
6242 // Allocate necessary internal buffers
6243 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6244 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6245 if ( stream_.userBuffer[mode] == NULL ) {
6246 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6250 if ( stream_.doConvertBuffer[mode] ) {
6252 bool makeBuffer = true;
6253 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6254 if ( mode == INPUT ) {
// For duplex, reuse the output-side device buffer when it is already big
// enough for one sample frame of input data; only grow it if necessary.
6255 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6256 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6257 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6262 bufferBytes *= *bufferSize;
6263 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6264 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6265 if ( stream_.deviceBuffer == NULL ) {
6266 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6272 // Allocate our DsHandle structures for the stream.
// Allocated on the first (possibly only) call; a second duplex call reuses it.
6273 if ( stream_.apiHandle == 0 ) {
6275 handle = new DsHandle;
6277 catch ( std::bad_alloc& ) {
// NOTE(review): message says "AsioHandle" but this allocates a DsHandle —
// copy/paste from the ASIO backend; string left unchanged in this doc pass.
6278 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6282 // Create a manual-reset event.
// Manual-reset so stopStream() can block on it until the callback thread
// signals that draining is finished.
6283 handle->condition = CreateEvent( NULL, // no security
6284 TRUE, // manual-reset
6285 FALSE, // non-signaled initially
6287 stream_.apiHandle = (void *) handle;
6290 handle = (DsHandle *) stream_.apiHandle;
// Index 0 = output objects, index 1 = input objects.
6291 handle->id[mode] = ohandle;
6292 handle->buffer[mode] = bhandle;
6293 handle->dsBufferSize[mode] = dsBufferSize;
6294 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6296 stream_.device[mode] = device;
6297 stream_.state = STREAM_STOPPED;
6298 if ( stream_.mode == OUTPUT && mode == INPUT )
6299 // We had already set up an output stream.
6300 stream_.mode = DUPLEX;
6302 stream_.mode = mode;
6303 stream_.nBuffers = nBuffers;
6304 stream_.sampleRate = sampleRate;
6306 // Setup the buffer conversion information structure.
6307 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6309 // Setup the callback thread.
// One shared thread services both directions; started only once (the second
// duplex call sees isRunning == true and skips this).
6310 if ( stream_.callbackInfo.isRunning == false ) {
6312 stream_.callbackInfo.isRunning = true;
6313 stream_.callbackInfo.object = (void *) this;
// _beginthreadex (not CreateThread) so the CRT state is initialized for the
// new thread; `threadId` is presumably declared in a line elided from this chunk.
6314 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6315 &stream_.callbackInfo, 0, &threadId );
6316 if ( stream_.callbackInfo.thread == 0 ) {
6317 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6321 // Boost DS thread priority
6322 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// ---- error cleanup path: release any DS objects / buffers created above ----
6328 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6329 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6330 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6331 if ( buffer ) buffer->Release();
6334 if ( handle->buffer[1] ) {
6335 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6336 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6337 if ( buffer ) buffer->Release();
6340 CloseHandle( handle->condition );
6342 stream_.apiHandle = 0;
6345 for ( int i=0; i<2; i++ ) {
6346 if ( stream_.userBuffer[i] ) {
6347 free( stream_.userBuffer[i] );
6348 stream_.userBuffer[i] = 0;
6352 if ( stream_.deviceBuffer ) {
6353 free( stream_.deviceBuffer );
6354 stream_.deviceBuffer = 0;
6357 stream_.state = STREAM_CLOSED;
// Close the open stream: stop and join the callback thread, release the
// DirectSound playback/capture objects, free the user and device buffers,
// and reset the stream state to CLOSED.  Warns (does not throw) if no
// stream is open.
//
// NOTE(review): embedded line numbers jump (e.g. 6378 -> 6385), so the
// object->Release()/buffer->Stop() calls and the "if ( handle )" guard of
// the canonical file appear elided from this chunk.
6361 void RtApiDs :: closeStream()
6363 if ( stream_.state == STREAM_CLOSED ) {
6364 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6365 error( RtAudioError::WARNING );
6369 // Stop the callback thread.
// Clearing isRunning tells callbackHandler's loop to exit; then join it so no
// callback can touch the DS objects we are about to release.
6370 stream_.callbackInfo.isRunning = false;
6371 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6372 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6374 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6376 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6377 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6378 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6385 if ( handle->buffer[1] ) {
6386 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6387 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6394 CloseHandle( handle->condition );
6396 stream_.apiHandle = 0;
6399 for ( int i=0; i<2; i++ ) {
6400 if ( stream_.userBuffer[i] ) {
6401 free( stream_.userBuffer[i] );
6402 stream_.userBuffer[i] = 0;
6406 if ( stream_.deviceBuffer ) {
6407 free( stream_.deviceBuffer );
6408 stream_.deviceBuffer = 0;
6411 stream_.mode = UNINITIALIZED;
6412 stream_.state = STREAM_CLOSED;
// Start the open stream: begin looping playback on the output buffer and/or
// looping capture on the input buffer, reset drain state, and mark the
// stream RUNNING.  Warns (does not throw) if already running.
6415 void RtApiDs :: startStream()
6418 if ( stream_.state == STREAM_RUNNING ) {
6419 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6420 error( RtAudioError::WARNING );
6424 #if defined( HAVE_GETTIMEOFDAY )
// Stamp stream-time bookkeeping at the moment of (re)start.
6425 gettimeofday( &stream_.lastTickTimestamp, NULL );
6428 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6430 // Increase scheduler frequency on lesser windows (a side-effect of
6431 // increasing timer accuracy). On greater windows (Win2K or later),
6432 // this is already in effect.
// Paired with timeEndPeriod( 1 ) in stopStream().
6433 timeBeginPeriod( 1 );
// Reset the synchronization state used by callbackEvent(): buffersRolling
// forces it to re-establish the write/read cursor positions.
6435 buffersRolling = false;
6436 duplexPrerollBytes = 0;
6438 if ( stream_.mode == DUPLEX ) {
6439 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6440 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start looping playback (the buffer wraps continuously; the callback thread
// chases the play cursor and refills it).
6444 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6446 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6447 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6448 if ( FAILED( result ) ) {
6449 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6450 errorText_ = errorStream_.str();
6455 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6457 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6458 result = buffer->Start( DSCBSTART_LOOPING );
6459 if ( FAILED( result ) ) {
6460 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6461 errorText_ = errorStream_.str();
// Clear any stale drain request/signal from a previous stop before running.
6466 handle->drainCounter = 0;
6467 handle->internalDrain = false;
6468 ResetEvent( handle->condition );
6469 stream_.state = STREAM_RUNNING;
6472 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: for output, request a drain (drainCounter = 2)
// and block on the handle's event until the callback thread signals that the
// remaining audio has played out, then Stop() each DS buffer, zero it so a
// restart does not replay stale data, and rewind the software buffer
// pointers.  Warns (does not throw) if already stopped.
//
// Locking is asymmetric by design: the OUTPUT section takes the mutex after
// the drain wait, and for DUPLEX streams that same lock is held through the
// INPUT section (the input path only locks when mode != DUPLEX); the single
// MUTEX_UNLOCK at the bottom releases it.
6475 void RtApiDs :: stopStream()
6478 if ( stream_.state == STREAM_STOPPED ) {
6479 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6480 error( RtAudioError::WARNING );
6487 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6488 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain in progress: request one and wait for the
// callback thread to SetEvent() after the device has played the tail out.
6489 if ( handle->drainCounter == 0 ) {
6490 handle->drainCounter = 2;
6491 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
// Mark STOPPED before locking so the callback thread backs off (it sleeps
// when it sees a stopped state).
6494 stream_.state = STREAM_STOPPED;
6496 MUTEX_LOCK( &stream_.mutex );
6498 // Stop the buffer and clear memory
6499 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6500 result = buffer->Stop();
6501 if ( FAILED( result ) ) {
6502 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6503 errorText_ = errorStream_.str();
6507 // Lock the buffer and clear it so that if we start to play again,
6508 // we won't have old data playing.
6509 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6510 if ( FAILED( result ) ) {
6511 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6512 errorText_ = errorStream_.str();
6516 // Zero the DS buffer
6517 ZeroMemory( audioPtr, dataLen );
6519 // Unlock the DS buffer
6520 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6521 if ( FAILED( result ) ) {
6522 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6523 errorText_ = errorStream_.str();
6527 // If we start playing again, we must begin at beginning of buffer.
6528 handle->bufferPointer[0] = 0;
6531 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6532 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6536 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex is already held from the output section above.
6538 if ( stream_.mode != DUPLEX )
6539 MUTEX_LOCK( &stream_.mutex );
6541 result = buffer->Stop();
6542 if ( FAILED( result ) ) {
6543 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6544 errorText_ = errorStream_.str();
6548 // Lock the buffer and clear it so that if we start to play again,
6549 // we won't have old data playing.
6550 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6551 if ( FAILED( result ) ) {
6552 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6553 errorText_ = errorStream_.str();
6557 // Zero the DS buffer
6558 ZeroMemory( audioPtr, dataLen );
6560 // Unlock the DS buffer
6561 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6562 if ( FAILED( result ) ) {
6563 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6564 errorText_ = errorStream_.str();
6568 // If we start recording again, we must begin at beginning of buffer.
6569 handle->bufferPointer[1] = 0;
6573 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6574 MUTEX_UNLOCK( &stream_.mutex );
6576 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort the stream: like stopStream() but without a graceful drain — setting
// drainCounter = 2 up front makes the callback thread write silence instead
// of invoking the user callback.  Warns (does not throw) if already stopped.
//
// NOTE(review): the tail of this function (in the canonical file, a call to
// stopStream()) appears elided from this chunk (line-number gap after 6589).
6579 void RtApiDs :: abortStream()
6582 if ( stream_.state == STREAM_STOPPED ) {
6583 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6584 error( RtAudioError::WARNING );
6588 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6589 handle->drainCounter = 2;
6594 void RtApiDs :: callbackEvent()
6596 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6597 Sleep( 50 ); // sleep 50 milliseconds
6601 if ( stream_.state == STREAM_CLOSED ) {
6602 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6603 error( RtAudioError::WARNING );
6607 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6608 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6610 // Check if we were draining the stream and signal is finished.
6611 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6613 stream_.state = STREAM_STOPPING;
6614 if ( handle->internalDrain == false )
6615 SetEvent( handle->condition );
6621 // Invoke user callback to get fresh output data UNLESS we are
6623 if ( handle->drainCounter == 0 ) {
6624 RtAudioCallback callback = (RtAudioCallback) info->callback;
6625 double streamTime = getStreamTime();
6626 RtAudioStreamStatus status = 0;
6627 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6628 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6629 handle->xrun[0] = false;
6631 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6632 status |= RTAUDIO_INPUT_OVERFLOW;
6633 handle->xrun[1] = false;
6635 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6636 stream_.bufferSize, streamTime, status, info->userData );
6637 if ( cbReturnValue == 2 ) {
6638 stream_.state = STREAM_STOPPING;
6639 handle->drainCounter = 2;
6643 else if ( cbReturnValue == 1 ) {
6644 handle->drainCounter = 1;
6645 handle->internalDrain = true;
6650 DWORD currentWritePointer, safeWritePointer;
6651 DWORD currentReadPointer, safeReadPointer;
6652 UINT nextWritePointer;
6654 LPVOID buffer1 = NULL;
6655 LPVOID buffer2 = NULL;
6656 DWORD bufferSize1 = 0;
6657 DWORD bufferSize2 = 0;
6662 MUTEX_LOCK( &stream_.mutex );
6663 if ( stream_.state == STREAM_STOPPED ) {
6664 MUTEX_UNLOCK( &stream_.mutex );
6668 if ( buffersRolling == false ) {
6669 if ( stream_.mode == DUPLEX ) {
6670 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6672 // It takes a while for the devices to get rolling. As a result,
6673 // there's no guarantee that the capture and write device pointers
6674 // will move in lockstep. Wait here for both devices to start
6675 // rolling, and then set our buffer pointers accordingly.
6676 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6677 // bytes later than the write buffer.
6679 // Stub: a serious risk of having a pre-emptive scheduling round
6680 // take place between the two GetCurrentPosition calls... but I'm
6681 // really not sure how to solve the problem. Temporarily boost to
6682 // Realtime priority, maybe; but I'm not sure what priority the
6683 // DirectSound service threads run at. We *should* be roughly
6684 // within a ms or so of correct.
6686 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6687 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6689 DWORD startSafeWritePointer, startSafeReadPointer;
6691 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6692 if ( FAILED( result ) ) {
6693 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6694 errorText_ = errorStream_.str();
6695 MUTEX_UNLOCK( &stream_.mutex );
6696 error( RtAudioError::SYSTEM_ERROR );
6699 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6700 if ( FAILED( result ) ) {
6701 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6702 errorText_ = errorStream_.str();
6703 MUTEX_UNLOCK( &stream_.mutex );
6704 error( RtAudioError::SYSTEM_ERROR );
6708 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6709 if ( FAILED( result ) ) {
6710 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6711 errorText_ = errorStream_.str();
6712 MUTEX_UNLOCK( &stream_.mutex );
6713 error( RtAudioError::SYSTEM_ERROR );
6716 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6717 if ( FAILED( result ) ) {
6718 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6719 errorText_ = errorStream_.str();
6720 MUTEX_UNLOCK( &stream_.mutex );
6721 error( RtAudioError::SYSTEM_ERROR );
6724 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6728 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6730 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6731 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6732 handle->bufferPointer[1] = safeReadPointer;
6734 else if ( stream_.mode == OUTPUT ) {
6736 // Set the proper nextWritePosition after initial startup.
6737 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6738 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6739 if ( FAILED( result ) ) {
6740 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6741 errorText_ = errorStream_.str();
6742 MUTEX_UNLOCK( &stream_.mutex );
6743 error( RtAudioError::SYSTEM_ERROR );
6746 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6747 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6750 buffersRolling = true;
6753 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6755 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6757 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6758 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6759 bufferBytes *= formatBytes( stream_.userFormat );
6760 memset( stream_.userBuffer[0], 0, bufferBytes );
6763 // Setup parameters and do buffer conversion if necessary.
6764 if ( stream_.doConvertBuffer[0] ) {
6765 buffer = stream_.deviceBuffer;
6766 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6767 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6768 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6771 buffer = stream_.userBuffer[0];
6772 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6773 bufferBytes *= formatBytes( stream_.userFormat );
6776 // No byte swapping necessary in DirectSound implementation.
6778 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6779 // unsigned. So, we need to convert our signed 8-bit data here to
6781 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6782 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6784 DWORD dsBufferSize = handle->dsBufferSize[0];
6785 nextWritePointer = handle->bufferPointer[0];
6787 DWORD endWrite, leadPointer;
6789 // Find out where the read and "safe write" pointers are.
6790 result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6791 if ( FAILED( result ) ) {
6792 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6793 errorText_ = errorStream_.str();
6794 MUTEX_UNLOCK( &stream_.mutex );
6795 error( RtAudioError::SYSTEM_ERROR );
6799 // We will copy our output buffer into the region between
6800 // safeWritePointer and leadPointer. If leadPointer is not
6801 // beyond the next endWrite position, wait until it is.
6802 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6803 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6804 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6805 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6806 endWrite = nextWritePointer + bufferBytes;
6808 // Check whether the entire write region is behind the play pointer.
6809 if ( leadPointer >= endWrite ) break;
6811 // If we are here, then we must wait until the leadPointer advances
6812 // beyond the end of our next write region. We use the
6813 // Sleep() function to suspend operation until that happens.
6814 double millis = ( endWrite - leadPointer ) * 1000.0;
6815 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6816 if ( millis < 1.0 ) millis = 1.0;
6817 Sleep( (DWORD) millis );
6820 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6821 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6822 // We've strayed into the forbidden zone ... resync the read pointer.
6823 handle->xrun[0] = true;
6824 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6825 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6826 handle->bufferPointer[0] = nextWritePointer;
6827 endWrite = nextWritePointer + bufferBytes;
6830 // Lock free space in the buffer
6831 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6832 &bufferSize1, &buffer2, &bufferSize2, 0 );
6833 if ( FAILED( result ) ) {
6834 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6835 errorText_ = errorStream_.str();
6836 MUTEX_UNLOCK( &stream_.mutex );
6837 error( RtAudioError::SYSTEM_ERROR );
6841 // Copy our buffer into the DS buffer
6842 CopyMemory( buffer1, buffer, bufferSize1 );
6843 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6845 // Update our buffer offset and unlock sound buffer
6846 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6847 if ( FAILED( result ) ) {
6848 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6849 errorText_ = errorStream_.str();
6850 MUTEX_UNLOCK( &stream_.mutex );
6851 error( RtAudioError::SYSTEM_ERROR );
6854 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6855 handle->bufferPointer[0] = nextWritePointer;
6858 // Don't bother draining input
6859 if ( handle->drainCounter ) {
6860 handle->drainCounter++;
6864 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6866 // Setup parameters.
6867 if ( stream_.doConvertBuffer[1] ) {
6868 buffer = stream_.deviceBuffer;
6869 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6870 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6873 buffer = stream_.userBuffer[1];
6874 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6875 bufferBytes *= formatBytes( stream_.userFormat );
6878 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6879 long nextReadPointer = handle->bufferPointer[1];
6880 DWORD dsBufferSize = handle->dsBufferSize[1];
6882 // Find out where the write and "safe read" pointers are.
6883 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6884 if ( FAILED( result ) ) {
6885 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6886 errorText_ = errorStream_.str();
6887 MUTEX_UNLOCK( &stream_.mutex );
6888 error( RtAudioError::SYSTEM_ERROR );
6892 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6893 DWORD endRead = nextReadPointer + bufferBytes;
6895 // Handling depends on whether we are INPUT or DUPLEX.
6896 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6897 // then a wait here will drag the write pointers into the forbidden zone.
6899 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6900 // it's in a safe position. This causes dropouts, but it seems to be the only
6901 // practical way to sync up the read and write pointers reliably, given the
6902 // the very complex relationship between phase and increment of the read and write
6905 // In order to minimize audible dropouts in DUPLEX mode, we will
6906 // provide a pre-roll period of 0.5 seconds in which we return
6907 // zeros from the read buffer while the pointers sync up.
6909 if ( stream_.mode == DUPLEX ) {
6910 if ( safeReadPointer < endRead ) {
6911 if ( duplexPrerollBytes <= 0 ) {
6912 // Pre-roll time over. Be more agressive.
6913 int adjustment = endRead-safeReadPointer;
6915 handle->xrun[1] = true;
6917 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6918 // and perform fine adjustments later.
6919 // - small adjustments: back off by twice as much.
6920 if ( adjustment >= 2*bufferBytes )
6921 nextReadPointer = safeReadPointer-2*bufferBytes;
6923 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6925 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6929 // In pre=roll time. Just do it.
6930 nextReadPointer = safeReadPointer - bufferBytes;
6931 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6933 endRead = nextReadPointer + bufferBytes;
6936 else { // mode == INPUT
6937 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6938 // See comments for playback.
6939 double millis = (endRead - safeReadPointer) * 1000.0;
6940 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6941 if ( millis < 1.0 ) millis = 1.0;
6942 Sleep( (DWORD) millis );
6944 // Wake up and find out where we are now.
6945 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6946 if ( FAILED( result ) ) {
6947 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6948 errorText_ = errorStream_.str();
6949 MUTEX_UNLOCK( &stream_.mutex );
6950 error( RtAudioError::SYSTEM_ERROR );
6954 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6958 // Lock free space in the buffer
6959 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6960 &bufferSize1, &buffer2, &bufferSize2, 0 );
6961 if ( FAILED( result ) ) {
6962 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6963 errorText_ = errorStream_.str();
6964 MUTEX_UNLOCK( &stream_.mutex );
6965 error( RtAudioError::SYSTEM_ERROR );
6969 if ( duplexPrerollBytes <= 0 ) {
6970 // Copy our buffer into the DS buffer
6971 CopyMemory( buffer, buffer1, bufferSize1 );
6972 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6975 memset( buffer, 0, bufferSize1 );
6976 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6977 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6980 // Update our buffer offset and unlock sound buffer
6981 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6982 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6983 if ( FAILED( result ) ) {
6984 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6985 errorText_ = errorStream_.str();
6986 MUTEX_UNLOCK( &stream_.mutex );
6987 error( RtAudioError::SYSTEM_ERROR );
6990 handle->bufferPointer[1] = nextReadPointer;
6992 // No byte swapping necessary in DirectSound implementation.
6994 // If necessary, convert 8-bit data from unsigned to signed.
6995 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6996 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6998 // Do buffer conversion if necessary.
6999 if ( stream_.doConvertBuffer[1] )
7000 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
7004 MUTEX_UNLOCK( &stream_.mutex );
7005 RtApi::tickStreamTime();
7008 // Definitions for utility functions and callbacks
7009 // specific to the DirectSound implementation.
7011 static unsigned __stdcall callbackHandler( void *ptr )
7013 CallbackInfo *info = (CallbackInfo *) ptr;
7014 RtApiDs *object = (RtApiDs *) info->object;
7015 bool* isRunning = &info->isRunning;
7017 while ( *isRunning == true ) {
7018 object->callbackEvent();
7025 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
7026 LPCTSTR description,
7030 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
7031 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
7034 bool validDevice = false;
7035 if ( probeInfo.isInput == true ) {
7037 LPDIRECTSOUNDCAPTURE object;
7039 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
7040 if ( hr != DS_OK ) return TRUE;
7042 caps.dwSize = sizeof(caps);
7043 hr = object->GetCaps( &caps );
7044 if ( hr == DS_OK ) {
7045 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7052 LPDIRECTSOUND object;
7053 hr = DirectSoundCreate( lpguid, &object, NULL );
7054 if ( hr != DS_OK ) return TRUE;
7056 caps.dwSize = sizeof(caps);
7057 hr = object->GetCaps( &caps );
7058 if ( hr == DS_OK ) {
7059 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7065 // If good device, then save its name and guid.
7066 std::string name = convertCharPointerToStdString( description );
7067 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7068 if ( lpguid == NULL )
7069 name = "Default Device";
7070 if ( validDevice ) {
7071 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7072 if ( dsDevices[i].name == name ) {
7073 dsDevices[i].found = true;
7074 if ( probeInfo.isInput ) {
7075 dsDevices[i].id[1] = lpguid;
7076 dsDevices[i].validId[1] = true;
7079 dsDevices[i].id[0] = lpguid;
7080 dsDevices[i].validId[0] = true;
7088 device.found = true;
7089 if ( probeInfo.isInput ) {
7090 device.id[1] = lpguid;
7091 device.validId[1] = true;
7094 device.id[0] = lpguid;
7095 device.validId[0] = true;
7097 dsDevices.push_back( device );
7103 static const char* getErrorString( int code )
7107 case DSERR_ALLOCATED:
7108 return "Already allocated";
7110 case DSERR_CONTROLUNAVAIL:
7111 return "Control unavailable";
7113 case DSERR_INVALIDPARAM:
7114 return "Invalid parameter";
7116 case DSERR_INVALIDCALL:
7117 return "Invalid call";
7120 return "Generic error";
7122 case DSERR_PRIOLEVELNEEDED:
7123 return "Priority level needed";
7125 case DSERR_OUTOFMEMORY:
7126 return "Out of memory";
7128 case DSERR_BADFORMAT:
7129 return "The sample rate or the channel format is not supported";
7131 case DSERR_UNSUPPORTED:
7132 return "Not supported";
7134 case DSERR_NODRIVER:
7137 case DSERR_ALREADYINITIALIZED:
7138 return "Already initialized";
7140 case DSERR_NOAGGREGATION:
7141 return "No aggregation";
7143 case DSERR_BUFFERLOST:
7144 return "Buffer lost";
7146 case DSERR_OTHERAPPHASPRIO:
7147 return "Another application already has priority";
7149 case DSERR_UNINITIALIZED:
7150 return "Uninitialized";
7153 return "DirectSound unknown error";
7156 //******************** End of __WINDOWS_DS__ *********************//
7160 #if defined(__LINUX_ALSA__)
7162 #include <alsa/asoundlib.h>
7165 // A structure to hold various information related to the ALSA API
7168 snd_pcm_t *handles[2];
7171 pthread_cond_t runnable_cv;
7175 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7178 static void *alsaCallbackHandler( void * ptr );
7180 RtApiAlsa :: RtApiAlsa()
7182 // Nothing to do here.
7185 RtApiAlsa :: ~RtApiAlsa()
7187 if ( stream_.state != STREAM_CLOSED ) closeStream();
7190 unsigned int RtApiAlsa :: getDeviceCount( void )
7192 unsigned nDevices = 0;
7193 int result, subdevice, card;
7195 snd_ctl_t *handle = 0;
7197 // Count cards and devices
7199 snd_card_next( &card );
7200 while ( card >= 0 ) {
7201 sprintf( name, "hw:%d", card );
7202 result = snd_ctl_open( &handle, name, 0 );
7205 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7206 errorText_ = errorStream_.str();
7207 error( RtAudioError::WARNING );
7212 result = snd_ctl_pcm_next_device( handle, &subdevice );
7214 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7215 errorText_ = errorStream_.str();
7216 error( RtAudioError::WARNING );
7219 if ( subdevice < 0 )
7225 snd_ctl_close( handle );
7226 snd_card_next( &card );
7229 result = snd_ctl_open( &handle, "default", 0 );
7232 snd_ctl_close( handle );
// Probe a single ALSA device (by zero-based index over the same
// enumeration order as getDeviceCount) and fill in a DeviceInfo:
// output/input/duplex channel counts, supported sample rates, native
// data formats and a readable name.  Probe failures are reported as
// WARNINGs and return a partially-filled info.
//
// NOTE(review): this extraction is missing many interior lines (closing
// braces, the `probeParameters:` label, declarations of `phandle`,
// `value`, `cardname`, and the final `return info;`); code below is
// kept byte-identical, comments only added.
7238 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7240 RtAudio::DeviceInfo info;
7241 info.probed = false;
7243 unsigned nDevices = 0;
7244 int result, subdevice, card;
7246 snd_ctl_t *chandle = 0;
// Walk cards/subdevices exactly as getDeviceCount() does, stopping when
// the running index matches the requested `device` to capture its
// "hw:card,subdevice" name.
7248 // Count cards and devices
7251 snd_card_next( &card );
7252 while ( card >= 0 ) {
7253 sprintf( name, "hw:%d", card );
7254 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7257 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7258 errorText_ = errorStream_.str();
7259 error( RtAudioError::WARNING );
7264 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7266 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7267 errorText_ = errorStream_.str();
7268 error( RtAudioError::WARNING );
7271 if ( subdevice < 0 ) break;
7272 if ( nDevices == device ) {
7273 sprintf( name, "hw:%d,%d", card, subdevice );
7280 snd_ctl_close( chandle );
7281 snd_card_next( &card );
// The "default" virtual device is the last index, matching
// getDeviceCount()'s enumeration.
7284 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7285 if ( result == 0 ) {
7286 if ( nDevices == device ) {
7287 strcpy( name, "default" );
7293 if ( nDevices == 0 ) {
7294 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7295 error( RtAudioError::INVALID_USE );
7299 if ( device >= nDevices ) {
7300 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7301 error( RtAudioError::INVALID_USE );
7307 // If a stream is already open, we cannot probe the stream devices.
7308 // Thus, use the saved results.
7309 if ( stream_.state != STREAM_CLOSED &&
7310 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7311 snd_ctl_close( chandle );
7312 if ( device >= devices_.size() ) {
7313 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7314 error( RtAudioError::WARNING );
7317 return devices_[ device ];
7320 int openMode = SND_PCM_ASYNC;
7321 snd_pcm_stream_t stream;
7322 snd_pcm_info_t *pcminfo;
7323 snd_pcm_info_alloca( &pcminfo );
7325 snd_pcm_hw_params_t *params;
// FIXME(review): "¶ms" below is mojibake for "&params" (the HTML
// entity "&para;" ate the ampersand prefix) — restore "&params".
7326 snd_pcm_hw_params_alloca( ¶ms );
7328 // First try for playback unless default device (which has subdev -1)
7329 stream = SND_PCM_STREAM_PLAYBACK;
7330 snd_pcm_info_set_stream( pcminfo, stream );
7331 if ( subdevice != -1 ) {
7332 snd_pcm_info_set_device( pcminfo, subdevice );
7333 snd_pcm_info_set_subdevice( pcminfo, 0 );
7335 result = snd_ctl_pcm_info( chandle, pcminfo );
7337 // Device probably doesn't support playback.
7342 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7344 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7345 errorText_ = errorStream_.str();
7346 error( RtAudioError::WARNING );
7350 // The device is open ... fill the parameter structure.
7351 result = snd_pcm_hw_params_any( phandle, params );
7353 snd_pcm_close( phandle );
7354 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7355 errorText_ = errorStream_.str();
7356 error( RtAudioError::WARNING );
7360 // Get output channel information.
7362 result = snd_pcm_hw_params_get_channels_max( params, &value );
7364 snd_pcm_close( phandle );
7365 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7366 errorText_ = errorStream_.str();
7367 error( RtAudioError::WARNING );
7370 info.outputChannels = value;
7371 snd_pcm_close( phandle );
// Repeat the probe for the capture direction.
7374 stream = SND_PCM_STREAM_CAPTURE;
7375 snd_pcm_info_set_stream( pcminfo, stream );
7377 // Now try for capture unless default device (with subdev = -1)
7378 if ( subdevice != -1 ) {
7379 result = snd_ctl_pcm_info( chandle, pcminfo );
7380 snd_ctl_close( chandle );
7382 // Device probably doesn't support capture.
7383 if ( info.outputChannels == 0 ) return info;
7384 goto probeParameters;
7388 snd_ctl_close( chandle );
7390 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7392 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7393 errorText_ = errorStream_.str();
7394 error( RtAudioError::WARNING );
7395 if ( info.outputChannels == 0 ) return info;
7396 goto probeParameters;
7399 // The device is open ... fill the parameter structure.
7400 result = snd_pcm_hw_params_any( phandle, params );
7402 snd_pcm_close( phandle );
7403 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7404 errorText_ = errorStream_.str();
7405 error( RtAudioError::WARNING );
7406 if ( info.outputChannels == 0 ) return info;
7407 goto probeParameters;
7410 result = snd_pcm_hw_params_get_channels_max( params, &value );
7412 snd_pcm_close( phandle );
7413 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7414 errorText_ = errorStream_.str();
7415 error( RtAudioError::WARNING );
7416 if ( info.outputChannels == 0 ) return info;
7417 goto probeParameters;
7419 info.inputChannels = value;
7420 snd_pcm_close( phandle );
7422 // If device opens for both playback and capture, we determine the channels.
7423 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7424 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7426 // ALSA doesn't provide default devices so we'll use the first available one.
7427 if ( device == 0 && info.outputChannels > 0 )
7428 info.isDefaultOutput = true;
7429 if ( device == 0 && info.inputChannels > 0 )
7430 info.isDefaultInput = true;
// probeParameters target (label line elided in this extraction):
// re-open in the richer direction to enumerate rates and formats.
7433 // At this point, we just need to figure out the supported data
7434 // formats and sample rates. We'll proceed by opening the device in
7435 // the direction with the maximum number of channels, or playback if
7436 // they are equal. This might limit our sample rate options, but so
7439 if ( info.outputChannels >= info.inputChannels )
7440 stream = SND_PCM_STREAM_PLAYBACK;
7442 stream = SND_PCM_STREAM_CAPTURE;
7443 snd_pcm_info_set_stream( pcminfo, stream );
7445 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7447 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7448 errorText_ = errorStream_.str();
7449 error( RtAudioError::WARNING );
7453 // The device is open ... fill the parameter structure.
7454 result = snd_pcm_hw_params_any( phandle, params );
7456 snd_pcm_close( phandle );
7457 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7458 errorText_ = errorStream_.str();
7459 error( RtAudioError::WARNING );
7463 // Test our discrete set of sample rate values.
7464 info.sampleRates.clear();
7465 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7466 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7467 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz.
7469 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7470 info.preferredSampleRate = SAMPLE_RATES[i];
7473 if ( info.sampleRates.size() == 0 ) {
7474 snd_pcm_close( phandle );
7475 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7476 errorText_ = errorStream_.str();
7477 error( RtAudioError::WARNING );
7481 // Probe the supported data formats ... we don't care about endian-ness just yet
7482 snd_pcm_format_t format;
7483 info.nativeFormats = 0;
7484 format = SND_PCM_FORMAT_S8;
7485 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7486 info.nativeFormats |= RTAUDIO_SINT8;
7487 format = SND_PCM_FORMAT_S16;
7488 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7489 info.nativeFormats |= RTAUDIO_SINT16;
7490 format = SND_PCM_FORMAT_S24;
7491 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7492 info.nativeFormats |= RTAUDIO_SINT24;
7493 format = SND_PCM_FORMAT_S32;
7494 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7495 info.nativeFormats |= RTAUDIO_SINT32;
7496 format = SND_PCM_FORMAT_FLOAT;
7497 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7498 info.nativeFormats |= RTAUDIO_FLOAT32;
7499 format = SND_PCM_FORMAT_FLOAT64;
7500 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7501 info.nativeFormats |= RTAUDIO_FLOAT64;
7503 // Check that we have at least one supported format
7504 if ( info.nativeFormats == 0 ) {
7505 snd_pcm_close( phandle );
7506 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7507 errorText_ = errorStream_.str();
7508 error( RtAudioError::WARNING );
7512 // Get the device name
7514 result = snd_card_get_name( card, &cardname );
7515 if ( result >= 0 ) {
7516 sprintf( name, "hw:%s,%d", cardname, subdevice );
7521 // That's all ... close the device and return
7522 snd_pcm_close( phandle );
7527 void RtApiAlsa :: saveDeviceInfo( void )
7531 unsigned int nDevices = getDeviceCount();
7532 devices_.resize( nDevices );
7533 for ( unsigned int i=0; i<nDevices; i++ )
7534 devices_[i] = getDeviceInfo( i );
7537 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7538 unsigned int firstChannel, unsigned int sampleRate,
7539 RtAudioFormat format, unsigned int *bufferSize,
7540 RtAudio::StreamOptions *options )
7543 #if defined(__RTAUDIO_DEBUG__)
7545 snd_output_stdio_attach(&out, stderr, 0);
7548 // I'm not using the "plug" interface ... too much inconsistent behavior.
7550 unsigned nDevices = 0;
7551 int result, subdevice, card;
7555 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7556 snprintf(name, sizeof(name), "%s", "default");
7558 // Count cards and devices
7560 snd_card_next( &card );
7561 while ( card >= 0 ) {
7562 sprintf( name, "hw:%d", card );
7563 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7565 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7566 errorText_ = errorStream_.str();
7571 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7572 if ( result < 0 ) break;
7573 if ( subdevice < 0 ) break;
7574 if ( nDevices == device ) {
7575 sprintf( name, "hw:%d,%d", card, subdevice );
7576 snd_ctl_close( chandle );
7581 snd_ctl_close( chandle );
7582 snd_card_next( &card );
7585 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7586 if ( result == 0 ) {
7587 if ( nDevices == device ) {
7588 strcpy( name, "default" );
7589 snd_ctl_close( chandle );
7594 snd_ctl_close( chandle );
7596 if ( nDevices == 0 ) {
7597 // This should not happen because a check is made before this function is called.
7598 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7602 if ( device >= nDevices ) {
7603 // This should not happen because a check is made before this function is called.
7604 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7611 // The getDeviceInfo() function will not work for a device that is
7612 // already open. Thus, we'll probe the system before opening a
7613 // stream and save the results for use by getDeviceInfo().
7614 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7615 this->saveDeviceInfo();
7617 snd_pcm_stream_t stream;
7618 if ( mode == OUTPUT )
7619 stream = SND_PCM_STREAM_PLAYBACK;
7621 stream = SND_PCM_STREAM_CAPTURE;
7624 int openMode = SND_PCM_ASYNC;
7625 result = snd_pcm_open( &phandle, name, stream, openMode );
7627 if ( mode == OUTPUT )
7628 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7630 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7631 errorText_ = errorStream_.str();
7635 // Fill the parameter structure.
7636 snd_pcm_hw_params_t *hw_params;
7637 snd_pcm_hw_params_alloca( &hw_params );
7638 result = snd_pcm_hw_params_any( phandle, hw_params );
7640 snd_pcm_close( phandle );
7641 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7642 errorText_ = errorStream_.str();
7646 #if defined(__RTAUDIO_DEBUG__)
7647 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7648 snd_pcm_hw_params_dump( hw_params, out );
7651 // Set access ... check user preference.
7652 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7653 stream_.userInterleaved = false;
7654 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7656 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7657 stream_.deviceInterleaved[mode] = true;
7660 stream_.deviceInterleaved[mode] = false;
7663 stream_.userInterleaved = true;
7664 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7666 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7667 stream_.deviceInterleaved[mode] = false;
7670 stream_.deviceInterleaved[mode] = true;
7674 snd_pcm_close( phandle );
7675 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7676 errorText_ = errorStream_.str();
7680 // Determine how to set the device format.
7681 stream_.userFormat = format;
7682 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7684 if ( format == RTAUDIO_SINT8 )
7685 deviceFormat = SND_PCM_FORMAT_S8;
7686 else if ( format == RTAUDIO_SINT16 )
7687 deviceFormat = SND_PCM_FORMAT_S16;
7688 else if ( format == RTAUDIO_SINT24 )
7689 deviceFormat = SND_PCM_FORMAT_S24;
7690 else if ( format == RTAUDIO_SINT32 )
7691 deviceFormat = SND_PCM_FORMAT_S32;
7692 else if ( format == RTAUDIO_FLOAT32 )
7693 deviceFormat = SND_PCM_FORMAT_FLOAT;
7694 else if ( format == RTAUDIO_FLOAT64 )
7695 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7697 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7698 stream_.deviceFormat[mode] = format;
7702 // The user requested format is not natively supported by the device.
7703 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7704 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7705 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7709 deviceFormat = SND_PCM_FORMAT_FLOAT;
7710 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7711 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7715 deviceFormat = SND_PCM_FORMAT_S32;
7716 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7717 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7721 deviceFormat = SND_PCM_FORMAT_S24;
7722 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7723 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7727 deviceFormat = SND_PCM_FORMAT_S16;
7728 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7729 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7733 deviceFormat = SND_PCM_FORMAT_S8;
7734 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7735 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7739 // If we get here, no supported format was found.
7740 snd_pcm_close( phandle );
7741 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7742 errorText_ = errorStream_.str();
7746 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7748 snd_pcm_close( phandle );
7749 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7750 errorText_ = errorStream_.str();
7754 // Determine whether byte-swaping is necessary.
7755 stream_.doByteSwap[mode] = false;
7756 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7757 result = snd_pcm_format_cpu_endian( deviceFormat );
7759 stream_.doByteSwap[mode] = true;
7760 else if (result < 0) {
7761 snd_pcm_close( phandle );
7762 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7763 errorText_ = errorStream_.str();
7768 // Set the sample rate.
7769 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7771 snd_pcm_close( phandle );
7772 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7773 errorText_ = errorStream_.str();
7777 // Determine the number of channels for this device. We support a possible
7778 // minimum device channel number > than the value requested by the user.
7779 stream_.nUserChannels[mode] = channels;
7781 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7782 unsigned int deviceChannels = value;
7783 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7784 snd_pcm_close( phandle );
7785 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7786 errorText_ = errorStream_.str();
7790 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7792 snd_pcm_close( phandle );
7793 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7794 errorText_ = errorStream_.str();
7797 deviceChannels = value;
7798 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7799 stream_.nDeviceChannels[mode] = deviceChannels;
7801 // Set the device channels.
7802 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7804 snd_pcm_close( phandle );
7805 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7806 errorText_ = errorStream_.str();
7810 // Set the buffer (or period) size.
7812 snd_pcm_uframes_t periodSize = *bufferSize;
7813 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7815 snd_pcm_close( phandle );
7816 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7817 errorText_ = errorStream_.str();
7820 *bufferSize = periodSize;
7822 // Set the buffer number, which in ALSA is referred to as the "period".
7823 unsigned int periods = 0;
7824 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7825 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7826 if ( periods < 2 ) periods = 4; // a fairly safe default value
7827 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7829 snd_pcm_close( phandle );
7830 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7831 errorText_ = errorStream_.str();
7835 // If attempting to setup a duplex stream, the bufferSize parameter
7836 // MUST be the same in both directions!
7837 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7838 snd_pcm_close( phandle );
7839 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7840 errorText_ = errorStream_.str();
7844 stream_.bufferSize = *bufferSize;
7846 // Install the hardware configuration
7847 result = snd_pcm_hw_params( phandle, hw_params );
7849 snd_pcm_close( phandle );
7850 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7851 errorText_ = errorStream_.str();
7855 #if defined(__RTAUDIO_DEBUG__)
7856 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7857 snd_pcm_hw_params_dump( hw_params, out );
7860 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7861 snd_pcm_sw_params_t *sw_params = NULL;
7862 snd_pcm_sw_params_alloca( &sw_params );
7863 snd_pcm_sw_params_current( phandle, sw_params );
7864 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7865 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7866 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7868 // The following two settings were suggested by Theo Veenker
7869 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7870 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7872 // here are two options for a fix
7873 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7874 snd_pcm_uframes_t val;
7875 snd_pcm_sw_params_get_boundary( sw_params, &val );
7876 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7878 result = snd_pcm_sw_params( phandle, sw_params );
7880 snd_pcm_close( phandle );
7881 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7882 errorText_ = errorStream_.str();
7886 #if defined(__RTAUDIO_DEBUG__)
7887 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7888 snd_pcm_sw_params_dump( sw_params, out );
7891 // Set flags for buffer conversion
7892 stream_.doConvertBuffer[mode] = false;
7893 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7894 stream_.doConvertBuffer[mode] = true;
7895 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7896 stream_.doConvertBuffer[mode] = true;
7897 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7898 stream_.nUserChannels[mode] > 1 )
7899 stream_.doConvertBuffer[mode] = true;
7901 // Allocate the ApiHandle if necessary and then save.
7902 AlsaHandle *apiInfo = 0;
7903 if ( stream_.apiHandle == 0 ) {
7905 apiInfo = (AlsaHandle *) new AlsaHandle;
7907 catch ( std::bad_alloc& ) {
7908 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7912 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7913 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7917 stream_.apiHandle = (void *) apiInfo;
7918 apiInfo->handles[0] = 0;
7919 apiInfo->handles[1] = 0;
7922 apiInfo = (AlsaHandle *) stream_.apiHandle;
7924 apiInfo->handles[mode] = phandle;
7927 // Allocate necessary internal buffers.
7928 unsigned long bufferBytes;
7929 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7930 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7931 if ( stream_.userBuffer[mode] == NULL ) {
7932 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7936 if ( stream_.doConvertBuffer[mode] ) {
7938 bool makeBuffer = true;
7939 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7940 if ( mode == INPUT ) {
7941 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7942 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7943 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7948 bufferBytes *= *bufferSize;
7949 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7950 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7951 if ( stream_.deviceBuffer == NULL ) {
7952 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7958 stream_.sampleRate = sampleRate;
7959 stream_.nBuffers = periods;
7960 stream_.device[mode] = device;
7961 stream_.state = STREAM_STOPPED;
7963 // Setup the buffer conversion information structure.
7964 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7966 // Setup thread if necessary.
7967 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7968 // We had already set up an output stream.
7969 stream_.mode = DUPLEX;
7970 // Link the streams if possible.
7971 apiInfo->synchronized = false;
7972 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7973 apiInfo->synchronized = true;
7975 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7976 error( RtAudioError::WARNING );
7980 stream_.mode = mode;
7982 // Setup callback thread.
7983 stream_.callbackInfo.object = (void *) this;
7985 // Set the thread attributes for joinable and realtime scheduling
7986 // priority (optional). The higher priority will only take affect
7987 // if the program is run as root or suid. Note, under Linux
7988 // processes with CAP_SYS_NICE privilege, a user can change
7989 // scheduling policy and priority (thus need not be root). See
7990 // POSIX "capabilities".
7991 pthread_attr_t attr;
7992 pthread_attr_init( &attr );
7993 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7994 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7995 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7996 stream_.callbackInfo.doRealtime = true;
7997 struct sched_param param;
7998 int priority = options->priority;
7999 int min = sched_get_priority_min( SCHED_RR );
8000 int max = sched_get_priority_max( SCHED_RR );
8001 if ( priority < min ) priority = min;
8002 else if ( priority > max ) priority = max;
8003 param.sched_priority = priority;
8005 // Set the policy BEFORE the priority. Otherwise it fails.
8006 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8007 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8008 // This is definitely required. Otherwise it fails.
8009 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8010 pthread_attr_setschedparam(&attr, ¶m);
8013 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8015 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8018 stream_.callbackInfo.isRunning = true;
8019 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
8020 pthread_attr_destroy( &attr );
8022 // Failed. Try instead with default attributes.
8023 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
8025 stream_.callbackInfo.isRunning = false;
8026 errorText_ = "RtApiAlsa::error creating callback thread!";
8036 pthread_cond_destroy( &apiInfo->runnable_cv );
8037 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8038 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8040 stream_.apiHandle = 0;
8043 if ( phandle) snd_pcm_close( phandle );
8045 for ( int i=0; i<2; i++ ) {
8046 if ( stream_.userBuffer[i] ) {
8047 free( stream_.userBuffer[i] );
8048 stream_.userBuffer[i] = 0;
8052 if ( stream_.deviceBuffer ) {
8053 free( stream_.deviceBuffer );
8054 stream_.deviceBuffer = 0;
8057 stream_.state = STREAM_CLOSED;
8061 void RtApiAlsa :: closeStream()
8063 if ( stream_.state == STREAM_CLOSED ) {
8064 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8065 error( RtAudioError::WARNING );
8069 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8070 stream_.callbackInfo.isRunning = false;
8071 MUTEX_LOCK( &stream_.mutex );
8072 if ( stream_.state == STREAM_STOPPED ) {
8073 apiInfo->runnable = true;
8074 pthread_cond_signal( &apiInfo->runnable_cv );
8076 MUTEX_UNLOCK( &stream_.mutex );
8077 pthread_join( stream_.callbackInfo.thread, NULL );
8079 if ( stream_.state == STREAM_RUNNING ) {
8080 stream_.state = STREAM_STOPPED;
8081 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8082 snd_pcm_drop( apiInfo->handles[0] );
8083 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8084 snd_pcm_drop( apiInfo->handles[1] );
8088 pthread_cond_destroy( &apiInfo->runnable_cv );
8089 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8090 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8092 stream_.apiHandle = 0;
8095 for ( int i=0; i<2; i++ ) {
8096 if ( stream_.userBuffer[i] ) {
8097 free( stream_.userBuffer[i] );
8098 stream_.userBuffer[i] = 0;
8102 if ( stream_.deviceBuffer ) {
8103 free( stream_.deviceBuffer );
8104 stream_.deviceBuffer = 0;
8107 stream_.mode = UNINITIALIZED;
8108 stream_.state = STREAM_CLOSED;
8111 void RtApiAlsa :: startStream()
8113 // This method calls snd_pcm_prepare if the device isn't already in that state.
8116 if ( stream_.state == STREAM_RUNNING ) {
8117 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8118 error( RtAudioError::WARNING );
8122 MUTEX_LOCK( &stream_.mutex );
8124 #if defined( HAVE_GETTIMEOFDAY )
8125 gettimeofday( &stream_.lastTickTimestamp, NULL );
8129 snd_pcm_state_t state;
8130 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8131 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8132 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8133 state = snd_pcm_state( handle[0] );
8134 if ( state != SND_PCM_STATE_PREPARED ) {
8135 result = snd_pcm_prepare( handle[0] );
8137 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8138 errorText_ = errorStream_.str();
8144 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8145 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8146 state = snd_pcm_state( handle[1] );
8147 if ( state != SND_PCM_STATE_PREPARED ) {
8148 result = snd_pcm_prepare( handle[1] );
8150 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8151 errorText_ = errorStream_.str();
8157 stream_.state = STREAM_RUNNING;
8160 apiInfo->runnable = true;
8161 pthread_cond_signal( &apiInfo->runnable_cv );
8162 MUTEX_UNLOCK( &stream_.mutex );
8164 if ( result >= 0 ) return;
8165 error( RtAudioError::SYSTEM_ERROR );
8168 void RtApiAlsa :: stopStream()
8171 if ( stream_.state == STREAM_STOPPED ) {
8172 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8173 error( RtAudioError::WARNING );
8177 stream_.state = STREAM_STOPPED;
8178 MUTEX_LOCK( &stream_.mutex );
8181 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8182 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8183 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8184 if ( apiInfo->synchronized )
8185 result = snd_pcm_drop( handle[0] );
8187 result = snd_pcm_drain( handle[0] );
8189 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8190 errorText_ = errorStream_.str();
8195 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8196 result = snd_pcm_drop( handle[1] );
8198 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8199 errorText_ = errorStream_.str();
8205 apiInfo->runnable = false; // fixes high CPU usage when stopped
8206 MUTEX_UNLOCK( &stream_.mutex );
8208 if ( result >= 0 ) return;
8209 error( RtAudioError::SYSTEM_ERROR );
8212 void RtApiAlsa :: abortStream()
8215 if ( stream_.state == STREAM_STOPPED ) {
8216 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8217 error( RtAudioError::WARNING );
8221 stream_.state = STREAM_STOPPED;
8222 MUTEX_LOCK( &stream_.mutex );
8225 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8226 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8227 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8228 result = snd_pcm_drop( handle[0] );
8230 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8231 errorText_ = errorStream_.str();
8236 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8237 result = snd_pcm_drop( handle[1] );
8239 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8240 errorText_ = errorStream_.str();
8246 apiInfo->runnable = false; // fixes high CPU usage when stopped
8247 MUTEX_UNLOCK( &stream_.mutex );
8249 if ( result >= 0 ) return;
8250 error( RtAudioError::SYSTEM_ERROR );
8253 void RtApiAlsa :: callbackEvent()
8255 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8256 if ( stream_.state == STREAM_STOPPED ) {
8257 MUTEX_LOCK( &stream_.mutex );
8258 while ( !apiInfo->runnable )
8259 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8261 if ( stream_.state != STREAM_RUNNING ) {
8262 MUTEX_UNLOCK( &stream_.mutex );
8265 MUTEX_UNLOCK( &stream_.mutex );
8268 if ( stream_.state == STREAM_CLOSED ) {
8269 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8270 error( RtAudioError::WARNING );
8274 int doStopStream = 0;
8275 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8276 double streamTime = getStreamTime();
8277 RtAudioStreamStatus status = 0;
8278 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8279 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8280 apiInfo->xrun[0] = false;
8282 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8283 status |= RTAUDIO_INPUT_OVERFLOW;
8284 apiInfo->xrun[1] = false;
8286 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8287 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
8289 if ( doStopStream == 2 ) {
8294 MUTEX_LOCK( &stream_.mutex );
8296 // The state might change while waiting on a mutex.
8297 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8303 snd_pcm_sframes_t frames;
8304 RtAudioFormat format;
8305 handle = (snd_pcm_t **) apiInfo->handles;
8307 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8309 // Setup parameters.
8310 if ( stream_.doConvertBuffer[1] ) {
8311 buffer = stream_.deviceBuffer;
8312 channels = stream_.nDeviceChannels[1];
8313 format = stream_.deviceFormat[1];
8316 buffer = stream_.userBuffer[1];
8317 channels = stream_.nUserChannels[1];
8318 format = stream_.userFormat;
8321 // Read samples from device in interleaved/non-interleaved format.
8322 if ( stream_.deviceInterleaved[1] )
8323 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
8325 void *bufs[channels];
8326 size_t offset = stream_.bufferSize * formatBytes( format );
8327 for ( int i=0; i<channels; i++ )
8328 bufs[i] = (void *) (buffer + (i * offset));
8329 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8332 if ( result < (int) stream_.bufferSize ) {
8333 // Either an error or overrun occured.
8334 if ( result == -EPIPE ) {
8335 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8336 if ( state == SND_PCM_STATE_XRUN ) {
8337 apiInfo->xrun[1] = true;
8338 result = snd_pcm_prepare( handle[1] );
8340 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8341 errorText_ = errorStream_.str();
8345 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8346 errorText_ = errorStream_.str();
8350 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8351 errorText_ = errorStream_.str();
8353 error( RtAudioError::WARNING );
8357 // Do byte swapping if necessary.
8358 if ( stream_.doByteSwap[1] )
8359 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8361 // Do buffer conversion if necessary.
8362 if ( stream_.doConvertBuffer[1] )
8363 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8365 // Check stream latency
8366 result = snd_pcm_delay( handle[1], &frames );
8367 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
8372 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8374 // Setup parameters and do buffer conversion if necessary.
8375 if ( stream_.doConvertBuffer[0] ) {
8376 buffer = stream_.deviceBuffer;
8377 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8378 channels = stream_.nDeviceChannels[0];
8379 format = stream_.deviceFormat[0];
8382 buffer = stream_.userBuffer[0];
8383 channels = stream_.nUserChannels[0];
8384 format = stream_.userFormat;
8387 // Do byte swapping if necessary.
8388 if ( stream_.doByteSwap[0] )
8389 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8391 // Write samples to device in interleaved/non-interleaved format.
8392 if ( stream_.deviceInterleaved[0] )
8393 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8395 void *bufs[channels];
8396 size_t offset = stream_.bufferSize * formatBytes( format );
8397 for ( int i=0; i<channels; i++ )
8398 bufs[i] = (void *) (buffer + (i * offset));
8399 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8402 if ( result < (int) stream_.bufferSize ) {
8403 // Either an error or underrun occured.
8404 if ( result == -EPIPE ) {
8405 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8406 if ( state == SND_PCM_STATE_XRUN ) {
8407 apiInfo->xrun[0] = true;
8408 result = snd_pcm_prepare( handle[0] );
8410 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8411 errorText_ = errorStream_.str();
8414 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8417 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8418 errorText_ = errorStream_.str();
8422 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8423 errorText_ = errorStream_.str();
8425 error( RtAudioError::WARNING );
8429 // Check stream latency
8430 result = snd_pcm_delay( handle[0], &frames );
8431 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8435 MUTEX_UNLOCK( &stream_.mutex );
8437 RtApi::tickStreamTime();
8438 if ( doStopStream == 1 ) this->stopStream();
8441 static void *alsaCallbackHandler( void *ptr )
8443 CallbackInfo *info = (CallbackInfo *) ptr;
8444 RtApiAlsa *object = (RtApiAlsa *) info->object;
8445 bool *isRunning = &info->isRunning;
8447 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8448 if ( info->doRealtime ) {
8449 std::cerr << "RtAudio alsa: " <<
8450 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8451 "running realtime scheduling" << std::endl;
8455 while ( *isRunning == true ) {
8456 pthread_testcancel();
8457 object->callbackEvent();
8460 pthread_exit( NULL );
8463 //******************** End of __LINUX_ALSA__ *********************//
8466 #if defined(__LINUX_PULSE__)
8468 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8469 // and Tristan Matthews.
8471 #include <pulse/error.h>
8472 #include <pulse/simple.h>
// Sample rates offered by the PulseAudio backend; the zero entry terminates the list.
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};
8478 struct rtaudio_pa_format_mapping_t {
8479 RtAudioFormat rtaudio_format;
8480 pa_sample_format_t pa_format;
8483 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8484 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8485 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8486 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8487 {0, PA_SAMPLE_INVALID}};
8489 struct PulseAudioHandle {
8493 pthread_cond_t runnable_cv;
8495 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
8498 RtApiPulse::~RtApiPulse()
8500 if ( stream_.state != STREAM_CLOSED )
8504 unsigned int RtApiPulse::getDeviceCount( void )
8509 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8511 RtAudio::DeviceInfo info;
8513 info.name = "PulseAudio";
8514 info.outputChannels = 2;
8515 info.inputChannels = 2;
8516 info.duplexChannels = 2;
8517 info.isDefaultOutput = true;
8518 info.isDefaultInput = true;
8520 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8521 info.sampleRates.push_back( *sr );
8523 info.preferredSampleRate = 48000;
8524 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8529 static void *pulseaudio_callback( void * user )
8531 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8532 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8533 volatile bool *isRunning = &cbi->isRunning;
8535 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8536 if (cbi->doRealtime) {
8537 std::cerr << "RtAudio pulse: " <<
8538 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8539 "running realtime scheduling" << std::endl;
8543 while ( *isRunning ) {
8544 pthread_testcancel();
8545 context->callbackEvent();
8548 pthread_exit( NULL );
8551 void RtApiPulse::closeStream( void )
8553 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8555 stream_.callbackInfo.isRunning = false;
8557 MUTEX_LOCK( &stream_.mutex );
8558 if ( stream_.state == STREAM_STOPPED ) {
8559 pah->runnable = true;
8560 pthread_cond_signal( &pah->runnable_cv );
8562 MUTEX_UNLOCK( &stream_.mutex );
8564 pthread_join( pah->thread, 0 );
8565 if ( pah->s_play ) {
8566 pa_simple_flush( pah->s_play, NULL );
8567 pa_simple_free( pah->s_play );
8570 pa_simple_free( pah->s_rec );
8572 pthread_cond_destroy( &pah->runnable_cv );
8574 stream_.apiHandle = 0;
8577 if ( stream_.userBuffer[0] ) {
8578 free( stream_.userBuffer[0] );
8579 stream_.userBuffer[0] = 0;
8581 if ( stream_.userBuffer[1] ) {
8582 free( stream_.userBuffer[1] );
8583 stream_.userBuffer[1] = 0;
8586 stream_.state = STREAM_CLOSED;
8587 stream_.mode = UNINITIALIZED;
8590 void RtApiPulse::callbackEvent( void )
8592 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8594 if ( stream_.state == STREAM_STOPPED ) {
8595 MUTEX_LOCK( &stream_.mutex );
8596 while ( !pah->runnable )
8597 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8599 if ( stream_.state != STREAM_RUNNING ) {
8600 MUTEX_UNLOCK( &stream_.mutex );
8603 MUTEX_UNLOCK( &stream_.mutex );
8606 if ( stream_.state == STREAM_CLOSED ) {
8607 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8608 "this shouldn't happen!";
8609 error( RtAudioError::WARNING );
8613 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8614 double streamTime = getStreamTime();
8615 RtAudioStreamStatus status = 0;
8616 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8617 stream_.bufferSize, streamTime, status,
8618 stream_.callbackInfo.userData );
8620 if ( doStopStream == 2 ) {
8625 MUTEX_LOCK( &stream_.mutex );
8626 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8627 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8629 if ( stream_.state != STREAM_RUNNING )
8634 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8635 if ( stream_.doConvertBuffer[OUTPUT] ) {
8636 convertBuffer( stream_.deviceBuffer,
8637 stream_.userBuffer[OUTPUT],
8638 stream_.convertInfo[OUTPUT] );
8639 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8640 formatBytes( stream_.deviceFormat[OUTPUT] );
8642 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8643 formatBytes( stream_.userFormat );
8645 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8646 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8647 pa_strerror( pa_error ) << ".";
8648 errorText_ = errorStream_.str();
8649 error( RtAudioError::WARNING );
8653 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8654 if ( stream_.doConvertBuffer[INPUT] )
8655 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8656 formatBytes( stream_.deviceFormat[INPUT] );
8658 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8659 formatBytes( stream_.userFormat );
8661 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8662 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8663 pa_strerror( pa_error ) << ".";
8664 errorText_ = errorStream_.str();
8665 error( RtAudioError::WARNING );
8667 if ( stream_.doConvertBuffer[INPUT] ) {
8668 convertBuffer( stream_.userBuffer[INPUT],
8669 stream_.deviceBuffer,
8670 stream_.convertInfo[INPUT] );
8675 MUTEX_UNLOCK( &stream_.mutex );
8676 RtApi::tickStreamTime();
8678 if ( doStopStream == 1 )
8682 void RtApiPulse::startStream( void )
8684 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8686 if ( stream_.state == STREAM_CLOSED ) {
8687 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8688 error( RtAudioError::INVALID_USE );
8691 if ( stream_.state == STREAM_RUNNING ) {
8692 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8693 error( RtAudioError::WARNING );
8697 MUTEX_LOCK( &stream_.mutex );
8699 #if defined( HAVE_GETTIMEOFDAY )
8700 gettimeofday( &stream_.lastTickTimestamp, NULL );
8703 stream_.state = STREAM_RUNNING;
8705 pah->runnable = true;
8706 pthread_cond_signal( &pah->runnable_cv );
8707 MUTEX_UNLOCK( &stream_.mutex );
8710 void RtApiPulse::stopStream( void )
8712 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8714 if ( stream_.state == STREAM_CLOSED ) {
8715 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8716 error( RtAudioError::INVALID_USE );
8719 if ( stream_.state == STREAM_STOPPED ) {
8720 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8721 error( RtAudioError::WARNING );
8725 stream_.state = STREAM_STOPPED;
8726 MUTEX_LOCK( &stream_.mutex );
8728 if ( pah && pah->s_play ) {
8730 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8731 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8732 pa_strerror( pa_error ) << ".";
8733 errorText_ = errorStream_.str();
8734 MUTEX_UNLOCK( &stream_.mutex );
8735 error( RtAudioError::SYSTEM_ERROR );
8740 stream_.state = STREAM_STOPPED;
8741 MUTEX_UNLOCK( &stream_.mutex );
8744 void RtApiPulse::abortStream( void )
8746 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8748 if ( stream_.state == STREAM_CLOSED ) {
8749 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8750 error( RtAudioError::INVALID_USE );
8753 if ( stream_.state == STREAM_STOPPED ) {
8754 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8755 error( RtAudioError::WARNING );
8759 stream_.state = STREAM_STOPPED;
8760 MUTEX_LOCK( &stream_.mutex );
8762 if ( pah && pah->s_play ) {
8764 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8765 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8766 pa_strerror( pa_error ) << ".";
8767 errorText_ = errorStream_.str();
8768 MUTEX_UNLOCK( &stream_.mutex );
8769 error( RtAudioError::SYSTEM_ERROR );
8774 stream_.state = STREAM_STOPPED;
8775 MUTEX_UNLOCK( &stream_.mutex );
8778 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8779 unsigned int channels, unsigned int firstChannel,
8780 unsigned int sampleRate, RtAudioFormat format,
8781 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8783 PulseAudioHandle *pah = 0;
8784 unsigned long bufferBytes = 0;
8787 if ( device != 0 ) return false;
8788 if ( mode != INPUT && mode != OUTPUT ) return false;
8789 if ( channels != 1 && channels != 2 ) {
8790 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8793 ss.channels = channels;
8795 if ( firstChannel != 0 ) return false;
8797 bool sr_found = false;
8798 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8799 if ( sampleRate == *sr ) {
8801 stream_.sampleRate = sampleRate;
8802 ss.rate = sampleRate;
8807 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8812 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8813 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8814 if ( format == sf->rtaudio_format ) {
8816 stream_.userFormat = sf->rtaudio_format;
8817 stream_.deviceFormat[mode] = stream_.userFormat;
8818 ss.format = sf->pa_format;
8822 if ( !sf_found ) { // Use internal data format conversion.
8823 stream_.userFormat = format;
8824 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8825 ss.format = PA_SAMPLE_FLOAT32LE;
8828 // Set other stream parameters.
8829 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8830 else stream_.userInterleaved = true;
8831 stream_.deviceInterleaved[mode] = true;
8832 stream_.nBuffers = 1;
8833 stream_.doByteSwap[mode] = false;
8834 stream_.nUserChannels[mode] = channels;
8835 stream_.nDeviceChannels[mode] = channels + firstChannel;
8836 stream_.channelOffset[mode] = 0;
8837 std::string streamName = "RtAudio";
8839 // Set flags for buffer conversion.
8840 stream_.doConvertBuffer[mode] = false;
8841 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8842 stream_.doConvertBuffer[mode] = true;
8843 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8844 stream_.doConvertBuffer[mode] = true;
8846 // Allocate necessary internal buffers.
8847 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8848 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8849 if ( stream_.userBuffer[mode] == NULL ) {
8850 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8853 stream_.bufferSize = *bufferSize;
8855 if ( stream_.doConvertBuffer[mode] ) {
8857 bool makeBuffer = true;
8858 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8859 if ( mode == INPUT ) {
8860 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8861 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8862 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8867 bufferBytes *= *bufferSize;
8868 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8869 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8870 if ( stream_.deviceBuffer == NULL ) {
8871 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8877 stream_.device[mode] = device;
8879 // Setup the buffer conversion information structure.
8880 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8882 if ( !stream_.apiHandle ) {
8883 PulseAudioHandle *pah = new PulseAudioHandle;
8885 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8889 stream_.apiHandle = pah;
8890 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8891 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8895 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8898 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8901 pa_buffer_attr buffer_attr;
8902 buffer_attr.fragsize = bufferBytes;
8903 buffer_attr.maxlength = -1;
8905 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8906 if ( !pah->s_rec ) {
8907 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8912 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8913 if ( !pah->s_play ) {
8914 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8922 if ( stream_.mode == UNINITIALIZED )
8923 stream_.mode = mode;
8924 else if ( stream_.mode == mode )
8927 stream_.mode = DUPLEX;
8929 if ( !stream_.callbackInfo.isRunning ) {
8930 stream_.callbackInfo.object = this;
8932 stream_.state = STREAM_STOPPED;
8933 // Set the thread attributes for joinable and realtime scheduling
8934 // priority (optional). The higher priority will only take affect
8935 // if the program is run as root or suid. Note, under Linux
8936 // processes with CAP_SYS_NICE privilege, a user can change
8937 // scheduling policy and priority (thus need not be root). See
8938 // POSIX "capabilities".
8939 pthread_attr_t attr;
8940 pthread_attr_init( &attr );
8941 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8942 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8943 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8944 stream_.callbackInfo.doRealtime = true;
8945 struct sched_param param;
8946 int priority = options->priority;
8947 int min = sched_get_priority_min( SCHED_RR );
8948 int max = sched_get_priority_max( SCHED_RR );
8949 if ( priority < min ) priority = min;
8950 else if ( priority > max ) priority = max;
8951 param.sched_priority = priority;
8953 // Set the policy BEFORE the priority. Otherwise it fails.
8954 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8955 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8956 // This is definitely required. Otherwise it fails.
8957 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8958 pthread_attr_setschedparam(&attr, &param);
8961 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8963 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8966 stream_.callbackInfo.isRunning = true;
8967 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8968 pthread_attr_destroy(&attr);
8970 // Failed. Try instead with default attributes.
8971 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8973 stream_.callbackInfo.isRunning = false;
8974 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8983 if ( pah && stream_.callbackInfo.isRunning ) {
8984 pthread_cond_destroy( &pah->runnable_cv );
8986 stream_.apiHandle = 0;
8989 for ( int i=0; i<2; i++ ) {
8990 if ( stream_.userBuffer[i] ) {
8991 free( stream_.userBuffer[i] );
8992 stream_.userBuffer[i] = 0;
8996 if ( stream_.deviceBuffer ) {
8997 free( stream_.deviceBuffer );
8998 stream_.deviceBuffer = 0;
9001 stream_.state = STREAM_CLOSED;
9005 //******************** End of __LINUX_PULSE__ *********************//
9008 #if defined(__LINUX_OSS__)
9011 #include <sys/ioctl.h>
9014 #include <sys/soundcard.h>
9018 static void *ossCallbackHandler(void * ptr);
9020 // A structure to hold various information related to the OSS API
// NOTE(review): the enclosing "struct OssHandle {" header and several
// members (the xrun[2] flags, the triggered flag, the constructor name)
// are not visible in this excerpt — confirm against the canonical source.
9023 int id[2]; // device ids: file descriptor for playback in [0], capture in [1]
9026 pthread_cond_t runnable; // signalled to wake the callback thread when the stream (re)starts
// Constructor member-initializer fragment: the duplex trigger starts
// unfired; device ids and over/underflow (xrun) flags are cleared.
9029 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
9032 RtApiOss :: RtApiOss()
9034 // Nothing to do here.
9037 RtApiOss :: ~RtApiOss()
9039 if ( stream_.state != STREAM_CLOSED ) closeStream();
9042 unsigned int RtApiOss :: getDeviceCount( void )
9044 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9045 if ( mixerfd == -1 ) {
9046 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9047 error( RtAudioError::WARNING );
9051 oss_sysinfo sysinfo;
9052 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9054 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9055 error( RtAudioError::WARNING );
9060 return sysinfo.numaudios;
// Probe one OSS device (by index) and fill in an RtAudio::DeviceInfo:
// channel capabilities, native sample formats, supported sample rates
// and the device name. Failures raise WARNING/INVALID_USE via error()
// and (in lines elided from this excerpt) return the partially-filled
// info struct.
// NOTE(review): this excerpt is structurally truncated — braces,
// close(mixerfd) calls and early returns are missing; confirm against
// the canonical RtAudio source before editing.
9063 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9065 RtAudio::DeviceInfo info;
9066 info.probed = false; // flipped to true only if the probe succeeds (in elided tail)
9068 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9069 if ( mixerfd == -1 ) {
9070 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9071 error( RtAudioError::WARNING );
// SNDCTL_SYSINFO is only provided by OSS 4.0 and later.
9075 oss_sysinfo sysinfo;
9076 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9077 if ( result == -1 ) {
9079 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9080 error( RtAudioError::WARNING );
// Validate the requested device index against the system device count.
9084 unsigned nDevices = sysinfo.numaudios;
9085 if ( nDevices == 0 ) {
9087 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9088 error( RtAudioError::INVALID_USE );
9092 if ( device >= nDevices ) {
9094 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9095 error( RtAudioError::INVALID_USE );
// Query per-device capabilities (the ainfo.dev assignment is elided here).
9099 oss_audioinfo ainfo;
9101 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9103 if ( result == -1 ) {
9104 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9105 errorText_ = errorStream_.str();
9106 error( RtAudioError::WARNING );
// Translate OSS capability bits into RtAudio channel counts; duplex
// channel count is the smaller of the input/output counts.
9111 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9112 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9113 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
9114 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9115 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9118 // Probe data formats ... do for input
9119 unsigned long mask = ainfo.iformats;
9120 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9121 info.nativeFormats |= RTAUDIO_SINT16;
9122 if ( mask & AFMT_S8 )
9123 info.nativeFormats |= RTAUDIO_SINT8;
9124 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9125 info.nativeFormats |= RTAUDIO_SINT32;
// AFMT_FLOAT / AFMT_S24_* are guarded by #ifdefs in the elided lines,
// since not every OSS build defines them.
9127 if ( mask & AFMT_FLOAT )
9128 info.nativeFormats |= RTAUDIO_FLOAT32;
9130 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9131 info.nativeFormats |= RTAUDIO_SINT24;
9133 // Check that we have at least one supported format
9134 if ( info.nativeFormats == 0 ) {
9135 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9136 errorText_ = errorStream_.str();
9137 error( RtAudioError::WARNING );
9141 // Probe the supported sample rates.
9142 info.sampleRates.clear();
// If the driver enumerates discrete rates, keep those that intersect
// RtAudio's SAMPLE_RATES table; otherwise fall back to the min/max
// range check below. Preferred rate: the highest supported rate <= 48 kHz.
9143 if ( ainfo.nrates ) {
9144 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9145 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9146 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9147 info.sampleRates.push_back( SAMPLE_RATES[k] );
9149 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9150 info.preferredSampleRate = SAMPLE_RATES[k];
9158 // Check min and max rate values;
9159 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9160 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9161 info.sampleRates.push_back( SAMPLE_RATES[k] );
9163 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9164 info.preferredSampleRate = SAMPLE_RATES[k];
9169 if ( info.sampleRates.size() == 0 ) {
9170 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9171 errorText_ = errorStream_.str();
9172 error( RtAudioError::WARNING );
9176 info.name = ainfo.name;
// Open and configure one direction (OUTPUT or INPUT) of an OSS stream:
// validates the device, opens its device node, negotiates channels,
// sample format, fragment/buffer size and sample rate, allocates the
// user/device conversion buffers, and (on the first successful open)
// spawns the callback thread. Returns SUCCESS/FAILURE (returns are in
// lines elided from this excerpt).
// NOTE(review): this excerpt is structurally truncated — braces, early
// returns, close() calls and the trailing error-cleanup label are
// partially missing; confirm against the canonical RtAudio source.
9183 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9184 unsigned int firstChannel, unsigned int sampleRate,
9185 RtAudioFormat format, unsigned int *bufferSize,
9186 RtAudio::StreamOptions *options )
// Re-validate the device via the mixer, mirroring getDeviceInfo().
9188 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9189 if ( mixerfd == -1 ) {
9190 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9194 oss_sysinfo sysinfo;
9195 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9196 if ( result == -1 ) {
9198 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9202 unsigned nDevices = sysinfo.numaudios;
9203 if ( nDevices == 0 ) {
9204 // This should not happen because a check is made before this function is called.
9206 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9210 if ( device >= nDevices ) {
9211 // This should not happen because a check is made before this function is called.
9213 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9217 oss_audioinfo ainfo;
9219 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9221 if ( result == -1 ) {
// NOTE(review): this message names the wrong function ("getDeviceInfo")
// — it should say "probeDeviceOpen"; fix the string in a follow-up.
9222 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9223 errorText_ = errorStream_.str();
9227 // Check if device supports input or output
9228 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9229 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9230 if ( mode == OUTPUT )
9231 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9233 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9234 errorText_ = errorStream_.str();
// Choose the open() flags. OSS cannot open the same device node twice,
// so reusing the playback device for input forces a duplex reopen.
9239 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9240 if ( mode == OUTPUT )
9242 else { // mode == INPUT
9243 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9244 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9245 close( handle->id[0] );
9247 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9248 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9249 errorText_ = errorStream_.str();
9252 // Check that the number previously set channels is the same.
9253 if ( stream_.nUserChannels[0] != channels ) {
9254 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9255 errorText_ = errorStream_.str();
9264 // Set exclusive access if specified.
9265 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9267 // Try to open the device.
9269 fd = open( ainfo.devnode, flags, 0 );
9271 if ( errno == EBUSY )
9272 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9274 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9275 errorText_ = errorStream_.str();
9279 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): "flags | O_RDWR" is always non-zero, so this condition
// is always true — it was presumably meant to be "flags == O_RDWR" (or
// "flags & O_RDWR"). Longstanding upstream bug; fix with care.
9281 if ( flags | O_RDWR ) {
9282 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9283 if ( result == -1) {
9284 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9285 errorText_ = errorStream_.str();
9291 // Check the device channel support.
9292 stream_.nUserChannels[mode] = channels;
9293 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9295 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9296 errorText_ = errorStream_.str();
9300 // Set the number of channels.
9301 int deviceChannels = channels + firstChannel;
9302 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9303 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9305 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9306 errorText_ = errorStream_.str();
9309 stream_.nDeviceChannels[mode] = deviceChannels;
9311 // Get the data format mask
9313 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9314 if ( result == -1 ) {
9316 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9317 errorText_ = errorStream_.str();
9321 // Determine how to set the device format.
// First try the user's requested format, preferring native-endian
// (_NE); fall back to opposite-endian (_OE) with byte swapping.
9322 stream_.userFormat = format;
9323 int deviceFormat = -1;
9324 stream_.doByteSwap[mode] = false;
9325 if ( format == RTAUDIO_SINT8 ) {
9326 if ( mask & AFMT_S8 ) {
9327 deviceFormat = AFMT_S8;
9328 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9331 else if ( format == RTAUDIO_SINT16 ) {
9332 if ( mask & AFMT_S16_NE ) {
9333 deviceFormat = AFMT_S16_NE;
9334 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9336 else if ( mask & AFMT_S16_OE ) {
9337 deviceFormat = AFMT_S16_OE;
9338 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9339 stream_.doByteSwap[mode] = true;
9342 else if ( format == RTAUDIO_SINT24 ) {
9343 if ( mask & AFMT_S24_NE ) {
9344 deviceFormat = AFMT_S24_NE;
9345 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9347 else if ( mask & AFMT_S24_OE ) {
9348 deviceFormat = AFMT_S24_OE;
9349 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9350 stream_.doByteSwap[mode] = true;
9353 else if ( format == RTAUDIO_SINT32 ) {
9354 if ( mask & AFMT_S32_NE ) {
9355 deviceFormat = AFMT_S32_NE;
9356 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9358 else if ( mask & AFMT_S32_OE ) {
9359 deviceFormat = AFMT_S32_OE;
9360 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9361 stream_.doByteSwap[mode] = true;
// Requested format unavailable: pick the best format the device DOES
// support (internal conversion will bridge the difference).
9365 if ( deviceFormat == -1 ) {
9366 // The user requested format is not natively supported by the device.
9367 if ( mask & AFMT_S16_NE ) {
9368 deviceFormat = AFMT_S16_NE;
9369 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9371 else if ( mask & AFMT_S32_NE ) {
9372 deviceFormat = AFMT_S32_NE;
9373 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9375 else if ( mask & AFMT_S24_NE ) {
9376 deviceFormat = AFMT_S24_NE;
9377 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9379 else if ( mask & AFMT_S16_OE ) {
9380 deviceFormat = AFMT_S16_OE;
9381 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9382 stream_.doByteSwap[mode] = true;
9384 else if ( mask & AFMT_S32_OE ) {
9385 deviceFormat = AFMT_S32_OE;
9386 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9387 stream_.doByteSwap[mode] = true;
9389 else if ( mask & AFMT_S24_OE ) {
9390 deviceFormat = AFMT_S24_OE;
9391 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9392 stream_.doByteSwap[mode] = true;
9394 else if ( mask & AFMT_S8) {
9395 deviceFormat = AFMT_S8;
9396 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9400 if ( stream_.deviceFormat[mode] == 0 ) {
9401 // This really shouldn't happen ...
9403 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9404 errorText_ = errorStream_.str();
9408 // Set the data format.
9409 int temp = deviceFormat;
9410 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9411 if ( result == -1 || deviceFormat != temp ) {
9413 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9414 errorText_ = errorStream_.str();
9418 // Attempt to set the buffer size. According to OSS, the minimum
9419 // number of buffers is two. The supposed minimum buffer size is 16
9420 // bytes, so that will be our lower bound. The argument to this
9421 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9422 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9423 // We'll check the actual value used near the end of the setup
9425 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9426 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9428 if ( options ) buffers = options->numberOfBuffers;
9429 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9430 if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) == log2(ossBufferBytes): the SSSS power-of-two exponent.
9431 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9432 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9433 if ( result == -1 ) {
9435 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9436 errorText_ = errorStream_.str();
9439 stream_.nBuffers = buffers;
9441 // Save buffer size (in sample frames).
9442 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9443 stream_.bufferSize = *bufferSize;
9445 // Set the sample rate.
9446 int srate = sampleRate;
9447 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9448 if ( result == -1 ) {
9450 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9451 errorText_ = errorStream_.str();
9455 // Verify the sample rate setup worked.
// A drift of up to 100 Hz from the requested rate is tolerated.
9456 if ( abs( srate - (int)sampleRate ) > 100 ) {
9458 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9459 errorText_ = errorStream_.str();
9462 stream_.sampleRate = sampleRate;
// Duplex reopen of the same device: the output side must adopt the
// format/channel setup just negotiated for input.
9464 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9465 // We're doing duplex setup here.
9466 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9467 stream_.nDeviceChannels[0] = deviceChannels;
9470 // Set interleaving parameters.
9471 stream_.userInterleaved = true;
9472 stream_.deviceInterleaved[mode] = true;
9473 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9474 stream_.userInterleaved = false;
9476 // Set flags for buffer conversion
9477 stream_.doConvertBuffer[mode] = false;
9478 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9479 stream_.doConvertBuffer[mode] = true;
9480 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9481 stream_.doConvertBuffer[mode] = true;
9482 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9483 stream_.nUserChannels[mode] > 1 )
9484 stream_.doConvertBuffer[mode] = true;
9486 // Allocate the stream handles if necessary and then save.
9487 if ( stream_.apiHandle == 0 ) {
9489 handle = new OssHandle;
9491 catch ( std::bad_alloc& ) {
9492 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9496 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9497 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9501 stream_.apiHandle = (void *) handle;
9504 handle = (OssHandle *) stream_.apiHandle;
9506 handle->id[mode] = fd;
9508 // Allocate necessary internal buffers.
9509 unsigned long bufferBytes;
9510 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9511 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9512 if ( stream_.userBuffer[mode] == NULL ) {
9513 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
// The device buffer is shared between directions: reuse the existing
// (output) buffer for input when it is already large enough.
9517 if ( stream_.doConvertBuffer[mode] ) {
9519 bool makeBuffer = true;
9520 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9521 if ( mode == INPUT ) {
9522 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9523 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9524 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9529 bufferBytes *= *bufferSize;
9530 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9531 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9532 if ( stream_.deviceBuffer == NULL ) {
9533 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9539 stream_.device[mode] = device;
9540 stream_.state = STREAM_STOPPED;
9542 // Setup the buffer conversion information structure.
9543 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9545 // Setup thread if necessary.
9546 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9547 // We had already set up an output stream.
9548 stream_.mode = DUPLEX;
9549 if ( stream_.device[0] == device ) handle->id[0] = fd;
9552 stream_.mode = mode;
9554 // Setup callback thread.
9555 stream_.callbackInfo.object = (void *) this;
9557 // Set the thread attributes for joinable and realtime scheduling
9558 // priority. The higher priority will only take affect if the
9559 // program is run as root or suid.
9560 pthread_attr_t attr;
9561 pthread_attr_init( &attr );
9562 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9563 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9564 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9565 stream_.callbackInfo.doRealtime = true;
9566 struct sched_param param;
// Clamp the user-requested priority into the valid SCHED_RR range.
9567 int priority = options->priority;
9568 int min = sched_get_priority_min( SCHED_RR );
9569 int max = sched_get_priority_max( SCHED_RR );
9570 if ( priority < min ) priority = min;
9571 else if ( priority > max ) priority = max;
9572 param.sched_priority = priority;
9574 // Set the policy BEFORE the priority. Otherwise it fails.
9575 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9576 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9577 // This is definitely required. Otherwise it fails.
9578 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): "¶m" is mojibake — an HTML-entity corruption of
// "&param". Restore "&param" here; as written this does not compile.
9579 pthread_attr_setschedparam(&attr, ¶m);
9582 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9584 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9587 stream_.callbackInfo.isRunning = true;
9588 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9589 pthread_attr_destroy( &attr );
9591 // Failed. Try instead with default attributes.
9592 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9594 stream_.callbackInfo.isRunning = false;
9595 errorText_ = "RtApiOss::error creating callback thread!";
// Error-cleanup tail (reached via an elided "error:" label): release
// the condition variable, fds, api handle and all buffers, then mark
// the stream closed.
9605 pthread_cond_destroy( &handle->runnable );
9606 if ( handle->id[0] ) close( handle->id[0] );
9607 if ( handle->id[1] ) close( handle->id[1] );
9609 stream_.apiHandle = 0;
9612 for ( int i=0; i<2; i++ ) {
9613 if ( stream_.userBuffer[i] ) {
9614 free( stream_.userBuffer[i] );
9615 stream_.userBuffer[i] = 0;
9619 if ( stream_.deviceBuffer ) {
9620 free( stream_.deviceBuffer );
9621 stream_.deviceBuffer = 0;
9624 stream_.state = STREAM_CLOSED;
// Close the stream: stop the callback thread (waking it first if the
// stream is merely stopped), halt any running device, then release the
// condition variable, device fds, handle and all buffers.
// NOTE(review): braces and a few lines (e.g. "delete handle") are
// elided from this excerpt — confirm against the canonical source.
9628 void RtApiOss :: closeStream()
9630 if ( stream_.state == STREAM_CLOSED ) {
9631 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9632 error( RtAudioError::WARNING );
9636 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Tell the callback thread to exit its loop, and signal it in case it
// is blocked waiting on the runnable condition (stream stopped).
9637 stream_.callbackInfo.isRunning = false;
9638 MUTEX_LOCK( &stream_.mutex );
9639 if ( stream_.state == STREAM_STOPPED )
9640 pthread_cond_signal( &handle->runnable );
9641 MUTEX_UNLOCK( &stream_.mutex );
9642 pthread_join( stream_.callbackInfo.thread, NULL );
// Halt any device still streaming before tearing resources down.
9644 if ( stream_.state == STREAM_RUNNING ) {
9645 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9646 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9648 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9649 stream_.state = STREAM_STOPPED;
9653 pthread_cond_destroy( &handle->runnable );
9654 if ( handle->id[0] ) close( handle->id[0] );
9655 if ( handle->id[1] ) close( handle->id[1] );
9657 stream_.apiHandle = 0;
9660 for ( int i=0; i<2; i++ ) {
9661 if ( stream_.userBuffer[i] ) {
9662 free( stream_.userBuffer[i] );
9663 stream_.userBuffer[i] = 0;
9667 if ( stream_.deviceBuffer ) {
9668 free( stream_.deviceBuffer );
9669 stream_.deviceBuffer = 0;
// Reset stream bookkeeping so the object can be reused.
9672 stream_.mode = UNINITIALIZED;
9673 stream_.state = STREAM_CLOSED;
9676 void RtApiOss :: startStream()
9679 if ( stream_.state == STREAM_RUNNING ) {
9680 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9681 error( RtAudioError::WARNING );
9685 MUTEX_LOCK( &stream_.mutex );
9687 #if defined( HAVE_GETTIMEOFDAY )
9688 gettimeofday( &stream_.lastTickTimestamp, NULL );
9691 stream_.state = STREAM_RUNNING;
9693 // No need to do anything else here ... OSS automatically starts
9694 // when fed samples.
9696 MUTEX_UNLOCK( &stream_.mutex );
9698 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9699 pthread_cond_signal( &handle->runnable );
// Stop a running stream gracefully: pad the output with zero buffers so
// queued audio drains without a click, halt the playback and capture
// devices, and mark the stream STOPPED. Raises SYSTEM_ERROR if a halt
// ioctl failed.
// NOTE(review): braces, the verifyStream() prologue and variable
// declarations (result, samples, buffer) are elided from this excerpt.
9702 void RtApiOss :: stopStream()
9705 if ( stream_.state == STREAM_STOPPED ) {
9706 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9707 error( RtAudioError::WARNING );
9711 MUTEX_LOCK( &stream_.mutex );
9713 // The state might change while waiting on a mutex.
9714 if ( stream_.state == STREAM_STOPPED ) {
9715 MUTEX_UNLOCK( &stream_.mutex );
9720 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9721 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9723 // Flush the output with zeros a few times.
// Pick whichever buffer actually feeds the device (converted or user).
9726 RtAudioFormat format;
9728 if ( stream_.doConvertBuffer[0] ) {
9729 buffer = stream_.deviceBuffer;
9730 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9731 format = stream_.deviceFormat[0];
9734 buffer = stream_.userBuffer[0];
9735 samples = stream_.bufferSize * stream_.nUserChannels[0];
9736 format = stream_.userFormat;
9739 memset( buffer, 0, samples * formatBytes(format) );
// Write nBuffers+1 buffers of silence so all queued fragments drain.
9740 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9741 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9742 if ( result == -1 ) {
9743 errorText_ = "RtApiOss::stopStream: audio write error.";
9744 error( RtAudioError::WARNING );
9748 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9749 if ( result == -1 ) {
9750 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9751 errorText_ = errorStream_.str();
// Reset the duplex trigger so a restart re-arms SETTRIGGER.
9754 handle->triggered = false;
// Halt the capture fd too, unless duplex shares a single fd (already halted).
9757 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9758 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9759 if ( result == -1 ) {
9760 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9761 errorText_ = errorStream_.str();
9767 stream_.state = STREAM_STOPPED;
9768 MUTEX_UNLOCK( &stream_.mutex );
// Escalate to SYSTEM_ERROR only if one of the halt ioctls failed.
9770 if ( result != -1 ) return;
9771 error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream immediately: like stopStream() but without
// draining the output queue — both devices are halted at once and any
// pending audio is discarded.
// NOTE(review): braces, the verifyStream() prologue and the "int result"
// declaration are elided from this excerpt.
9774 void RtApiOss :: abortStream()
9777 if ( stream_.state == STREAM_STOPPED ) {
9778 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9779 error( RtAudioError::WARNING );
9783 MUTEX_LOCK( &stream_.mutex );
9785 // The state might change while waiting on a mutex.
9786 if ( stream_.state == STREAM_STOPPED ) {
9787 MUTEX_UNLOCK( &stream_.mutex );
9792 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9793 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9794 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9795 if ( result == -1 ) {
9796 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9797 errorText_ = errorStream_.str();
// Reset the duplex trigger so a restart re-arms SETTRIGGER.
9800 handle->triggered = false;
// Halt the capture fd too, unless duplex shares a single fd (already halted).
9803 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9804 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9805 if ( result == -1 ) {
9806 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9807 errorText_ = errorStream_.str();
9813 stream_.state = STREAM_STOPPED;
9814 MUTEX_UNLOCK( &stream_.mutex );
// Escalate to SYSTEM_ERROR only if one of the halt ioctls failed.
9816 if ( result != -1 ) return;
9817 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the callback thread: wait while the stream is
// stopped, invoke the user callback (reporting any xruns), then write
// the output buffer to and/or read the input buffer from the OSS
// device, performing format conversion and byte swapping as configured.
// NOTE(review): braces, returns and the "unlock:" label are elided from
// this excerpt — confirm against the canonical source before editing.
9820 void RtApiOss :: callbackEvent()
9822 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// When stopped, block on the runnable condition until startStream()
// (or closeStream()) signals; bail out unless we were restarted.
9823 if ( stream_.state == STREAM_STOPPED ) {
9824 MUTEX_LOCK( &stream_.mutex );
9825 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9826 if ( stream_.state != STREAM_RUNNING ) {
9827 MUTEX_UNLOCK( &stream_.mutex );
9830 MUTEX_UNLOCK( &stream_.mutex );
9833 if ( stream_.state == STREAM_CLOSED ) {
9834 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9835 error( RtAudioError::WARNING );
9839 // Invoke user callback to get fresh output data.
9840 int doStopStream = 0;
9841 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9842 double streamTime = getStreamTime();
// Report (and clear) any under/overflow detected on the previous pass.
9843 RtAudioStreamStatus status = 0;
9844 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9845 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9846 handle->xrun[0] = false;
9848 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9849 status |= RTAUDIO_INPUT_OVERFLOW;
9850 handle->xrun[1] = false;
9852 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9853 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return value 2 requests an immediate abort (no drain).
9854 if ( doStopStream == 2 ) {
9855 this->abortStream();
9859 MUTEX_LOCK( &stream_.mutex );
9861 // The state might change while waiting on a mutex.
9862 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9867 RtAudioFormat format;
9869 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9871 // Setup parameters and do buffer conversion if necessary.
9872 if ( stream_.doConvertBuffer[0] ) {
9873 buffer = stream_.deviceBuffer;
9874 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9875 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9876 format = stream_.deviceFormat[0];
9879 buffer = stream_.userBuffer[0];
9880 samples = stream_.bufferSize * stream_.nUserChannels[0];
9881 format = stream_.userFormat;
9884 // Do byte swapping if necessary.
9885 if ( stream_.doByteSwap[0] )
9886 byteSwapBuffer( buffer, samples, format );
// First duplex pass: prime the output, then enable input and output
// simultaneously via SETTRIGGER so the two directions stay in sync.
9888 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9890 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9891 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9892 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9893 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9894 handle->triggered = true;
9897 // Write samples to device.
9898 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9900 if ( result == -1 ) {
9901 // We'll assume this is an underrun, though there isn't a
9902 // specific means for determining that.
9903 handle->xrun[0] = true;
9904 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9905 error( RtAudioError::WARNING );
9906 // Continue on to input section.
9910 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9912 // Setup parameters.
9913 if ( stream_.doConvertBuffer[1] ) {
9914 buffer = stream_.deviceBuffer;
9915 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9916 format = stream_.deviceFormat[1];
9919 buffer = stream_.userBuffer[1];
9920 samples = stream_.bufferSize * stream_.nUserChannels[1];
9921 format = stream_.userFormat;
9924 // Read samples from device.
9925 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9927 if ( result == -1 ) {
9928 // We'll assume this is an overrun, though there isn't a
9929 // specific means for determining that.
9930 handle->xrun[1] = true;
9931 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9932 error( RtAudioError::WARNING );
9936 // Do byte swapping if necessary.
9937 if ( stream_.doByteSwap[1] )
9938 byteSwapBuffer( buffer, samples, format );
9940 // Do buffer conversion if necessary.
9941 if ( stream_.doConvertBuffer[1] )
9942 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// unlock label target is in an elided line just above this point.
9946 MUTEX_UNLOCK( &stream_.mutex );
9948 RtApi::tickStreamTime();
// Callback return value 1 requests a graceful (draining) stop.
9949 if ( doStopStream == 1 ) this->stopStream();
9952 static void *ossCallbackHandler( void *ptr )
9954 CallbackInfo *info = (CallbackInfo *) ptr;
9955 RtApiOss *object = (RtApiOss *) info->object;
9956 bool *isRunning = &info->isRunning;
9958 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9959 if (info->doRealtime) {
9960 std::cerr << "RtAudio oss: " <<
9961 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9962 "running realtime scheduling" << std::endl;
9966 while ( *isRunning == true ) {
9967 pthread_testcancel();
9968 object->callbackEvent();
9971 pthread_exit( NULL );
9974 //******************** End of __LINUX_OSS__ *********************//
9978 // *************************************************** //
9980 // Protected common (OS-independent) RtAudio methods.
9982 // *************************************************** //
9984 // This method can be modified to control the behavior of error
9985 // message printing.
9986 void RtApi :: error( RtAudioError::Type type )
9988 errorStream_.str(""); // clear the ostringstream
9990 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9991 if ( errorCallback ) {
9992 const std::string errorMessage = errorText_;
9993 errorCallback( type, errorMessage );
9996 if ( showWarnings_ == true )
9997 std::cerr << '\n' << errorText_ << "\n\n";
10002 void RtApi :: verifyStream()
10004 if ( stream_.state == STREAM_CLOSED ) {
10005 errorText_ = "RtApi:: a stream is not open!";
10006 error( RtAudioError::INVALID_USE );
10011 void RtApi :: clearStreamInfo()
10013 stream_.mode = UNINITIALIZED;
10014 stream_.state = STREAM_CLOSED;
10015 stream_.sampleRate = 0;
10016 stream_.bufferSize = 0;
10017 stream_.nBuffers = 0;
10018 stream_.userFormat = 0;
10019 stream_.userInterleaved = true;
10020 stream_.streamTime = 0.0;
10021 stream_.apiHandle = 0;
10022 stream_.deviceBuffer = 0;
10023 stream_.callbackInfo.callback = 0;
10024 stream_.callbackInfo.userData = 0;
10025 stream_.callbackInfo.isRunning = false;
10026 stream_.callbackInfo.errorCallback = 0;
10027 for ( int i=0; i<2; i++ ) {
10028 stream_.device[i] = 11111;
10029 stream_.doConvertBuffer[i] = false;
10030 stream_.deviceInterleaved[i] = true;
10031 stream_.doByteSwap[i] = false;
10032 stream_.nUserChannels[i] = 0;
10033 stream_.nDeviceChannels[i] = 0;
10034 stream_.channelOffset[i] = 0;
10035 stream_.deviceFormat[i] = 0;
10036 stream_.latency[i] = 0;
10037 stream_.userBuffer[i] = 0;
10038 stream_.convertInfo[i].channels = 0;
10039 stream_.convertInfo[i].inJump = 0;
10040 stream_.convertInfo[i].outJump = 0;
10041 stream_.convertInfo[i].inFormat = 0;
10042 stream_.convertInfo[i].outFormat = 0;
10043 stream_.convertInfo[i].inOffset.clear();
10044 stream_.convertInfo[i].outOffset.clear();
10048 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10050 if ( format == RTAUDIO_SINT16 )
10052 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10054 else if ( format == RTAUDIO_FLOAT64 )
10056 else if ( format == RTAUDIO_SINT24 )
10058 else if ( format == RTAUDIO_SINT8 )
10061 errorText_ = "RtApi::formatBytes: undefined format.";
10062 error( RtAudioError::WARNING );
10067 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
10069 if ( mode == INPUT ) { // convert device to user buffer
10070 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10071 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10072 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10073 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10075 else { // convert user to device buffer
10076 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10077 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10078 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10079 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
10082 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10083 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10085 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10087 // Set up the interleave/deinterleave offsets.
10088 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10089 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10090 ( mode == INPUT && stream_.userInterleaved ) ) {
10091 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10092 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10093 stream_.convertInfo[mode].outOffset.push_back( k );
10094 stream_.convertInfo[mode].inJump = 1;
10098 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10099 stream_.convertInfo[mode].inOffset.push_back( k );
10100 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10101 stream_.convertInfo[mode].outJump = 1;
10105 else { // no (de)interleaving
10106 if ( stream_.userInterleaved ) {
10107 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10108 stream_.convertInfo[mode].inOffset.push_back( k );
10109 stream_.convertInfo[mode].outOffset.push_back( k );
10113 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10114 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10115 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10116 stream_.convertInfo[mode].inJump = 1;
10117 stream_.convertInfo[mode].outJump = 1;
10122 // Add channel offset.
10123 if ( firstChannel > 0 ) {
10124 if ( stream_.deviceInterleaved[mode] ) {
10125 if ( mode == OUTPUT ) {
10126 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10127 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10130 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10131 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10135 if ( mode == OUTPUT ) {
10136 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10137 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10140 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10141 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10147 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10149 // This function does format conversion, input/output channel compensation, and
10150 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10151 // the lower three bytes of a 32-bit integer.
10153 // Clear our device buffer when in/out duplex device channels are different
10154 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10155 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10156 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10159 if (info.outFormat == RTAUDIO_FLOAT64) {
10161 Float64 *out = (Float64 *)outBuffer;
10163 if (info.inFormat == RTAUDIO_SINT8) {
10164 signed char *in = (signed char *)inBuffer;
10165 scale = 1.0 / 127.5;
10166 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10167 for (j=0; j<info.channels; j++) {
10168 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10169 out[info.outOffset[j]] += 0.5;
10170 out[info.outOffset[j]] *= scale;
10173 out += info.outJump;
10176 else if (info.inFormat == RTAUDIO_SINT16) {
10177 Int16 *in = (Int16 *)inBuffer;
10178 scale = 1.0 / 32767.5;
10179 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10180 for (j=0; j<info.channels; j++) {
10181 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10182 out[info.outOffset[j]] += 0.5;
10183 out[info.outOffset[j]] *= scale;
10186 out += info.outJump;
10189 else if (info.inFormat == RTAUDIO_SINT24) {
10190 Int24 *in = (Int24 *)inBuffer;
10191 scale = 1.0 / 8388607.5;
10192 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10193 for (j=0; j<info.channels; j++) {
10194 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10195 out[info.outOffset[j]] += 0.5;
10196 out[info.outOffset[j]] *= scale;
10199 out += info.outJump;
10202 else if (info.inFormat == RTAUDIO_SINT32) {
10203 Int32 *in = (Int32 *)inBuffer;
10204 scale = 1.0 / 2147483647.5;
10205 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10206 for (j=0; j<info.channels; j++) {
10207 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10208 out[info.outOffset[j]] += 0.5;
10209 out[info.outOffset[j]] *= scale;
10212 out += info.outJump;
10215 else if (info.inFormat == RTAUDIO_FLOAT32) {
10216 Float32 *in = (Float32 *)inBuffer;
10217 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10218 for (j=0; j<info.channels; j++) {
10219 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10222 out += info.outJump;
10225 else if (info.inFormat == RTAUDIO_FLOAT64) {
10226 // Channel compensation and/or (de)interleaving only.
10227 Float64 *in = (Float64 *)inBuffer;
10228 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10229 for (j=0; j<info.channels; j++) {
10230 out[info.outOffset[j]] = in[info.inOffset[j]];
10233 out += info.outJump;
10237 else if (info.outFormat == RTAUDIO_FLOAT32) {
10239 Float32 *out = (Float32 *)outBuffer;
10241 if (info.inFormat == RTAUDIO_SINT8) {
10242 signed char *in = (signed char *)inBuffer;
10243 scale = (Float32) ( 1.0 / 127.5 );
10244 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10245 for (j=0; j<info.channels; j++) {
10246 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10247 out[info.outOffset[j]] += 0.5;
10248 out[info.outOffset[j]] *= scale;
10251 out += info.outJump;
10254 else if (info.inFormat == RTAUDIO_SINT16) {
10255 Int16 *in = (Int16 *)inBuffer;
10256 scale = (Float32) ( 1.0 / 32767.5 );
10257 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10258 for (j=0; j<info.channels; j++) {
10259 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10260 out[info.outOffset[j]] += 0.5;
10261 out[info.outOffset[j]] *= scale;
10264 out += info.outJump;
10267 else if (info.inFormat == RTAUDIO_SINT24) {
10268 Int24 *in = (Int24 *)inBuffer;
10269 scale = (Float32) ( 1.0 / 8388607.5 );
10270 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10271 for (j=0; j<info.channels; j++) {
10272 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10273 out[info.outOffset[j]] += 0.5;
10274 out[info.outOffset[j]] *= scale;
10277 out += info.outJump;
10280 else if (info.inFormat == RTAUDIO_SINT32) {
10281 Int32 *in = (Int32 *)inBuffer;
10282 scale = (Float32) ( 1.0 / 2147483647.5 );
10283 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10284 for (j=0; j<info.channels; j++) {
10285 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10286 out[info.outOffset[j]] += 0.5;
10287 out[info.outOffset[j]] *= scale;
10290 out += info.outJump;
10293 else if (info.inFormat == RTAUDIO_FLOAT32) {
10294 // Channel compensation and/or (de)interleaving only.
10295 Float32 *in = (Float32 *)inBuffer;
10296 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10297 for (j=0; j<info.channels; j++) {
10298 out[info.outOffset[j]] = in[info.inOffset[j]];
10301 out += info.outJump;
10304 else if (info.inFormat == RTAUDIO_FLOAT64) {
10305 Float64 *in = (Float64 *)inBuffer;
10306 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10307 for (j=0; j<info.channels; j++) {
10308 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10311 out += info.outJump;
10315 else if (info.outFormat == RTAUDIO_SINT32) {
10316 Int32 *out = (Int32 *)outBuffer;
10317 if (info.inFormat == RTAUDIO_SINT8) {
10318 signed char *in = (signed char *)inBuffer;
10319 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10320 for (j=0; j<info.channels; j++) {
10321 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10322 out[info.outOffset[j]] <<= 24;
10325 out += info.outJump;
10328 else if (info.inFormat == RTAUDIO_SINT16) {
10329 Int16 *in = (Int16 *)inBuffer;
10330 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10331 for (j=0; j<info.channels; j++) {
10332 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10333 out[info.outOffset[j]] <<= 16;
10336 out += info.outJump;
10339 else if (info.inFormat == RTAUDIO_SINT24) {
10340 Int24 *in = (Int24 *)inBuffer;
10341 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10342 for (j=0; j<info.channels; j++) {
10343 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10344 out[info.outOffset[j]] <<= 8;
10347 out += info.outJump;
10350 else if (info.inFormat == RTAUDIO_SINT32) {
10351 // Channel compensation and/or (de)interleaving only.
10352 Int32 *in = (Int32 *)inBuffer;
10353 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10354 for (j=0; j<info.channels; j++) {
10355 out[info.outOffset[j]] = in[info.inOffset[j]];
10358 out += info.outJump;
10361 else if (info.inFormat == RTAUDIO_FLOAT32) {
10362 Float32 *in = (Float32 *)inBuffer;
10363 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10364 for (j=0; j<info.channels; j++) {
10365 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10368 out += info.outJump;
10371 else if (info.inFormat == RTAUDIO_FLOAT64) {
10372 Float64 *in = (Float64 *)inBuffer;
10373 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10374 for (j=0; j<info.channels; j++) {
10375 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10378 out += info.outJump;
10382 else if (info.outFormat == RTAUDIO_SINT24) {
10383 Int24 *out = (Int24 *)outBuffer;
10384 if (info.inFormat == RTAUDIO_SINT8) {
10385 signed char *in = (signed char *)inBuffer;
10386 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10387 for (j=0; j<info.channels; j++) {
10388 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10389 //out[info.outOffset[j]] <<= 16;
10392 out += info.outJump;
10395 else if (info.inFormat == RTAUDIO_SINT16) {
10396 Int16 *in = (Int16 *)inBuffer;
10397 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10398 for (j=0; j<info.channels; j++) {
10399 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10400 //out[info.outOffset[j]] <<= 8;
10403 out += info.outJump;
10406 else if (info.inFormat == RTAUDIO_SINT24) {
10407 // Channel compensation and/or (de)interleaving only.
10408 Int24 *in = (Int24 *)inBuffer;
10409 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10410 for (j=0; j<info.channels; j++) {
10411 out[info.outOffset[j]] = in[info.inOffset[j]];
10414 out += info.outJump;
10417 else if (info.inFormat == RTAUDIO_SINT32) {
10418 Int32 *in = (Int32 *)inBuffer;
10419 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10420 for (j=0; j<info.channels; j++) {
10421 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10422 //out[info.outOffset[j]] >>= 8;
10425 out += info.outJump;
10428 else if (info.inFormat == RTAUDIO_FLOAT32) {
10429 Float32 *in = (Float32 *)inBuffer;
10430 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10431 for (j=0; j<info.channels; j++) {
10432 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10435 out += info.outJump;
10438 else if (info.inFormat == RTAUDIO_FLOAT64) {
10439 Float64 *in = (Float64 *)inBuffer;
10440 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10441 for (j=0; j<info.channels; j++) {
10442 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10445 out += info.outJump;
10449 else if (info.outFormat == RTAUDIO_SINT16) {
10450 Int16 *out = (Int16 *)outBuffer;
10451 if (info.inFormat == RTAUDIO_SINT8) {
10452 signed char *in = (signed char *)inBuffer;
10453 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10454 for (j=0; j<info.channels; j++) {
10455 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10456 out[info.outOffset[j]] <<= 8;
10459 out += info.outJump;
10462 else if (info.inFormat == RTAUDIO_SINT16) {
10463 // Channel compensation and/or (de)interleaving only.
10464 Int16 *in = (Int16 *)inBuffer;
10465 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10466 for (j=0; j<info.channels; j++) {
10467 out[info.outOffset[j]] = in[info.inOffset[j]];
10470 out += info.outJump;
10473 else if (info.inFormat == RTAUDIO_SINT24) {
10474 Int24 *in = (Int24 *)inBuffer;
10475 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10476 for (j=0; j<info.channels; j++) {
10477 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10480 out += info.outJump;
10483 else if (info.inFormat == RTAUDIO_SINT32) {
10484 Int32 *in = (Int32 *)inBuffer;
10485 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10486 for (j=0; j<info.channels; j++) {
10487 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10490 out += info.outJump;
10493 else if (info.inFormat == RTAUDIO_FLOAT32) {
10494 Float32 *in = (Float32 *)inBuffer;
10495 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10496 for (j=0; j<info.channels; j++) {
10497 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10500 out += info.outJump;
10503 else if (info.inFormat == RTAUDIO_FLOAT64) {
10504 Float64 *in = (Float64 *)inBuffer;
10505 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10506 for (j=0; j<info.channels; j++) {
10507 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10510 out += info.outJump;
10514 else if (info.outFormat == RTAUDIO_SINT8) {
10515 signed char *out = (signed char *)outBuffer;
10516 if (info.inFormat == RTAUDIO_SINT8) {
10517 // Channel compensation and/or (de)interleaving only.
10518 signed char *in = (signed char *)inBuffer;
10519 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10520 for (j=0; j<info.channels; j++) {
10521 out[info.outOffset[j]] = in[info.inOffset[j]];
10524 out += info.outJump;
10527 if (info.inFormat == RTAUDIO_SINT16) {
10528 Int16 *in = (Int16 *)inBuffer;
10529 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10530 for (j=0; j<info.channels; j++) {
10531 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10534 out += info.outJump;
10537 else if (info.inFormat == RTAUDIO_SINT24) {
10538 Int24 *in = (Int24 *)inBuffer;
10539 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10540 for (j=0; j<info.channels; j++) {
10541 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10544 out += info.outJump;
10547 else if (info.inFormat == RTAUDIO_SINT32) {
10548 Int32 *in = (Int32 *)inBuffer;
10549 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10550 for (j=0; j<info.channels; j++) {
10551 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10554 out += info.outJump;
10557 else if (info.inFormat == RTAUDIO_FLOAT32) {
10558 Float32 *in = (Float32 *)inBuffer;
10559 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10560 for (j=0; j<info.channels; j++) {
10561 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10564 out += info.outJump;
10567 else if (info.inFormat == RTAUDIO_FLOAT64) {
10568 Float64 *in = (Float64 *)inBuffer;
10569 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10570 for (j=0; j<info.channels; j++) {
10571 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10574 out += info.outJump;
10580 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10581 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10582 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10584 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10590 if ( format == RTAUDIO_SINT16 ) {
10591 for ( unsigned int i=0; i<samples; i++ ) {
10592 // Swap 1st and 2nd bytes.
10597 // Increment 2 bytes.
10601 else if ( format == RTAUDIO_SINT32 ||
10602 format == RTAUDIO_FLOAT32 ) {
10603 for ( unsigned int i=0; i<samples; i++ ) {
10604 // Swap 1st and 4th bytes.
10609 // Swap 2nd and 3rd bytes.
10615 // Increment 3 more bytes.
10619 else if ( format == RTAUDIO_SINT24 ) {
10620 for ( unsigned int i=0; i<samples; i++ ) {
10621 // Swap 1st and 3rd bytes.
10626 // Increment 2 more bytes.
10630 else if ( format == RTAUDIO_FLOAT64 ) {
10631 for ( unsigned int i=0; i<samples; i++ ) {
10632 // Swap 1st and 8th bytes
10637 // Swap 2nd and 7th bytes
10643 // Swap 3rd and 6th bytes
10649 // Swap 4th and 5th bytes
10655 // Increment 5 more bytes.
10661 // Indentation settings for Vim and Emacs
10663 // Local Variables:
10664 // c-basic-offset: 2
10665 // indent-tabs-mode: nil
10668 // vim: et sts=2 sw=2