1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio GitHub site: https://github.com/thestk/rtaudio
11 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
13 RtAudio: realtime audio i/o C++ classes
14 Copyright (c) 2001-2019 Gary P. Scavone
16 Permission is hereby granted, free of charge, to any person
17 obtaining a copy of this software and associated documentation files
18 (the "Software"), to deal in the Software without restriction,
19 including without limitation the rights to use, copy, modify, merge,
20 publish, distribute, sublicense, and/or sell copies of the Software,
21 and to permit persons to whom the Software is furnished to do so,
22 subject to the following conditions:
24 The above copyright notice and this permission notice shall be
25 included in all copies or substantial portions of the Software.
27 Any person wishing to distribute modifications to the Software is
28 asked to send the modifications to the original developer so that
29 they can be incorporated into the canonical version. This is,
30 however, not a binding provision of this license.
32 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
33 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
34 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
35 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
36 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
37 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
38 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 /************************************************************************/
42 // RtAudio: Version 6.0.0beta1
52 // Static variable definitions.
// Candidate sample rates (Hz) probed when a device reports a continuous
// rate range rather than discrete rates; MAX_SAMPLE_RATES must equal the
// element count of SAMPLE_RATES (14 entries listed below).
// NOTE(review): the array's closing "};" is not visible in this listing —
// confirm against the full source.
53 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
54 const unsigned int RtApi::SAMPLE_RATES[] = {
55 4000, 5512, 8000, 9600, 11025, 16000, 22050,
56 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform glue: map the MUTEX_* macros onto Win32 critical sections,
// POSIX pthread mutexes, or no-op dummies, and provide char/wchar_t ->
// std::string helpers on Windows.
59 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
60 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
61 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
62 #define MUTEX_LOCK(A) EnterCriticalSection(A)
63 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Narrow-char overload: trivial wrap (assumes text is NUL-terminated).
// NOTE(review): the function's braces are elided in this listing.
67 static std::string convertCharPointerToStdString(const char *text)
69 return std::string(text);
// Wide-char overload: convert UTF-16 to UTF-8 via WideCharToMultiByte.
// The first call sizes the buffer (length includes the NUL); the string is
// sized length-1 so the terminator is not stored.
// NOTE(review): if WideCharToMultiByte fails it returns 0, making
// length-1 == -1 and std::string(size_t(-1), '\0') a huge allocation /
// length_error — a guard for length <= 0 is warranted. The "return s;"
// and closing brace are elided in this listing.
72 static std::string convertCharPointerToStdString(const wchar_t *text)
74 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
75 std::string s( length-1, '\0' );
76 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
80 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
82 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
83 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
84 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
85 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
// Fallback branch (no compiled API): the macros are deliberately inert;
// abs(*A) is a harmless expression used as a placeholder. The intervening
// "#else" line (original line 86) is elided in this listing.
87 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
88 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
91 // *************************************************** //
93 // RtAudio definitions.
95 // *************************************************** //
// Return the library version string (the RTAUDIO_VERSION macro, defined
// in RtAudio.h). Braces elided in this listing.
97 std::string RtAudio :: getVersion( void )
99 return RTAUDIO_VERSION;
102 // Define API names and display names.
103 // Must be in same order as API enum.
// Each row is { machine-readable id, human-readable display name },
// indexed directly by the RtAudio::Api enum value.
// NOTE(review): rows for "alsa", "jack" and "asio" (original lines
// 107/110/113) and the array's closing "};" are elided in this listing —
// confirm against the full source; the enum-order invariant is enforced
// by the StaticAssert check further below.
105 const char* rtaudio_api_names[][2] = {
106 { "unspecified" , "Unknown" },
108 { "pulse" , "Pulse" },
109 { "oss" , "OpenSoundSystem" },
111 { "core" , "CoreAudio" },
112 { "wasapi" , "WASAPI" },
114 { "ds" , "DirectSound" },
115 { "dummy" , "Dummy" },
// Number of rows in the table above, computed at compile time.
118 const unsigned int rtaudio_num_api_names =
119 sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
121 // The order here will control the order of RtAudio's API search in
// the constructor: the first compiled API that reports at least one
// device wins. The list is terminated with UNSPECIFIED, which is
// excluded from the exported count (the "-1" below).
// NOTE(review): the enumerators for JACK/ALSA/OSS/DS (and the matching
// "#endif" lines) are elided in this listing — confirm against the full
// source.
123 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
124 #if defined(__UNIX_JACK__)
127 #if defined(__LINUX_PULSE__)
128 RtAudio::LINUX_PULSE,
130 #if defined(__LINUX_ALSA__)
133 #if defined(__LINUX_OSS__)
136 #if defined(__WINDOWS_ASIO__)
137 RtAudio::WINDOWS_ASIO,
139 #if defined(__WINDOWS_WASAPI__)
140 RtAudio::WINDOWS_WASAPI,
142 #if defined(__WINDOWS_DS__)
145 #if defined(__MACOSX_CORE__)
146 RtAudio::MACOSX_CORE,
148 #if defined(__RTAUDIO_DUMMY__)
149 RtAudio::RTAUDIO_DUMMY,
151 RtAudio::UNSPECIFIED,
// Count of genuinely compiled APIs (sentinel excluded).
154 extern "C" const unsigned int rtaudio_num_compiled_apis =
155 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
158 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
159 // If the build breaks here, check that they match.
// Pre-C++11 static_assert emulation: only the <true> specialization has a
// public constructor, so instantiating StaticAssert<false> fails to compile.
160 template<bool b> class StaticAssert { private: StaticAssert() {} };
161 template<> class StaticAssert<true>{ public: StaticAssert() {} };
// Never instantiated at runtime; exists solely to force the check above.
// NOTE(review): the closing "}};" of StaticAssertions is elided in this
// listing.
162 class StaticAssertions { StaticAssertions() {
163 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
// Fill 'apis' with the APIs compiled into this build (sentinel excluded).
// Braces elided in this listing.
166 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
168 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
169 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
// Map an Api enum value to its machine-readable name ("alsa", "core", ...).
// Out-of-range values yield an empty string (the "return" for the guard,
// original line 175, is elided in this listing).
172 std::string RtAudio :: getApiName( RtAudio::Api api )
174 if (api < 0 || api >= RtAudio::NUM_APIS)
176 return rtaudio_api_names[api][0];
// Map an Api enum value to its human-readable display name.
// Same elided out-of-range guard body as getApiName above.
179 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
181 if (api < 0 || api >= RtAudio::NUM_APIS)
183 return rtaudio_api_names[api][1];
// Reverse lookup: machine-readable name -> Api enum, restricted to APIs
// actually compiled in; UNSPECIFIED when not found. The declaration of
// loop variable 'i' (original line 188) is elided in this listing.
186 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
189 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
190 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
191 return rtaudio_compiled_apis[i];
192 return RtAudio::UNSPECIFIED;
// Instantiate the concrete RtApi subclass for the requested API, storing
// it in rtapi_. Each candidate is guarded by its compile-time flag, so
// only compiled backends are reachable. If no branch matches, rtapi_ is
// presumably left null (the initial "rtapi_ = 0;" line and the function
// braces are elided in this listing — confirm against the full source).
195 void RtAudio :: openRtApi( RtAudio::Api api )
201 #if defined(__UNIX_JACK__)
202 if ( api == UNIX_JACK )
203 rtapi_ = new RtApiJack();
205 #if defined(__LINUX_ALSA__)
206 if ( api == LINUX_ALSA )
207 rtapi_ = new RtApiAlsa();
209 #if defined(__LINUX_PULSE__)
210 if ( api == LINUX_PULSE )
211 rtapi_ = new RtApiPulse();
213 #if defined(__LINUX_OSS__)
214 if ( api == LINUX_OSS )
215 rtapi_ = new RtApiOss();
217 #if defined(__WINDOWS_ASIO__)
218 if ( api == WINDOWS_ASIO )
219 rtapi_ = new RtApiAsio();
221 #if defined(__WINDOWS_WASAPI__)
222 if ( api == WINDOWS_WASAPI )
223 rtapi_ = new RtApiWasapi();
225 #if defined(__WINDOWS_DS__)
226 if ( api == WINDOWS_DS )
227 rtapi_ = new RtApiDs();
229 #if defined(__MACOSX_CORE__)
230 if ( api == MACOSX_CORE )
231 rtapi_ = new RtApiCore();
233 #if defined(__RTAUDIO_DUMMY__)
234 if ( api == RTAUDIO_DUMMY )
235 rtapi_ = new RtApiDummy();
// Constructor. If a specific API was requested, try to open it; on
// failure (no compiled support), warn via the error callback (or stderr)
// and fall through to auto-selection. Auto-selection walks the compiled
// API list and keeps the first backend reporting at least one device.
// NOTE(review): several control-flow lines (if/else braces, the early
// "if ( rtapi_ ) return;" checks) are elided in this listing — the
// surviving lines below show the overall shape only.
239 RtAudio :: RtAudio( RtAudio::Api api, RtAudioErrorCallback errorCallback )
243 std::string errorMessage;
244 if ( api != UNSPECIFIED ) {
245 // Attempt to open the specified API.
249 if ( errorCallback ) rtapi_->setErrorCallback( errorCallback );
253 // No compiled support for specified API value. Issue a warning
254 // and continue as if no API was specified.
255 errorMessage = "RtAudio: no compiled support for specified API argument!";
257 errorCallback( RTAUDIO_INVALID_USE, errorMessage );
259 std::cerr << '\n' << errorMessage << '\n' << std::endl;
262 // Iterate through the compiled APIs and return as soon as we find
263 // one with at least one device or we reach the end of the list.
264 std::vector< RtAudio::Api > apis;
265 getCompiledApi( apis );
266 for ( unsigned int i=0; i<apis.size(); i++ ) {
267 openRtApi( apis[i] );
268 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
272 if ( errorCallback ) rtapi_->setErrorCallback( errorCallback );
276 // It should not be possible to get here because the preprocessor
277 // definition __RTAUDIO_DUMMY__ is automatically defined in RtAudio.h
278 // if no API-specific definitions are passed to the compiler. But just
279 // in case something weird happens, issue an error message and abort.
280 errorMessage = "RtAudio: no compiled API support found ... critical error!";
282 errorCallback( RTAUDIO_INVALID_USE, errorMessage );
284 std::cerr << '\n' << errorMessage << '\n' << std::endl;
// Destructor. Its body (presumably deleting rtapi_) is elided in this
// listing — confirm against the full source.
288 RtAudio :: ~RtAudio()
// Thin forwarder: delegate stream opening to the selected backend
// (rtapi_). The trailing arguments of the forwarded call (userData,
// options — original line 303) and the braces are elided in this listing.
294 RtAudioErrorType RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
295 RtAudio::StreamParameters *inputParameters,
296 RtAudioFormat format, unsigned int sampleRate,
297 unsigned int *bufferFrames,
298 RtAudioCallback callback, void *userData,
299 RtAudio::StreamOptions *options )
301 return rtapi_->openStream( outputParameters, inputParameters, format,
302 sampleRate, bufferFrames, callback,
306 // *************************************************** //
308 // Public RtApi definitions (see end of file for
309 // private or protected utility functions).
311 // *************************************************** //
// Fragments of RtApi's constructor and destructor — the signatures
// (original lines ~313 and ~321) are elided in this listing. The ctor
// initializes the stream mutex and enables warnings; the dtor tears the
// mutex down. NOTE(review): confirm the surrounding definitions against
// the full source.
316 MUTEX_INITIALIZE( &stream_.mutex );
318 showWarnings_ = true;
323 MUTEX_DESTROY( &stream_.mutex );
// Validate arguments, probe the requested device(s), and transition the
// stream to STOPPED. Performs, in order: already-open check, per-direction
// channel-count checks, both-NULL check, format check, device-id range
// checks, then probeDeviceOpen() for output and/or input. On an input
// probe failure after a successful output probe, the half-open stream is
// closed before returning.
// NOTE(review): several closing braces and the "if ( oParams ) {" /
// "if ( iParams ) {" guard lines are elided in this listing.
326 RtAudioErrorType RtApi :: openStream( RtAudio::StreamParameters *oParams,
327 RtAudio::StreamParameters *iParams,
328 RtAudioFormat format, unsigned int sampleRate,
329 unsigned int *bufferFrames,
330 RtAudioCallback callback, void *userData,
331 RtAudio::StreamOptions *options )
333 if ( stream_.state != STREAM_CLOSED ) {
334 errorText_ = "RtApi::openStream: a stream is already open!";
335 return error( RTAUDIO_INVALID_USE );
338 // Clear stream information potentially left from a previously open stream.
341 if ( oParams && oParams->nChannels < 1 ) {
342 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
343 return error( RTAUDIO_INVALID_USE );
346 if ( iParams && iParams->nChannels < 1 ) {
347 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
348 return error( RTAUDIO_INVALID_USE );
351 if ( oParams == NULL && iParams == NULL ) {
352 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
353 return error( RTAUDIO_INVALID_USE );
// formatBytes() returning 0 marks an unknown/unsupported sample format.
356 if ( formatBytes(format) == 0 ) {
357 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
358 return error( RTAUDIO_INVALID_USE );
361 unsigned int nDevices = getDeviceCount();
362 unsigned int oChannels = 0;
364 oChannels = oParams->nChannels;
365 if ( oParams->deviceId >= nDevices ) {
366 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
367 return error( RTAUDIO_INVALID_USE );
371 unsigned int iChannels = 0;
373 iChannels = iParams->nChannels;
374 if ( iParams->deviceId >= nDevices ) {
375 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
376 return error( RTAUDIO_INVALID_USE );
// Probe the output device first, then the input device.
382 if ( oChannels > 0 ) {
384 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
385 sampleRate, format, bufferFrames, options );
386 if ( result == false ) {
387 return error( RTAUDIO_SYSTEM_ERROR );
391 if ( iChannels > 0 ) {
393 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
394 sampleRate, format, bufferFrames, options );
395 if ( result == false ) {
// Undo the successful output-side open before reporting failure.
396 if ( oChannels > 0 ) closeStream();
397 return error( RTAUDIO_SYSTEM_ERROR );
401 stream_.callbackInfo.callback = (void *) callback;
402 stream_.callbackInfo.userData = userData;
// Report back the actual buffer count chosen by the backend.
404 if ( options ) options->numberOfBuffers = stream_.nBuffers;
405 stream_.state = STREAM_STOPPED;
406 return RTAUDIO_NO_ERROR;
// Base-class default: device index 0 is the default input device unless a
// subclass overrides this (the "return 0;" bodies are elided in this
// listing).
409 unsigned int RtApi :: getDefaultInputDevice( void )
411 // Should be implemented in subclasses if possible.
415 unsigned int RtApi :: getDefaultOutputDevice( void )
417 // Should be implemented in subclasses if possible.
// Pure-in-spirit stubs: every concrete backend must override these.
421 void RtApi :: closeStream( void )
423 // MUST be implemented in subclasses!
427 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
428 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
429 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
430 RtAudio::StreamOptions * /*options*/ )
432 // MUST be implemented in subclasses!
// Advance the stream clock by one buffer's duration; also record a
// wall-clock timestamp so getStreamTime() can interpolate between ticks.
436 void RtApi :: tickStreamTime( void )
438 // Subclasses that do not provide their own implementation of
439 // getStreamTime should call this function once per buffer I/O to
440 // provide basic stream time support.
442 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
445 #if defined( HAVE_GETTIMEOFDAY )
446 gettimeofday( &stream_.lastTickTimestamp, NULL );
// Total stream latency in frames: output latency plus input latency for
// duplex streams (latency[0] = output, latency[1] = input). The trailing
// "return totalLatency;" is elided in this listing.
451 long RtApi :: getStreamLatency( void )
453 long totalLatency = 0;
454 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
455 totalLatency = stream_.latency[0];
456 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
457 totalLatency += stream_.latency[1];
// Current stream time in seconds. With gettimeofday available, the value
// is refined by adding the wall-clock time elapsed since the last
// tickStreamTime() call; otherwise the raw tick counter is returned.
// The declarations of 'then'/'now' (struct timeval) are elided in this
// listing.
463 double RtApi :: getStreamTime( void )
465 #if defined( HAVE_GETTIMEOFDAY )
466 // Return a very accurate estimate of the stream time by
467 // adding in the elapsed time since the last tick.
471 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
472 return stream_.streamTime;
474 gettimeofday( &now, NULL );
475 then = stream_.lastTickTimestamp;
476 return stream_.streamTime +
477 ((now.tv_sec + 0.000001 * now.tv_usec) -
478 (then.tv_sec + 0.000001 * then.tv_usec));
480 return stream_.streamTime;
// Reset the stream clock (and its wall-clock anchor when available).
485 void RtApi :: setStreamTime( double time )
488 stream_.streamTime = time;
490 #if defined( HAVE_GETTIMEOFDAY )
491 gettimeofday( &stream_.lastTickTimestamp, NULL );
// Sample rate of the currently open stream; the fallback for a closed
// stream (presumably return 0) is elided in this listing.
496 unsigned int RtApi :: getStreamSampleRate( void )
498 if ( isStreamOpen() ) return stream_.sampleRate;
503 // *************************************************** //
505 // OS/API-specific methods.
507 // *************************************************** //
509 #if defined(__MACOSX_CORE__)
511 // The OS X CoreAudio API is designed to use a separate callback
512 // procedure for each of its audio devices. A single RtAudio duplex
513 // stream using two different devices is supported here, though it
514 // cannot be guaranteed to always behave correctly because we cannot
515 // synchronize these two callbacks.
517 // A property listener is installed for over/underrun information.
518 // However, no functionality is currently provided to allow property
519 // listeners to trigger user handlers because it is unclear what could
520 // be done if a critical stream parameter (buffer size, sample rate,
521 // device disconnect) notification arrived. The listeners entail
522 // quite a bit of extra code and most likely, a user program wouldn't
523 // be prepared for the result anyway. However, we do provide a flag
524 // to the client callback function to inform of an over/underrun.
526 // A structure to hold various information related to the CoreAudio API
// Per-stream bookkeeping shared between RtApiCore and its static
// callbacks. Index convention throughout: [0] = output, [1] = input.
// NOTE(review): the struct keyword/name line, the xrun/deviceBuffer
// member declarations, and the closing "};" are elided in this listing.
529 AudioDeviceID id[2]; // device ids
530 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
531 AudioDeviceIOProcID procId[2];
533 UInt32 iStream[2]; // device stream index (or first if using multiple)
534 UInt32 nStreams[2]; // number of streams to use
537 pthread_cond_t condition;
538 int drainCounter; // Tracks callback counts when draining
539 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Default member initialization (constructor initializer list + body).
542 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Constructor. On OS-X 10.6+, explicitly set the HAL's run loop to NULL
// so property notifications are dispatched on the HAL's own thread —
// without this, device property queries/updates can silently stall.
545 RtApiCore:: RtApiCore()
547 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
548 // This is a largely undocumented but absolutely necessary
549 // requirement starting with OS-X 10.6. If not called, queries and
550 // updates to various audio device properties are not handled
552 CFRunLoopRef theRunLoop = NULL;
553 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
554 kAudioObjectPropertyScopeGlobal,
555 kAudioObjectPropertyElementMaster };
556 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
557 if ( result != noErr ) {
558 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
559 error( RTAUDIO_SYSTEM_ERROR );
// Destructor: close any open stream before base-class teardown runs.
564 RtApiCore :: ~RtApiCore()
566 // The subclass destructor gets called before the base class
567 // destructor, so close an existing stream before deallocating
568 // apiDeviceId memory.
569 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count audio devices by asking the HAL for the size of the device-ID
// array and dividing by sizeof(AudioDeviceID). Returns 0 on HAL error
// (the "return 0;" in the error branch and the declaration of dataSize
// are elided in this listing).
572 unsigned int RtApiCore :: getDeviceCount( void )
574 // Find out how many audio devices there are, if any.
576 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
577 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
578 if ( result != noErr ) {
579 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
580 error( RTAUDIO_SYSTEM_ERROR );
584 return dataSize / sizeof( AudioDeviceID );
// Resolve the system default input device to RtAudio's device index:
// fetch the default device's AudioDeviceID, then the full device-ID list,
// and return the matching list position. Falls back to 0 (with a warning)
// when no match is found. The declaration of 'id' (AudioDeviceID) and the
// error-branch returns are elided in this listing.
587 unsigned int RtApiCore :: getDefaultInputDevice( void )
589 unsigned int nDevices = getDeviceCount();
590 if ( nDevices <= 1 ) return 0;
593 UInt32 dataSize = sizeof( AudioDeviceID );
594 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
595 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
596 if ( result != noErr ) {
597 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
598 error( RTAUDIO_SYSTEM_ERROR );
602 dataSize *= nDevices;
// NOTE(review): runtime-sized array (VLA) — a compiler extension, not
// standard C++.
603 AudioDeviceID deviceList[ nDevices ];
604 property.mSelector = kAudioHardwarePropertyDevices;
605 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
606 if ( result != noErr ) {
607 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
608 error( RTAUDIO_SYSTEM_ERROR );
612 for ( unsigned int i=0; i<nDevices; i++ )
613 if ( id == deviceList[i] ) return i;
615 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
616 error( RTAUDIO_WARNING );
// Mirror of getDefaultInputDevice for the default OUTPUT device: map the
// HAL's default AudioDeviceID to RtAudio's device index, 0 on failure.
// The declaration of 'id' and the error-branch returns are elided in this
// listing.
620 unsigned int RtApiCore :: getDefaultOutputDevice( void )
622 unsigned int nDevices = getDeviceCount();
623 if ( nDevices <= 1 ) return 0;
626 UInt32 dataSize = sizeof( AudioDeviceID );
627 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
628 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
629 if ( result != noErr ) {
630 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
631 error( RTAUDIO_SYSTEM_ERROR );
635 dataSize = sizeof( AudioDeviceID ) * nDevices;
// NOTE(review): runtime-sized array (VLA) — a compiler extension, not
// standard C++.
636 AudioDeviceID deviceList[ nDevices ];
637 property.mSelector = kAudioHardwarePropertyDevices;
638 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
639 if ( result != noErr ) {
640 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
641 error( RTAUDIO_SYSTEM_ERROR );
645 for ( unsigned int i=0; i<nDevices; i++ )
646 if ( id == deviceList[i] ) return i;
648 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
649 error( RTAUDIO_WARNING );
// Build a DeviceInfo record for device index 'device': name (manufacturer
// + product), output/input/duplex channel counts, supported and preferred
// sample rates, current sample rate, native format (always FLOAT32 for
// CoreAudio PCM streams per the comment below), and default-device flags.
// NOTE(review): this listing elides many lines — closing braces, 'cfname'
// / 'nominalRate' declarations, the free()/CFRelease() cleanup calls for
// the malloc'd 'mname'/'name' buffers and 'bufferList', and the early
// returns in error branches. Confirm the cleanup paths against the full
// source before assuming a leak.
653 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
655 RtAudio::DeviceInfo info;
659 unsigned int nDevices = getDeviceCount();
660 if ( nDevices == 0 ) {
661 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
662 error( RTAUDIO_INVALID_USE );
666 if ( device >= nDevices ) {
667 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
668 error( RTAUDIO_INVALID_USE );
// NOTE(review): runtime-sized array (VLA) — compiler extension.
672 AudioDeviceID deviceList[ nDevices ];
673 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
674 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
675 kAudioObjectPropertyScopeGlobal,
676 kAudioObjectPropertyElementMaster };
677 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
678 0, NULL, &dataSize, (void *) &deviceList );
679 if ( result != noErr ) {
680 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
681 error( RTAUDIO_WARNING );
685 AudioDeviceID id = deviceList[ device ];
687 // Get the device name.
// Manufacturer string first; sized as "length * 3 + 1" to leave room for
// up-to-3-byte UTF-8 sequences per UTF-16 code unit, plus the NUL.
690 dataSize = sizeof( CFStringRef );
691 property.mSelector = kAudioObjectPropertyManufacturer;
692 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
693 if ( result != noErr ) {
694 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
695 errorText_ = errorStream_.str();
696 error( RTAUDIO_WARNING );
700 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
701 long length = CFStringGetLength(cfname);
702 char *mname = (char *)malloc(length * 3 + 1);
703 #if defined( UNICODE ) || defined( _UNICODE )
704 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
706 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
708 info.name.append( (const char *)mname, strlen(mname) );
709 info.name.append( ": " );
// Then the product name, appended after "Manufacturer: ".
713 property.mSelector = kAudioObjectPropertyName;
714 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
715 if ( result != noErr ) {
716 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
717 errorText_ = errorStream_.str();
718 error( RTAUDIO_WARNING );
722 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
723 length = CFStringGetLength(cfname);
724 char *name = (char *)malloc(length * 3 + 1);
725 #if defined( UNICODE ) || defined( _UNICODE )
726 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
728 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
730 info.name.append( (const char *)name, strlen(name) );
734 // Get the output stream "configuration".
735 AudioBufferList *bufferList = nil;
736 property.mSelector = kAudioDevicePropertyStreamConfiguration;
737 property.mScope = kAudioDevicePropertyScopeOutput;
738 // property.mElement = kAudioObjectPropertyElementWildcard;
740 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
741 if ( result != noErr || dataSize == 0 ) {
742 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
743 errorText_ = errorStream_.str();
744 error( RTAUDIO_WARNING );
748 // Allocate the AudioBufferList.
749 bufferList = (AudioBufferList *) malloc( dataSize );
750 if ( bufferList == NULL ) {
751 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
752 error( RTAUDIO_WARNING );
756 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
757 if ( result != noErr || dataSize == 0 ) {
759 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
760 errorText_ = errorStream_.str();
761 error( RTAUDIO_WARNING );
// Output channels = sum of channels across all of the device's streams.
765 // Get output channel information.
766 unsigned int i, nStreams = bufferList->mNumberBuffers;
767 for ( i=0; i<nStreams; i++ )
768 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
// Repeat the configuration query with the input scope; bufferList is
// re-malloc'd (the intervening free() is elided in this listing).
771 // Get the input stream "configuration".
772 property.mScope = kAudioDevicePropertyScopeInput;
773 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
774 if ( result != noErr || dataSize == 0 ) {
775 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
776 errorText_ = errorStream_.str();
777 error( RTAUDIO_WARNING );
781 // Allocate the AudioBufferList.
782 bufferList = (AudioBufferList *) malloc( dataSize );
783 if ( bufferList == NULL ) {
784 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
785 error( RTAUDIO_WARNING );
789 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
790 if (result != noErr || dataSize == 0) {
792 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
793 errorText_ = errorStream_.str();
794 error( RTAUDIO_WARNING );
798 // Get input channel information.
799 nStreams = bufferList->mNumberBuffers;
800 for ( i=0; i<nStreams; i++ )
801 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
804 // If device opens for both playback and capture, we determine the channels.
805 if ( info.outputChannels > 0 && info.inputChannels > 0 )
806 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
808 // Probe the device sample rates.
809 bool isInput = false;
810 if ( info.outputChannels == 0 ) isInput = true;
812 // Determine the supported sample rates.
813 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
814 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
815 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
816 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
817 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
818 errorText_ = errorStream_.str();
819 error( RTAUDIO_WARNING );
823 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
// NOTE(review): runtime-sized array (VLA) — compiler extension.
824 AudioValueRange rangeList[ nRanges ];
825 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
826 if ( result != kAudioHardwareNoError ) {
827 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
828 errorText_ = errorStream_.str();
829 error( RTAUDIO_WARNING );
833 // The sample rate reporting mechanism is a bit of a mystery. It
834 // seems that it can either return individual rates or a range of
835 // rates. I assume that if the min / max range values are the same,
836 // then that represents a single supported rate and if the min / max
837 // range values are different, the device supports an arbitrary
838 // range of values (though there might be multiple ranges, so we'll
839 // use the most conservative range).
840 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
841 bool haveValueRange = false;
842 info.sampleRates.clear();
843 for ( UInt32 i=0; i<nRanges; i++ ) {
844 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
845 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
846 info.sampleRates.push_back( tmpSr );
// Preferred rate: the highest discrete rate not exceeding 48 kHz.
848 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
849 info.preferredSampleRate = tmpSr;
852 haveValueRange = true;
// Intersect multiple ranges by keeping the largest min and smallest max.
853 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
854 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
// For continuous ranges, offer the standard candidate rates that fall
// inside the intersected [minimumRate, maximumRate] window.
858 if ( haveValueRange ) {
859 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
860 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
861 info.sampleRates.push_back( SAMPLE_RATES[k] );
863 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
864 info.preferredSampleRate = SAMPLE_RATES[k];
869 // Sort and remove any redundant values
870 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
871 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
873 if ( info.sampleRates.size() == 0 ) {
874 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
875 errorText_ = errorStream_.str();
876 error( RTAUDIO_WARNING );
880 // Probe the currently configured sample rate
882 dataSize = sizeof( Float64 );
883 property.mSelector = kAudioDevicePropertyNominalSampleRate;
884 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
885 if ( result == noErr ) info.currentSampleRate = (unsigned int) nominalRate;
887 // CoreAudio always uses 32-bit floating point data for PCM streams.
888 // Thus, any other "physical" formats supported by the device are of
889 // no interest to the client.
890 info.nativeFormats = RTAUDIO_FLOAT32;
892 if ( info.outputChannels > 0 )
893 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
894 if ( info.inputChannels > 0 )
895 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// HAL I/O proc: recover the RtApiCore instance from the client-data
// pointer and dispatch to callbackEvent(); a false return is mapped to
// kAudioHardwareUnspecifiedError. The final 'void*' client-data parameter
// name line (infoPointer) and braces are elided in this listing.
901 static OSStatus callbackHandler( AudioDeviceID inDevice,
902 const AudioTimeStamp* /*inNow*/,
903 const AudioBufferList* inInputData,
904 const AudioTimeStamp* /*inInputTime*/,
905 AudioBufferList* outOutputData,
906 const AudioTimeStamp* /*inOutputTime*/,
909 CallbackInfo *info = (CallbackInfo *) infoPointer;
911 RtApiCore *object = (RtApiCore *) info->object;
912 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
913 return kAudioHardwareUnspecifiedError;
915 return kAudioHardwareNoError;
// Property listener for device removal: when kAudioDevicePropertyDeviceIsAlive
// fires, flag the disconnect and close the stream. The 'nAddresses'
// parameter line is elided in this listing.
918 static OSStatus disconnectListener( AudioObjectID /*inDevice*/,
920 const AudioObjectPropertyAddress properties[],
923 for ( UInt32 i=0; i<nAddresses; i++ ) {
924 if ( properties[i].mSelector == kAudioDevicePropertyDeviceIsAlive ) {
925 CallbackInfo *info = (CallbackInfo *) infoPointer;
926 RtApiCore *object = (RtApiCore *) info->object;
927 info->deviceDisconnected = true;
928 object->closeStream();
929 return kAudioHardwareUnspecifiedError;
933 return kAudioHardwareNoError;
// Property listener for processor overloads (xruns): records the
// over/underrun in the CoreHandle, per direction (xrun[1] = input,
// xrun[0] = output).
936 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
938 const AudioObjectPropertyAddress properties[],
939 void* handlePointer )
941 CoreHandle *handle = (CoreHandle *) handlePointer;
942 for ( UInt32 i=0; i<nAddresses; i++ ) {
943 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
944 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
945 handle->xrun[1] = true;
947 handle->xrun[0] = true;
951 return kAudioHardwareNoError;
// Probe the given CoreAudio device and open it for one stream direction.
// Configures buffer size, sample rate, virtual and physical stream formats,
// allocates the CoreHandle and user/device buffers, registers the IOProc and
// the xrun/disconnect property listeners, and fills in stream_ state.
// On failure, errorText_ is set and the cleanup section at the bottom
// releases whatever was allocated.
954 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
955 unsigned int firstChannel, unsigned int sampleRate,
956 RtAudioFormat format, unsigned int *bufferSize,
957 RtAudio::StreamOptions *options )
960 unsigned int nDevices = getDeviceCount();
961 if ( nDevices == 0 ) {
962 // This should not happen because a check is made before this function is called.
963 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
967 if ( device >= nDevices ) {
968 // This should not happen because a check is made before this function is called.
969 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// Map the RtAudio device index to a CoreAudio AudioDeviceID via the
// system object's device list.
// NOTE(review): a variable-length array is a compiler extension in C++.
973 AudioDeviceID deviceList[ nDevices ];
974 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
975 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
976 kAudioObjectPropertyScopeGlobal,
977 kAudioObjectPropertyElementMaster };
978 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
979 0, NULL, &dataSize, (void *) &deviceList );
980 if ( result != noErr ) {
981 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
985 AudioDeviceID id = deviceList[ device ];
987 // Setup for stream mode.
988 bool isInput = false;
989 if ( mode == INPUT ) {
991 property.mScope = kAudioDevicePropertyScopeInput;
994 property.mScope = kAudioDevicePropertyScopeOutput;
996 // Get the stream "configuration".
997 AudioBufferList *bufferList = nil;
999 property.mSelector = kAudioDevicePropertyStreamConfiguration;
1000 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
1001 if ( result != noErr || dataSize == 0 ) {
1002 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
1003 errorText_ = errorStream_.str();
1007 // Allocate the AudioBufferList.
1008 bufferList = (AudioBufferList *) malloc( dataSize );
1009 if ( bufferList == NULL ) {
1010 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1014 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1015 if (result != noErr || dataSize == 0) {
1017 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1018 errorText_ = errorStream_.str();
1022 // Search for one or more streams that contain the desired number of
1023 // channels. CoreAudio devices can have an arbitrary number of
1024 // streams and each stream can have an arbitrary number of channels.
1025 // For each stream, a single buffer of interleaved samples is
1026 // provided. RtAudio prefers the use of one stream of interleaved
1027 // data or multiple consecutive single-channel streams. However, we
1028 // now support multiple consecutive multi-channel streams of
1029 // interleaved data as well.
1030 UInt32 iStream, offsetCounter = firstChannel;
1031 UInt32 nStreams = bufferList->mNumberBuffers;
1032 bool monoMode = false;
1033 bool foundStream = false;
1035 // First check that the device supports the requested number of
1037 UInt32 deviceChannels = 0;
1038 for ( iStream=0; iStream<nStreams; iStream++ )
1039 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1041 if ( deviceChannels < ( channels + firstChannel ) ) {
1043 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1044 errorText_ = errorStream_.str();
1048 // Look for a single stream meeting our needs.
1049 UInt32 firstStream = 0, streamCount = 1, streamChannels = 0, channelOffset = 0;
1050 for ( iStream=0; iStream<nStreams; iStream++ ) {
1051 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1052 if ( streamChannels >= channels + offsetCounter ) {
1053 firstStream = iStream;
1054 channelOffset = offsetCounter;
// Walk past streams that only hold part of the requested channel offset.
1058 if ( streamChannels > offsetCounter ) break;
1059 offsetCounter -= streamChannels;
1062 // If we didn't find a single stream above, then we should be able
1063 // to meet the channel specification with multiple streams.
1064 if ( foundStream == false ) {
1066 offsetCounter = firstChannel;
1067 for ( iStream=0; iStream<nStreams; iStream++ ) {
1068 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1069 if ( streamChannels > offsetCounter ) break;
1070 offsetCounter -= streamChannels;
1073 firstStream = iStream;
1074 channelOffset = offsetCounter;
1075 Int32 channelCounter = channels + offsetCounter - streamChannels;
// monoMode is cleared as soon as any participating stream carries more
// than one channel; it controls deviceInterleaved[] below.
1077 if ( streamChannels > 1 ) monoMode = false;
1078 while ( channelCounter > 0 ) {
1079 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1080 if ( streamChannels > 1 ) monoMode = false;
1081 channelCounter -= streamChannels;
1088 // Determine the buffer size.
1089 AudioValueRange bufferRange;
1090 dataSize = sizeof( AudioValueRange );
1091 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1092 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1094 if ( result != noErr ) {
1095 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1096 errorText_ = errorStream_.str();
// Clamp the requested buffer size to the device-supported range.
1100 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned int) bufferRange.mMinimum;
1101 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned int) bufferRange.mMaximum;
1102 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned int) bufferRange.mMinimum;
1104 // Set the buffer size. For multiple streams, I'm assuming we only
1105 // need to make this setting for the master channel.
1106 UInt32 theSize = (UInt32) *bufferSize;
1107 dataSize = sizeof( UInt32 );
1108 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1109 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1111 if ( result != noErr ) {
1112 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1113 errorText_ = errorStream_.str();
1117 // If attempting to setup a duplex stream, the bufferSize parameter
1118 // MUST be the same in both directions!
1119 *bufferSize = theSize;
1120 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1121 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1122 errorText_ = errorStream_.str();
1126 stream_.bufferSize = *bufferSize;
1127 stream_.nBuffers = 1;
1129 // Try to set "hog" mode ... it's not clear to me this is working.
1130 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1132 dataSize = sizeof( hog_pid );
1133 property.mSelector = kAudioDevicePropertyHogMode;
1134 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1135 if ( result != noErr ) {
1136 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1137 errorText_ = errorStream_.str();
// Only take exclusive ("hog") access if another process currently owns it.
1141 if ( hog_pid != getpid() ) {
1143 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1144 if ( result != noErr ) {
1145 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1146 errorText_ = errorStream_.str();
1152 // Check and if necessary, change the sample rate for the device.
1153 Float64 nominalRate;
1154 dataSize = sizeof( Float64 );
1155 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1156 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1157 if ( result != noErr ) {
1158 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1159 errorText_ = errorStream_.str();
1163 // Only try to change the sample rate if off by more than 1 Hz.
1164 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1166 nominalRate = (Float64) sampleRate;
1167 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1168 if ( result != noErr ) {
1169 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1170 errorText_ = errorStream_.str();
1174 // Now wait until the reported nominal rate is what we just set.
// Polls every 5 ms (the counter advances in 5000-microsecond steps),
// giving up after 2 seconds.
1175 UInt32 microCounter = 0;
1176 Float64 reportedRate = 0.0;
1177 while ( reportedRate != nominalRate ) {
1178 microCounter += 5000;
1179 if ( microCounter > 2000000 ) break;
1181 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &reportedRate );
1184 if ( microCounter > 2000000 ) {
1185 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1186 errorText_ = errorStream_.str();
1191 // Now set the stream format for all streams. Also, check the
1192 // physical format of the device and change that if necessary.
1193 AudioStreamBasicDescription description;
1194 dataSize = sizeof( AudioStreamBasicDescription );
1195 property.mSelector = kAudioStreamPropertyVirtualFormat;
1196 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1197 if ( result != noErr ) {
1198 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1199 errorText_ = errorStream_.str();
1203 // Set the sample rate and data format id. However, only make the
1204 // change if the sample rate is not within 1.0 of the desired
1205 // rate and the format is not linear pcm.
1206 bool updateFormat = false;
1207 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1208 description.mSampleRate = (Float64) sampleRate;
1209 updateFormat = true;
1212 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1213 description.mFormatID = kAudioFormatLinearPCM;
1214 updateFormat = true;
1217 if ( updateFormat ) {
1218 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1219 if ( result != noErr ) {
1220 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1221 errorText_ = errorStream_.str();
1226 // Now check the physical format.
1227 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1228 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1229 if ( result != noErr ) {
1230 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1231 errorText_ = errorStream_.str();
1235 //std::cout << "Current physical stream format:" << std::endl;
1236 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1237 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1238 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1239 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;
1241 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1242 description.mFormatID = kAudioFormatLinearPCM;
1243 //description.mSampleRate = (Float64) sampleRate;
1244 AudioStreamBasicDescription testDescription = description;
1247 // We'll try higher bit rates first and then work our way down.
// Candidate formats: pair.first is the bit depth, pair.second the
// format flags. Fractional depths (24.2 / 24.4) distinguish the
// 24-bit-in-4-bytes aligned-low/aligned-high variants from packed 24.
1248 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1249 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1250 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1251 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1252 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1253 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
1254 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1255 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1256 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1257 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1258 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1259 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1260 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1262 bool setPhysicalFormat = false;
1263 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1264 testDescription = description;
1265 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1266 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): '~(flags & kAudioFormatFlagIsPacked)' is a bitwise NOT,
// which is non-zero for both packed and unpacked cases — looks like
// '!' was intended; confirm against upstream before changing.
1267 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1268 testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
1270 testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1271 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1272 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1273 if ( result == noErr ) {
1274 setPhysicalFormat = true;
1275 //std::cout << "Updated physical stream format:" << std::endl;
1276 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1277 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1278 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1279 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
1284 if ( !setPhysicalFormat ) {
1285 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1286 errorText_ = errorStream_.str();
1289 } // done setting virtual/physical formats.
1291 // Get the stream / device latency.
1293 dataSize = sizeof( UInt32 );
1294 property.mSelector = kAudioDevicePropertyLatency;
1295 if ( AudioObjectHasProperty( id, &property ) == true ) {
1296 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1297 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1299 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1300 errorText_ = errorStream_.str();
1301 error( RTAUDIO_WARNING );
1305 // Byte-swapping: According to AudioHardware.h, the stream data will
1306 // always be presented in native-endian format, so we should never
1307 // need to byte swap.
1308 stream_.doByteSwap[mode] = false;
1310 // From the CoreAudio documentation, PCM data must be supplied as
1312 stream_.userFormat = format;
1313 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1315 if ( streamCount == 1 )
1316 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1317 else // multiple streams
1318 stream_.nDeviceChannels[mode] = channels;
1319 stream_.nUserChannels[mode] = channels;
1320 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
1321 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1322 else stream_.userInterleaved = true;
1323 stream_.deviceInterleaved[mode] = true;
1324 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1326 // Set flags for buffer conversion.
1327 stream_.doConvertBuffer[mode] = false;
1328 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1329 stream_.doConvertBuffer[mode] = true;
1330 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1331 stream_.doConvertBuffer[mode] = true;
1332 if ( streamCount == 1 ) {
1333 if ( stream_.nUserChannels[mode] > 1 &&
1334 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1335 stream_.doConvertBuffer[mode] = true;
1337 else if ( monoMode && stream_.userInterleaved )
1338 stream_.doConvertBuffer[mode] = true;
1340 // Allocate our CoreHandle structure for the stream.
// For a duplex stream on one device the handle from the first
// (output) open is reused rather than reallocated.
1341 CoreHandle *handle = 0;
1342 if ( stream_.apiHandle == 0 ) {
1344 handle = new CoreHandle;
1346 catch ( std::bad_alloc& ) {
1347 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1351 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1352 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1355 stream_.apiHandle = (void *) handle;
1358 handle = (CoreHandle *) stream_.apiHandle;
1359 handle->iStream[mode] = firstStream;
1360 handle->nStreams[mode] = streamCount;
1361 handle->id[mode] = id;
1363 // Allocate necessary internal buffers.
1364 unsigned long bufferBytes;
1365 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1366 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1367 if ( stream_.userBuffer[mode] == NULL ) {
1368 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1372 // If possible, we will make use of the CoreAudio stream buffers as
1373 // "device buffers".  However, we can't do this if using multiple
1375 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1377 bool makeBuffer = true;
1378 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1379 if ( mode == INPUT ) {
// In duplex mode, reuse the output device buffer when it is already
// large enough for the input side.
1380 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1381 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1382 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1387 bufferBytes *= *bufferSize;
1388 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1389 stream_.deviceBuffer = (char *) calloc( bufferBytes, sizeof(char) );
1390 if ( stream_.deviceBuffer == NULL ) {
1391 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1397 stream_.sampleRate = sampleRate;
1398 stream_.device[mode] = device;
1399 stream_.state = STREAM_STOPPED;
1400 stream_.callbackInfo.object = (void *) this;
1402 // Setup the buffer conversion information structure.
1403 if ( stream_.doConvertBuffer[mode] ) {
1404 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1405 else setConvertInfo( mode, channelOffset );
1408 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1409 // Only one callback procedure per device.
1410 stream_.mode = DUPLEX;
1412 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1413 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1415 // deprecated in favor of AudioDeviceCreateIOProcID()
1416 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1418 if ( result != noErr ) {
1419 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1420 errorText_ = errorStream_.str();
1423 if ( stream_.mode == OUTPUT && mode == INPUT )
1424 stream_.mode = DUPLEX;
1426 stream_.mode = mode;
1429 // Setup the device property listener for over/underload.
1430 property.mSelector = kAudioDeviceProcessorOverload;
1431 property.mScope = kAudioObjectPropertyScopeGlobal;
1432 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1433 if ( result != noErr ) {
1434 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting xrun listener for device (" << device << ").";
1435 errorText_ = errorStream_.str();
1439 // Setup a listener to detect a possible device disconnect.
1440 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1441 result = AudioObjectAddPropertyListener( id , &property, disconnectListener, (void *) &stream_.callbackInfo );
1442 if ( result != noErr ) {
// Undo the xrun listener registered just above before failing.
1443 AudioObjectRemovePropertyListener( id, &property, xrunListener, (void *) handle );
1444 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting disconnect listener for device (" << device << ").";
1445 errorText_ = errorStream_.str();
// Error-unwind: release everything allocated above. Presumably reached
// only via the error paths (the exit control flow is not fully visible
// in this listing) — TODO confirm against the canonical source.
1453 pthread_cond_destroy( &handle->condition );
1455 stream_.apiHandle = 0;
1458 for ( int i=0; i<2; i++ ) {
1459 if ( stream_.userBuffer[i] ) {
1460 free( stream_.userBuffer[i] );
1461 stream_.userBuffer[i] = 0;
1465 if ( stream_.deviceBuffer ) {
1466 free( stream_.deviceBuffer );
1467 stream_.deviceBuffer = 0;
1471 //stream_.state = STREAM_CLOSED;
// Close the open stream: remove the xrun and disconnect property
// listeners for each active direction, stop the device(s) if the stream
// is running, destroy/remove the IOProc(s), free the user and device
// buffers, and tear down the CoreHandle and its condition variable.
// Emits RTAUDIO_DEVICE_DISCONNECT if the close was triggered by a
// device disconnect.
1475 void RtApiCore :: closeStream( void )
1477 if ( stream_.state == STREAM_CLOSED ) {
1478 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1479 error( RTAUDIO_WARNING );
1483 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output side (handle->id[0]).
1484 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1486 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1487 kAudioObjectPropertyScopeGlobal,
1488 kAudioObjectPropertyElementMaster };
1490 property.mSelector = kAudioDeviceProcessorOverload;
1491 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1492 errorText_ = "RtApiCore::closeStream(): error removing xrun property listener!";
1493 error( RTAUDIO_WARNING );
1495 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1496 if (AudioObjectRemovePropertyListener( handle->id[0], &property, disconnectListener, (void *) &stream_.callbackInfo ) != noErr) {
1497 errorText_ = "RtApiCore::closeStream(): error removing disconnect property listener!";
1498 error( RTAUDIO_WARNING );
1501 if ( stream_.state == STREAM_RUNNING )
1502 AudioDeviceStop( handle->id[0], callbackHandler );
1503 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1504 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1506 // deprecated in favor of AudioDeviceDestroyIOProcID()
1507 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input side (handle->id[1]); skipped for duplex on a single device,
// which shares one IOProc with the output side.
1511 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1513 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1514 kAudioObjectPropertyScopeGlobal,
1515 kAudioObjectPropertyElementMaster };
1517 property.mSelector = kAudioDeviceProcessorOverload;
1518 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1519 errorText_ = "RtApiCore::closeStream(): error removing xrun property listener!";
1520 error( RTAUDIO_WARNING );
1522 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1523 if (AudioObjectRemovePropertyListener( handle->id[1], &property, disconnectListener, (void *) &stream_.callbackInfo ) != noErr) {
1524 errorText_ = "RtApiCore::closeStream(): error removing disconnect property listener!";
1525 error( RTAUDIO_WARNING );
1528 if ( stream_.state == STREAM_RUNNING )
1529 AudioDeviceStop( handle->id[1], callbackHandler );
1530 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1531 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1533 // deprecated in favor of AudioDeviceDestroyIOProcID()
1534 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release internal buffers for both directions.
1538 for ( int i=0; i<2; i++ ) {
1539 if ( stream_.userBuffer[i] ) {
1540 free( stream_.userBuffer[i] );
1541 stream_.userBuffer[i] = 0;
1545 if ( stream_.deviceBuffer ) {
1546 free( stream_.deviceBuffer );
1547 stream_.deviceBuffer = 0;
1550 // Destroy pthread condition variable.
1551 pthread_cond_signal( &handle->condition ); // signal condition variable in case stopStream is blocked
1552 pthread_cond_destroy( &handle->condition );
1554 stream_.apiHandle = 0;
// Surface a disconnect-triggered close to the user as an error event.
1556 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1557 if ( info->deviceDisconnected ) {
1558 errorText_ = "RtApiCore: the stream device was disconnected (and closed)!";
1559 error( RTAUDIO_DEVICE_DISCONNECT );
1563 //stream_.mode = UNINITIALIZED;
1564 //stream_.state = STREAM_CLOSED;
// Start a stopped stream: timestamp the start (when gettimeofday is
// available), start the CoreAudio device(s) via AudioDeviceStart, reset
// the drain state and mark the stream RUNNING.
// Returns RTAUDIO_NO_ERROR on success, a warning if the stream is not
// in a startable state, or RTAUDIO_SYSTEM_ERROR if AudioDeviceStart fails.
1567 RtAudioErrorType RtApiCore :: startStream( void )
1569 if ( stream_.state != STREAM_STOPPED ) {
1570 if ( stream_.state == STREAM_RUNNING )
1571 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1572 else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
1573 errorText_ = "RtApiCore::startStream(): the stream is stopping or closed!";
1574 return error( RTAUDIO_WARNING );
// Record the stream start time (used for stream-time bookkeeping).
1578 #if defined( HAVE_GETTIMEOFDAY )
1579 gettimeofday( &stream_.lastTickTimestamp, NULL );
1583 OSStatus result = noErr;
1584 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1585 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1587 result = AudioDeviceStart( handle->id[0], callbackHandler );
1588 if ( result != noErr ) {
1589 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1590 errorText_ = errorStream_.str();
// Input device started separately only when it differs from the
// output device (same-device duplex shares one IOProc).
1595 if ( stream_.mode == INPUT ||
1596 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1598 // Clear user input buffer
1599 unsigned long bufferBytes;
1600 bufferBytes = stream_.nUserChannels[1] * stream_.bufferSize * formatBytes( stream_.userFormat );
1601 memset( stream_.userBuffer[1], 0, bufferBytes * sizeof(char) );
1603 result = AudioDeviceStart( handle->id[1], callbackHandler );
1604 if ( result != noErr ) {
1605 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1606 errorText_ = errorStream_.str();
1611 handle->drainCounter = 0;
1612 handle->internalDrain = false;
1613 stream_.state = STREAM_RUNNING;
1616 if ( result == noErr ) return RTAUDIO_NO_ERROR;
1617 return error( RTAUDIO_SYSTEM_ERROR );
// Stop a running (or stopping) stream. For an output/duplex stream, if
// no drain is already in progress, a drain is requested (drainCounter = 2)
// and this thread blocks on the handle's condition variable until the
// audio callback signals that output has drained; then the device(s)
// are stopped with AudioDeviceStop.
// NOTE(review): pthread_cond_wait requires stream_.mutex to be held by
// the caller; the lock acquisition is not visible in this listing —
// confirm it occurs in the elided lines above the wait.
1620 RtAudioErrorType RtApiCore :: stopStream( void )
1622 if ( stream_.state != STREAM_RUNNING && stream_.state != STREAM_STOPPING ) {
1623 if ( stream_.state == STREAM_STOPPED )
1624 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1625 else if ( stream_.state == STREAM_CLOSED )
1626 errorText_ = "RtApiCore::stopStream(): the stream is closed!";
1627 return error( RTAUDIO_WARNING );
1630 OSStatus result = noErr;
1631 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1632 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1634 if ( handle->drainCounter == 0 ) {
1635 handle->drainCounter = 2;
1636 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1639 result = AudioDeviceStop( handle->id[0], callbackHandler );
1640 if ( result != noErr ) {
1641 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1642 errorText_ = errorStream_.str();
// Stop the input device only when it differs from the output device.
1647 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1649 result = AudioDeviceStop( handle->id[1], callbackHandler );
1650 if ( result != noErr ) {
1651 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1652 errorText_ = errorStream_.str();
1657 stream_.state = STREAM_STOPPED;
1660 if ( result == noErr ) return RTAUDIO_NO_ERROR;
1661 return error( RTAUDIO_SYSTEM_ERROR );
// Abort a running stream without draining pending output: setting
// drainCounter to 2 up-front makes stopStream() skip its blocking
// drain-wait, so the stream stops immediately.
1664 RtAudioErrorType RtApiCore :: abortStream( void )
1666 if ( stream_.state != STREAM_RUNNING ) {
1667 if ( stream_.state == STREAM_STOPPED )
1668 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1669 else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
1670 errorText_ = "RtApiCore::abortStream(): the stream is stopping or closed!";
1671 return error( RTAUDIO_WARNING );
1675 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1676 handle->drainCounter = 2;
1678 stream_.state = STREAM_STOPPING;
1679 return stopStream();
1682 // This function will be called by a spawned thread when the user
1683 // callback function signals that the stream should be stopped or
1684 // aborted.  It is better to handle it this way because the
1685 // callbackEvent() function probably should return before the AudioDeviceStop()
1686 // function is called.
// Thread entry point: ptr is the stream's CallbackInfo, whose 'object'
// member is the owning RtApiCore instance.
1687 static void *coreStopStream( void *ptr )
1689 CallbackInfo *info = (CallbackInfo *) ptr;
1690 RtApiCore *object = (RtApiCore *) info->object;
1692 object->stopStream();
// Terminate this helper thread; no return value is consumed.
1693 pthread_exit( NULL );
1696 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1697 const AudioBufferList *inBufferList,
1698 const AudioBufferList *outBufferList )
1700 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1701 if ( stream_.state == STREAM_CLOSED ) {
1702 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1703 error( RTAUDIO_WARNING );
1707 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1708 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1710 // Check if we were draining the stream and signal is finished.
1711 if ( handle->drainCounter > 3 ) {
1712 ThreadHandle threadId;
1714 stream_.state = STREAM_STOPPING;
1715 if ( handle->internalDrain == true )
1716 pthread_create( &threadId, NULL, coreStopStream, info );
1717 else // external call to stopStream()
1718 pthread_cond_signal( &handle->condition );
1722 AudioDeviceID outputDevice = handle->id[0];
1724 // Invoke user callback to get fresh output data UNLESS we are
1725 // draining stream or duplex mode AND the input/output devices are
1726 // different AND this function is called for the input device.
1727 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1728 RtAudioCallback callback = (RtAudioCallback) info->callback;
1729 double streamTime = getStreamTime();
1730 RtAudioStreamStatus status = 0;
1731 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1732 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1733 handle->xrun[0] = false;
1735 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1736 status |= RTAUDIO_INPUT_OVERFLOW;
1737 handle->xrun[1] = false;
1740 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1741 stream_.bufferSize, streamTime, status, info->userData );
1742 if ( cbReturnValue == 2 ) {
1746 else if ( cbReturnValue == 1 ) {
1747 handle->drainCounter = 1;
1748 handle->internalDrain = true;
1752 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1754 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1756 if ( handle->nStreams[0] == 1 ) {
1757 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1759 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1761 else { // fill multiple streams with zeros
1762 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1763 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1765 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1769 else if ( handle->nStreams[0] == 1 ) {
1770 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1771 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1772 stream_.userBuffer[0], stream_.convertInfo[0] );
1774 else { // copy from user buffer
1775 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1776 stream_.userBuffer[0],
1777 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1780 else { // fill multiple streams
1781 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1782 if ( stream_.doConvertBuffer[0] ) {
1783 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1784 inBuffer = (Float32 *) stream_.deviceBuffer;
1787 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1788 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1789 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1790 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1791 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1794 else { // fill multiple multi-channel streams with interleaved data
1795 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1798 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1799 UInt32 inChannels = stream_.nUserChannels[0];
1800 if ( stream_.doConvertBuffer[0] ) {
1801 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1802 inChannels = stream_.nDeviceChannels[0];
1805 if ( inInterleaved ) inOffset = 1;
1806 else inOffset = stream_.bufferSize;
1808 channelsLeft = inChannels;
1809 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1811 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1812 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1815 // Account for possible channel offset in first stream
1816 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1817 streamChannels -= stream_.channelOffset[0];
1818 outJump = stream_.channelOffset[0];
1822 // Account for possible unfilled channels at end of the last stream
1823 if ( streamChannels > channelsLeft ) {
1824 outJump = streamChannels - channelsLeft;
1825 streamChannels = channelsLeft;
1828 // Determine input buffer offsets and skips
1829 if ( inInterleaved ) {
1830 inJump = inChannels;
1831 in += inChannels - channelsLeft;
1835 in += (inChannels - channelsLeft) * inOffset;
1838 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1839 for ( unsigned int j=0; j<streamChannels; j++ ) {
1840 *out++ = in[j*inOffset];
1845 channelsLeft -= streamChannels;
1851 // Don't bother draining input
1852 if ( handle->drainCounter ) {
1853 handle->drainCounter++;
1857 AudioDeviceID inputDevice;
1858 inputDevice = handle->id[1];
1859 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1861 if ( handle->nStreams[1] == 1 ) {
1862 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1863 convertBuffer( stream_.userBuffer[1],
1864 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1865 stream_.convertInfo[1] );
1867 else { // copy to user buffer
1868 memcpy( stream_.userBuffer[1],
1869 inBufferList->mBuffers[handle->iStream[1]].mData,
1870 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1873 else { // read from multiple streams
1874 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1875 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1877 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1878 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1879 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1880 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1881 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1884 else { // read from multiple multi-channel streams
1885 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1888 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1889 UInt32 outChannels = stream_.nUserChannels[1];
1890 if ( stream_.doConvertBuffer[1] ) {
1891 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1892 outChannels = stream_.nDeviceChannels[1];
1895 if ( outInterleaved ) outOffset = 1;
1896 else outOffset = stream_.bufferSize;
1898 channelsLeft = outChannels;
1899 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1901 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1902 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1905 // Account for possible channel offset in first stream
1906 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1907 streamChannels -= stream_.channelOffset[1];
1908 inJump = stream_.channelOffset[1];
1912 // Account for possible unread channels at end of the last stream
1913 if ( streamChannels > channelsLeft ) {
1914 inJump = streamChannels - channelsLeft;
1915 streamChannels = channelsLeft;
1918 // Determine output buffer offsets and skips
1919 if ( outInterleaved ) {
1920 outJump = outChannels;
1921 out += outChannels - channelsLeft;
1925 out += (outChannels - channelsLeft) * outOffset;
1928 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1929 for ( unsigned int j=0; j<streamChannels; j++ ) {
1930 out[j*outOffset] = *in++;
1935 channelsLeft -= streamChannels;
1939 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1940 convertBuffer( stream_.userBuffer[1],
1941 stream_.deviceBuffer,
1942 stream_.convertInfo[1] );
1949 // Make sure to only tick duplex stream time once if using two devices
1950 if ( stream_.mode != DUPLEX || (stream_.mode == DUPLEX && handle->id[0] != handle->id[1] && deviceId == handle->id[0] ) )
1951 RtApi::tickStreamTime();
1956 const char* RtApiCore :: getErrorCode( OSStatus code )
1960 case kAudioHardwareNotRunningError:
1961 return "kAudioHardwareNotRunningError";
1963 case kAudioHardwareUnspecifiedError:
1964 return "kAudioHardwareUnspecifiedError";
1966 case kAudioHardwareUnknownPropertyError:
1967 return "kAudioHardwareUnknownPropertyError";
1969 case kAudioHardwareBadPropertySizeError:
1970 return "kAudioHardwareBadPropertySizeError";
1972 case kAudioHardwareIllegalOperationError:
1973 return "kAudioHardwareIllegalOperationError";
1975 case kAudioHardwareBadObjectError:
1976 return "kAudioHardwareBadObjectError";
1978 case kAudioHardwareBadDeviceError:
1979 return "kAudioHardwareBadDeviceError";
1981 case kAudioHardwareBadStreamError:
1982 return "kAudioHardwareBadStreamError";
1984 case kAudioHardwareUnsupportedOperationError:
1985 return "kAudioHardwareUnsupportedOperationError";
1987 case kAudioDeviceUnsupportedFormatError:
1988 return "kAudioDeviceUnsupportedFormatError";
1990 case kAudioDevicePermissionsError:
1991 return "kAudioDevicePermissionsError";
1994 return "CoreAudio unknown error";
1998 //******************** End of __MACOSX_CORE__ *********************//
2001 #if defined(__UNIX_JACK__)
2003 // JACK is a low-latency audio server, originally written for the
2004 // GNU/Linux operating system and now also ported to OS-X. It can
2005 // connect a number of different applications to an audio device, as
2006 // well as allowing them to share audio between themselves.
2008 // When using JACK with RtAudio, "devices" refer to JACK clients that
2009 // have ports connected to the server. The JACK server is typically
2010 // started in a terminal as follows:
// jackd -d alsa -d hw:0
2014 // or through an interface program such as qjackctl. Many of the
2015 // parameters normally set for a stream are fixed by the JACK server
2016 // and can be specified when the JACK server is started. In
// jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2021 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2022 // frames, and number of buffers = 4. Once the server is running, it
2023 // is not possible to override these values. If the values are not
2024 // specified in the command-line, the JACK server uses default values.
2026 // The JACK server does not have to be running when an instance of
2027 // RtApiJack is created, though the function getDeviceCount() will
2028 // report 0 devices found until JACK has been started. When no
2029 // devices are available (i.e., the JACK server is not running), a
2030 // stream cannot be opened.
2032 #include <jack/jack.h>
2036 // A structure to hold various information related to the Jack API
2039 jack_client_t *client;
2040 jack_port_t **ports[2];
2041 std::string deviceName[2];
2043 pthread_cond_t condition;
2044 int drainCounter; // Tracks callback counts when draining
2045 bool internalDrain; // Indicates if stop is initiated from callback or not.
2048 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
#if !defined(__RTAUDIO_DEBUG__)
// No-op error handler installed in the RtApiJack constructor to silence
// JACK's internal error printing in non-debug builds.
static void jackSilentError( const char * ) {}
#endif
2055 RtApiJack :: RtApiJack()
2056 :shouldAutoconnect_(true) {
2057 // Nothing to do here.
2058 #if !defined(__RTAUDIO_DEBUG__)
2059 // Turn off Jack's internal error reporting.
2060 jack_set_error_function( &jackSilentError );
2064 RtApiJack :: ~RtApiJack()
2066 if ( stream_.state != STREAM_CLOSED ) closeStream();
2069 unsigned int RtApiJack :: getDeviceCount( void )
2071 // See if we can become a jack client.
2072 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2073 jack_status_t *status = NULL;
2074 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2075 if ( client == 0 ) return 0;
2078 std::string port, previousPort;
2079 unsigned int nChannels = 0, nDevices = 0;
2080 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2082 // Parse the port names up to the first colon (:).
2085 port = (char *) ports[ nChannels ];
2086 iColon = port.find(":");
2087 if ( iColon != std::string::npos ) {
2088 port = port.substr( 0, iColon + 1 );
2089 if ( port != previousPort ) {
2091 previousPort = port;
2094 } while ( ports[++nChannels] );
2098 jack_client_close( client );
2102 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2104 RtAudio::DeviceInfo info;
2105 info.probed = false;
2107 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2108 jack_status_t *status = NULL;
2109 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2110 if ( client == 0 ) {
2111 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2112 error( RTAUDIO_WARNING );
2117 std::string port, previousPort;
2118 unsigned int nPorts = 0, nDevices = 0;
2119 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2121 // Parse the port names up to the first colon (:).
2124 port = (char *) ports[ nPorts ];
2125 iColon = port.find(":");
2126 if ( iColon != std::string::npos ) {
2127 port = port.substr( 0, iColon );
2128 if ( port != previousPort ) {
2129 if ( nDevices == device ) info.name = port;
2131 previousPort = port;
2134 } while ( ports[++nPorts] );
2138 if ( device >= nDevices ) {
2139 jack_client_close( client );
2140 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2141 error( RtAudioError::INVALID_USE );
2145 // Get the current jack server sample rate.
2146 info.sampleRates.clear();
2148 info.preferredSampleRate = jack_get_sample_rate( client );
2149 info.sampleRates.push_back( info.preferredSampleRate );
2151 // Count the available ports containing the client name as device
2152 // channels. Jack "input ports" equal RtAudio output channels.
2153 unsigned int nChannels = 0;
2154 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2156 while ( ports[ nChannels ] ) nChannels++;
2158 info.outputChannels = nChannels;
2161 // Jack "output ports" equal RtAudio input channels.
2163 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2165 while ( ports[ nChannels ] ) nChannels++;
2167 info.inputChannels = nChannels;
2170 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2171 jack_client_close(client);
2172 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2173 error( RtAudioError::WARNING );
2177 // If device opens for both playback and capture, we determine the channels.
2178 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2179 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2181 // Jack always uses 32-bit floats.
2182 info.nativeFormats = RTAUDIO_FLOAT32;
2184 // Jack doesn't provide default devices so we'll use the first available one.
2185 if ( device == 0 && info.outputChannels > 0 )
2186 info.isDefaultOutput = true;
2187 if ( device == 0 && info.inputChannels > 0 )
2188 info.isDefaultInput = true;
2190 jack_client_close(client);
2195 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2197 CallbackInfo *info = (CallbackInfo *) infoPointer;
2199 RtApiJack *object = (RtApiJack *) info->object;
2200 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2205 // This function will be called by a spawned thread when the Jack
2206 // server signals that it is shutting down. It is necessary to handle
2207 // it this way because the jackShutdown() function must return before
2208 // the jack_deactivate() function (in closeStream()) will return.
2209 static void *jackCloseStream( void *ptr )
2211 CallbackInfo *info = (CallbackInfo *) ptr;
2212 RtApiJack *object = (RtApiJack *) info->object;
2214 object->closeStream();
2216 pthread_exit( NULL );
2218 static void jackShutdown( void *infoPointer )
2220 CallbackInfo *info = (CallbackInfo *) infoPointer;
2221 RtApiJack *object = (RtApiJack *) info->object;
2223 // Check current stream state. If stopped, then we'll assume this
2224 // was called as a result of a call to RtApiJack::stopStream (the
2225 // deactivation of a client handle causes this function to be called).
2226 // If not, we'll assume the Jack server is shutting down or some
2227 // other problem occurred and we should close the stream.
2228 if ( object->isStreamRunning() == false ) return;
2230 ThreadHandle threadId;
2231 pthread_create( &threadId, NULL, jackCloseStream, info );
2232 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2235 static int jackXrun( void *infoPointer )
2237 JackHandle *handle = *((JackHandle **) infoPointer);
2239 if ( handle->ports[0] ) handle->xrun[0] = true;
2240 if ( handle->ports[1] ) handle->xrun[1] = true;
2245 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2246 unsigned int firstChannel, unsigned int sampleRate,
2247 RtAudioFormat format, unsigned int *bufferSize,
2248 RtAudio::StreamOptions *options )
2250 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2252 // Look for jack server and try to become a client (only do once per stream).
2253 jack_client_t *client = 0;
2254 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2255 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2256 jack_status_t *status = NULL;
2257 if ( options && !options->streamName.empty() )
2258 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2260 client = jack_client_open( "RtApiJack", jackoptions, status );
2261 if ( client == 0 ) {
2262 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2263 error( RtAudioError::WARNING );
2268 // The handle must have been created on an earlier pass.
2269 client = handle->client;
2273 std::string port, previousPort, deviceName;
2274 unsigned int nPorts = 0, nDevices = 0;
2275 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2277 // Parse the port names up to the first colon (:).
2280 port = (char *) ports[ nPorts ];
2281 iColon = port.find(":");
2282 if ( iColon != std::string::npos ) {
2283 port = port.substr( 0, iColon );
2284 if ( port != previousPort ) {
2285 if ( nDevices == device ) deviceName = port;
2287 previousPort = port;
2290 } while ( ports[++nPorts] );
2294 if ( device >= nDevices ) {
2295 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2299 unsigned long flag = JackPortIsInput;
2300 if ( mode == INPUT ) flag = JackPortIsOutput;
2302 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2303 // Count the available ports containing the client name as device
2304 // channels. Jack "input ports" equal RtAudio output channels.
2305 unsigned int nChannels = 0;
2306 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2308 while ( ports[ nChannels ] ) nChannels++;
2311 // Compare the jack ports for specified client to the requested number of channels.
2312 if ( nChannels < (channels + firstChannel) ) {
2313 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2314 errorText_ = errorStream_.str();
2319 // Check the jack server sample rate.
2320 unsigned int jackRate = jack_get_sample_rate( client );
2321 if ( sampleRate != jackRate ) {
2322 jack_client_close( client );
2323 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2324 errorText_ = errorStream_.str();
2327 stream_.sampleRate = jackRate;
2329 // Get the latency of the JACK port.
2330 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2331 if ( ports[ firstChannel ] ) {
2333 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2334 // the range (usually the min and max are equal)
2335 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2336 // get the latency range
2337 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2338 // be optimistic, use the min!
2339 stream_.latency[mode] = latrange.min;
2340 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2344 // The jack server always uses 32-bit floating-point data.
2345 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2346 stream_.userFormat = format;
2348 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2349 else stream_.userInterleaved = true;
2351 // Jack always uses non-interleaved buffers.
2352 stream_.deviceInterleaved[mode] = false;
2354 // Jack always provides host byte-ordered data.
2355 stream_.doByteSwap[mode] = false;
2357 // Get the buffer size. The buffer size and number of buffers
2358 // (periods) is set when the jack server is started.
2359 stream_.bufferSize = (int) jack_get_buffer_size( client );
2360 *bufferSize = stream_.bufferSize;
2362 stream_.nDeviceChannels[mode] = channels;
2363 stream_.nUserChannels[mode] = channels;
2365 // Set flags for buffer conversion.
2366 stream_.doConvertBuffer[mode] = false;
2367 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2368 stream_.doConvertBuffer[mode] = true;
2369 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2370 stream_.nUserChannels[mode] > 1 )
2371 stream_.doConvertBuffer[mode] = true;
2373 // Allocate our JackHandle structure for the stream.
2374 if ( handle == 0 ) {
2376 handle = new JackHandle;
2378 catch ( std::bad_alloc& ) {
2379 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2383 if ( pthread_cond_init(&handle->condition, NULL) ) {
2384 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2387 stream_.apiHandle = (void *) handle;
2388 handle->client = client;
2390 handle->deviceName[mode] = deviceName;
2392 // Allocate necessary internal buffers.
2393 unsigned long bufferBytes;
2394 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2395 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2396 if ( stream_.userBuffer[mode] == NULL ) {
2397 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2401 if ( stream_.doConvertBuffer[mode] ) {
2403 bool makeBuffer = true;
2404 if ( mode == OUTPUT )
2405 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2406 else { // mode == INPUT
2407 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2408 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2409 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2410 if ( bufferBytes < bytesOut ) makeBuffer = false;
2415 bufferBytes *= *bufferSize;
2416 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2417 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2418 if ( stream_.deviceBuffer == NULL ) {
2419 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2425 // Allocate memory for the Jack ports (channels) identifiers.
2426 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2427 if ( handle->ports[mode] == NULL ) {
2428 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2432 stream_.device[mode] = device;
2433 stream_.channelOffset[mode] = firstChannel;
2434 stream_.state = STREAM_STOPPED;
2435 stream_.callbackInfo.object = (void *) this;
2437 if ( stream_.mode == OUTPUT && mode == INPUT )
2438 // We had already set up the stream for output.
2439 stream_.mode = DUPLEX;
2441 stream_.mode = mode;
2442 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2443 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2444 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2447 // Register our ports.
2449 if ( mode == OUTPUT ) {
2450 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2451 snprintf( label, 64, "outport %d", i );
2452 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2453 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2457 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2458 snprintf( label, 64, "inport %d", i );
2459 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2460 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2464 // Setup the buffer conversion information structure. We don't use
2465 // buffers to do channel offsets, so we override that parameter
2467 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2469 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
2475 pthread_cond_destroy( &handle->condition );
2476 jack_client_close( handle->client );
2478 if ( handle->ports[0] ) free( handle->ports[0] );
2479 if ( handle->ports[1] ) free( handle->ports[1] );
2482 stream_.apiHandle = 0;
2485 for ( int i=0; i<2; i++ ) {
2486 if ( stream_.userBuffer[i] ) {
2487 free( stream_.userBuffer[i] );
2488 stream_.userBuffer[i] = 0;
2492 if ( stream_.deviceBuffer ) {
2493 free( stream_.deviceBuffer );
2494 stream_.deviceBuffer = 0;
2500 void RtApiJack :: closeStream( void )
2502 if ( stream_.state == STREAM_CLOSED ) {
2503 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2504 error( RtAudioError::WARNING );
2508 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2511 if ( stream_.state == STREAM_RUNNING )
2512 jack_deactivate( handle->client );
2514 jack_client_close( handle->client );
2518 if ( handle->ports[0] ) free( handle->ports[0] );
2519 if ( handle->ports[1] ) free( handle->ports[1] );
2520 pthread_cond_destroy( &handle->condition );
2522 stream_.apiHandle = 0;
2525 for ( int i=0; i<2; i++ ) {
2526 if ( stream_.userBuffer[i] ) {
2527 free( stream_.userBuffer[i] );
2528 stream_.userBuffer[i] = 0;
2532 if ( stream_.deviceBuffer ) {
2533 free( stream_.deviceBuffer );
2534 stream_.deviceBuffer = 0;
2537 stream_.mode = UNINITIALIZED;
2538 stream_.state = STREAM_CLOSED;
2541 void RtApiJack :: startStream( void )
2544 if ( stream_.state == STREAM_RUNNING ) {
2545 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2546 error( RtAudioError::WARNING );
2550 #if defined( HAVE_GETTIMEOFDAY )
2551 gettimeofday( &stream_.lastTickTimestamp, NULL );
2554 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2555 int result = jack_activate( handle->client );
2557 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2563 // Get the list of available ports.
2564 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2566 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2567 if ( ports == NULL) {
2568 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2572 // Now make the port connections. Since RtAudio wasn't designed to
2573 // allow the user to select particular channels of a device, we'll
2574 // just open the first "nChannels" ports with offset.
2575 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2577 if ( ports[ stream_.channelOffset[0] + i ] )
2578 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2581 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2588 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2590 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2591 if ( ports == NULL) {
2592 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2596 // Now make the port connections. See note above.
2597 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2599 if ( ports[ stream_.channelOffset[1] + i ] )
2600 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2603 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
2610 handle->drainCounter = 0;
2611 handle->internalDrain = false;
2612 stream_.state = STREAM_RUNNING;
2615 if ( result == 0 ) return;
2616 error( RtAudioError::RTAUDIO_SYSTEM_ERROR );
2619 void RtApiJack :: stopStream( void )
2622 if ( stream_.state == STREAM_STOPPED ) {
2623 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2624 error( RtAudioError::WARNING );
2628 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2629 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2631 if ( handle->drainCounter == 0 ) {
2632 handle->drainCounter = 2;
2633 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2637 jack_deactivate( handle->client );
2638 stream_.state = STREAM_STOPPED;
2641 void RtApiJack :: abortStream( void )
2644 if ( stream_.state == STREAM_STOPPED ) {
2645 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2646 error( RtAudioError::WARNING );
2650 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2651 handle->drainCounter = 2;
2656 // This function will be called by a spawned thread when the user
2657 // callback function signals that the stream should be stopped or
2658 // aborted. It is necessary to handle it this way because the
2659 // callbackEvent() function must return before the jack_deactivate()
2660 // function will return.
2661 static void *jackStopStream( void *ptr )
2663 CallbackInfo *info = (CallbackInfo *) ptr;
2664 RtApiJack *object = (RtApiJack *) info->object;
2666 object->stopStream();
2667 pthread_exit( NULL );
2670 bool RtApiJack :: callbackEvent( unsigned long nframes )
2672 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2673 if ( stream_.state == STREAM_CLOSED ) {
2674 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2675 error( RtAudioError::WARNING );
2678 if ( stream_.bufferSize != nframes ) {
2679 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2680 error( RtAudioError::WARNING );
2684 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2685 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2687 // Check if we were draining the stream and signal is finished.
2688 if ( handle->drainCounter > 3 ) {
2689 ThreadHandle threadId;
2691 stream_.state = STREAM_STOPPING;
2692 if ( handle->internalDrain == true )
2693 pthread_create( &threadId, NULL, jackStopStream, info );
2695 pthread_cond_signal( &handle->condition );
2699 // Invoke user callback first, to get fresh output data.
2700 if ( handle->drainCounter == 0 ) {
2701 RtAudioCallback callback = (RtAudioCallback) info->callback;
2702 double streamTime = getStreamTime();
2703 RtAudioStreamStatus status = 0;
2704 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2705 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2706 handle->xrun[0] = false;
2708 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2709 status |= RTAUDIO_INPUT_OVERFLOW;
2710 handle->xrun[1] = false;
2712 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2713 stream_.bufferSize, streamTime, status, info->userData );
2714 if ( cbReturnValue == 2 ) {
2715 stream_.state = STREAM_STOPPING;
2716 handle->drainCounter = 2;
2718 pthread_create( &id, NULL, jackStopStream, info );
2721 else if ( cbReturnValue == 1 ) {
2722 handle->drainCounter = 1;
2723 handle->internalDrain = true;
2727 jack_default_audio_sample_t *jackbuffer;
2728 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2729 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2731 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2733 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2734 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2735 memset( jackbuffer, 0, bufferBytes );
2739 else if ( stream_.doConvertBuffer[0] ) {
2741 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2743 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2744 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2745 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2748 else { // no buffer conversion
2749 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2750 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2751 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2756 // Don't bother draining input
2757 if ( handle->drainCounter ) {
2758 handle->drainCounter++;
2762 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2764 if ( stream_.doConvertBuffer[1] ) {
2765 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2766 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2767 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2769 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2771 else { // no buffer conversion
2772 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2773 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2774 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2780 RtApi::tickStreamTime();
2783 //******************** End of __UNIX_JACK__ *********************//
2786 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2788 // The ASIO API is designed around a callback scheme, so this
2789 // implementation is similar to that used for OS-X CoreAudio and Linux
2790 // Jack. The primary constraint with ASIO is that it only allows
2791 // access to a single driver at a time. Thus, it is not possible to
2792 // have more than one simultaneous RtAudio stream.
2794 // This implementation also requires a number of external ASIO files
2795 // and a few global variables. The ASIO callback scheme does not
2796 // allow for the passing of user data, so we must create a global
2797 // pointer to our callbackInfo structure.
2799 // On unix systems, we make use of a pthread condition variable.
2800 // Since there is no equivalent in Windows, I hacked something based
2801 // on information found in
2802 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2804 #include "asiosys.h"
2806 #include "iasiothiscallresolver.h"
2807 #include "asiodrivers.h"
2810 static AsioDrivers drivers;
2811 static ASIOCallbacks asioCallbacks;
2812 static ASIODriverInfo driverInfo;
2813 static CallbackInfo *asioCallbackInfo;
2814 static bool asioXRun;
2817 int drainCounter; // Tracks callback counts when draining
2818 bool internalDrain; // Indicates if stop is initiated from callback or not.
2819 ASIOBufferInfo *bufferInfos;
2823 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2826 // Function declarations (definitions at end of section)
2827 static const char* getAsioErrorString( ASIOError result );
2828 static void sampleRateChanged( ASIOSampleRate sRate );
2829 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM (single-threaded apartment, required by
// ASIO), clear any stale driver selection, and pre-fill the ASIODriverInfo
// used by later ASIOInit() calls.
// NOTE(review): this listing drops lines here (e.g. the brace/FAILED(hr)
// check that should surround the warning below).
2831 RtApiAsio :: RtApiAsio()
2833 // ASIO cannot run on a multi-threaded apartment. You can call
2834 // CoInitialize beforehand, but it must be for apartment threading
2835 // (in which case, CoInitialize will return S_FALSE here).
2836 coInitialized_ = false;
2837 HRESULT hr = CoInitialize( NULL );
2839 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2840 error( RtAudioError::WARNING );
2842 coInitialized_ = true;
// Ensure no driver is selected before the first load.
2844 drivers.removeCurrentDriver();
2845 driverInfo.asioVersion = 2;
2847 // See note in DirectSound implementation about GetDesktopWindow().
2848 driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream, then balance the CoInitialize()
// performed in the constructor.
2851 RtApiAsio :: ~RtApiAsio()
2853 if ( stream_.state != STREAM_CLOSED ) closeStream();
2854 if ( coInitialized_ ) CoUninitialize();
// Return the number of installed ASIO drivers; each driver is presented
// as one RtAudio "device".
2857 unsigned int RtApiAsio :: getDeviceCount( void )
2859 return (unsigned int) drivers.asioGetNumDev();
// Probe one ASIO device (driver) and return its capabilities.  Probing
// requires loading and initializing the driver, which ASIO forbids while
// another driver is active — so with an open stream, results saved earlier
// by saveDeviceInfo() are returned instead.  On most failure paths a
// WARNING is raised and 'info' is returned with probed == false (the early
// returns themselves were dropped from this listing).
2862 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2864 RtAudio::DeviceInfo info;
2865 info.probed = false;
2868 unsigned int nDevices = getDeviceCount();
2869 if ( nDevices == 0 ) {
2870 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2871 error( RtAudioError::INVALID_USE );
2875 if ( device >= nDevices ) {
2876 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2877 error( RtAudioError::INVALID_USE );
2881 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2882 if ( stream_.state != STREAM_CLOSED ) {
2883 if ( device >= devices_.size() ) {
2884 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2885 error( RtAudioError::WARNING );
2888 return devices_[ device ];
2891 char driverName[32];
2892 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2893 if ( result != ASE_OK ) {
2894 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2895 errorText_ = errorStream_.str();
2896 error( RtAudioError::WARNING );
2900 info.name = driverName;
2902 if ( !drivers.loadDriver( driverName ) ) {
2903 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2904 errorText_ = errorStream_.str();
2905 error( RtAudioError::WARNING );
2909 result = ASIOInit( &driverInfo );
2910 if ( result != ASE_OK ) {
2911 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2912 errorText_ = errorStream_.str();
2913 error( RtAudioError::WARNING );
2917 // Determine the device channel information.
2918 long inputChannels, outputChannels;
2919 result = ASIOGetChannels( &inputChannels, &outputChannels );
2920 if ( result != ASE_OK ) {
2921 drivers.removeCurrentDriver();
2922 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2923 errorText_ = errorStream_.str();
2924 error( RtAudioError::WARNING );
2928 info.outputChannels = outputChannels;
2929 info.inputChannels = inputChannels;
2930 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2931 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2933 // Determine the supported sample rates.
2934 info.sampleRates.clear();
2935 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2936 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2937 if ( result == ASE_OK ) {
2938 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz
// (or the first supported rate found, if none qualify yet).
2940 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2941 info.preferredSampleRate = SAMPLE_RATES[i];
2945 // Determine supported data types ... just check first channel and assume rest are the same.
2946 ASIOChannelInfo channelInfo;
2947 channelInfo.channel = 0;
2948 channelInfo.isInput = true;
2949 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2950 result = ASIOGetChannelInfo( &channelInfo );
2951 if ( result != ASE_OK ) {
2952 drivers.removeCurrentDriver();
2953 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2954 errorText_ = errorStream_.str();
2955 error( RtAudioError::WARNING );
// Map the ASIO sample type onto the RtAudio format flags.
2959 info.nativeFormats = 0;
2960 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2961 info.nativeFormats |= RTAUDIO_SINT16;
2962 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2963 info.nativeFormats |= RTAUDIO_SINT32;
2964 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2965 info.nativeFormats |= RTAUDIO_FLOAT32;
2966 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2967 info.nativeFormats |= RTAUDIO_FLOAT64;
2968 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2969 info.nativeFormats |= RTAUDIO_SINT24;
2971 if ( info.outputChannels > 0 )
2972 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2973 if ( info.inputChannels > 0 )
2974 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Unload the driver now that probing is complete.
2977 drivers.removeCurrentDriver();
// ASIO buffer-switch callback: forward to the RtApiAsio instance stored in
// the global asioCallbackInfo (the ASIO API has no user-data argument).
// 'index' selects which half of the double buffer is ready.
2981 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2983 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2984 object->callbackEvent( index );
// Snapshot the info for every device into devices_.  Called before a
// stream is opened, because getDeviceInfo() cannot probe while a stream
// (and therefore a driver) is active.
2987 void RtApiAsio :: saveDeviceInfo( void )
2991 unsigned int nDevices = getDeviceCount();
2992 devices_.resize( nDevices );
2993 for ( unsigned int i=0; i<nDevices; i++ )
2994 devices_[i] = getDeviceInfo( i );
// Open and configure the ASIO driver for the requested mode (OUTPUT,
// INPUT, or the input half of a DUPLEX open).  ASIO allows only one
// driver at a time, so a duplex stream must use the same device for both
// directions and the driver is loaded only on the first (non-duplex-
// input) call.  Returns FAILURE on error; the error/cleanup paths (goto
// targets, returns) were dropped from this listing but the cleanup code
// itself appears at the end of the function.
2997 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2998                                    unsigned int firstChannel, unsigned int sampleRate,
2999                                    RtAudioFormat format, unsigned int *bufferSize,
3000                                    RtAudio::StreamOptions *options )
3001 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3003 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
3005 // For ASIO, a duplex stream MUST use the same driver.
3006 if ( isDuplexInput && stream_.device[0] != device ) {
3007 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
3011 char driverName[32];
3012 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
3013 if ( result != ASE_OK ) {
3014 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
3015 errorText_ = errorStream_.str();
3019 // Only load the driver once for duplex stream.
3020 if ( !isDuplexInput ) {
3021 // The getDeviceInfo() function will not work when a stream is open
3022 // because ASIO does not allow multiple devices to run at the same
3023 // time. Thus, we'll probe the system before opening a stream and
3024 // save the results for use by getDeviceInfo().
3025 this->saveDeviceInfo();
3027 if ( !drivers.loadDriver( driverName ) ) {
3028 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
3029 errorText_ = errorStream_.str();
3033 result = ASIOInit( &driverInfo );
3034 if ( result != ASE_OK ) {
3035 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3036 errorText_ = errorStream_.str();
3041 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3042 bool buffersAllocated = false;
3043 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3044 unsigned int nChannels;
3047 // Check the device channel count.
3048 long inputChannels, outputChannels;
3049 result = ASIOGetChannels( &inputChannels, &outputChannels );
3050 if ( result != ASE_OK ) {
3051 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3052 errorText_ = errorStream_.str();
3056 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3057 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3058 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3059 errorText_ = errorStream_.str();
3062 stream_.nDeviceChannels[mode] = channels;
3063 stream_.nUserChannels[mode] = channels;
3064 stream_.channelOffset[mode] = firstChannel;
3066 // Verify the sample rate is supported.
3067 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3068 if ( result != ASE_OK ) {
3069 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3070 errorText_ = errorStream_.str();
3074 // Get the current sample rate
3075 ASIOSampleRate currentRate;
// FIX: "&currentRate" had been mangled to the mojibake "¤tRate"
// (HTML-entity corruption of "&curren"); restored the address-of argument.
3076 result = ASIOGetSampleRate( &currentRate );
3077 if ( result != ASE_OK ) {
3078 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3079 errorText_ = errorStream_.str();
3083 // Set the sample rate only if necessary
3084 if ( currentRate != sampleRate ) {
3085 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3086 if ( result != ASE_OK ) {
3087 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3088 errorText_ = errorStream_.str();
3093 // Determine the driver data type.
3094 ASIOChannelInfo channelInfo;
3095 channelInfo.channel = 0;
3096 if ( mode == OUTPUT ) channelInfo.isInput = false;
3097 else channelInfo.isInput = true;
3098 result = ASIOGetChannelInfo( &channelInfo );
3099 if ( result != ASE_OK ) {
3100 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3101 errorText_ = errorStream_.str();
3105 // Assuming WINDOWS host is always little-endian.
3106 stream_.doByteSwap[mode] = false;
3107 stream_.userFormat = format;
3108 stream_.deviceFormat[mode] = 0;
3109 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3110 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3111 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3113 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3114 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3115 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3117 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3118 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3119 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3121 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3122 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3123 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3125 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3126 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3127 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3130 if ( stream_.deviceFormat[mode] == 0 ) {
3131 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3132 errorText_ = errorStream_.str();
3136 // Set the buffer size. For a duplex stream, this will end up
3137 // setting the buffer size based on the input constraints, which
3139 long minSize, maxSize, preferSize, granularity;
3140 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3141 if ( result != ASE_OK ) {
3142 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3143 errorText_ = errorStream_.str();
3147 if ( isDuplexInput ) {
3148 // When this is the duplex input (output was opened before), then we have to use the same
3149 // buffersize as the output, because it might use the preferred buffer size, which most
3150 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3151 // So instead of throwing an error, make them equal. The caller uses the reference
3152 // to the "bufferSize" param as usual to set up processing buffers.
3154 *bufferSize = stream_.bufferSize;
3157 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3158 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3159 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3160 else if ( granularity == -1 ) {
3161 // Make sure bufferSize is a power of two.
3162 int log2_of_min_size = 0;
3163 int log2_of_max_size = 0;
3165 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3166 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3167 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
// Pick the power of two within [2^log2_of_min_size, 2^log2_of_max_size]
// closest to the requested size.
3170 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3171 int min_delta_num = log2_of_min_size;
3173 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3174 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3175 if (current_delta < min_delta) {
3176 min_delta = current_delta;
3181 *bufferSize = ( (unsigned int)1 << min_delta_num );
3182 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3183 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3185 else if ( granularity != 0 ) {
3186 // Set to an even multiple of granularity, rounding up.
3187 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3192 // we don't use it anymore, see above!
3193 // Just left it here for the case...
3194 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3195 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3200 stream_.bufferSize = *bufferSize;
3201 stream_.nBuffers = 2;
3203 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3204 else stream_.userInterleaved = true;
3206 // ASIO always uses non-interleaved buffers.
3207 stream_.deviceInterleaved[mode] = false;
3209 // Allocate, if necessary, our AsioHandle structure for the stream.
3210 if ( handle == 0 ) {
3212 handle = new AsioHandle;
3214 catch ( std::bad_alloc& ) {
3215 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3218 handle->bufferInfos = 0;
3220 // Create a manual-reset event.
3221 handle->condition = CreateEvent( NULL, // no security
3222 TRUE, // manual-reset
3223 FALSE, // non-signaled initially
3225 stream_.apiHandle = (void *) handle;
3228 // Create the ASIO internal buffers. Since RtAudio sets up input
3229 // and output separately, we'll have to dispose of previously
3230 // created output buffers for a duplex stream.
3231 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3232 ASIODisposeBuffers();
3233 if ( handle->bufferInfos ) free( handle->bufferInfos );
3236 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3238 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3239 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3240 if ( handle->bufferInfos == NULL ) {
3241 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3242 errorText_ = errorStream_.str();
// Output channel entries first, then input channel entries.
3246 ASIOBufferInfo *infos;
3247 infos = handle->bufferInfos;
3248 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3249 infos->isInput = ASIOFalse;
3250 infos->channelNum = i + stream_.channelOffset[0];
3251 infos->buffers[0] = infos->buffers[1] = 0;
3253 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3254 infos->isInput = ASIOTrue;
3255 infos->channelNum = i + stream_.channelOffset[1];
3256 infos->buffers[0] = infos->buffers[1] = 0;
3259 // prepare for callbacks
3260 stream_.sampleRate = sampleRate;
3261 stream_.device[mode] = device;
3262 stream_.mode = isDuplexInput ? DUPLEX : mode;
3264 // store this class instance before registering callbacks, that are going to use it
3265 asioCallbackInfo = &stream_.callbackInfo;
3266 stream_.callbackInfo.object = (void *) this;
3268 // Set up the ASIO callback structure and create the ASIO data buffers.
3269 asioCallbacks.bufferSwitch = &bufferSwitch;
3270 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3271 asioCallbacks.asioMessage = &asioMessages;
3272 asioCallbacks.bufferSwitchTimeInfo = NULL;
3273 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3274 if ( result != ASE_OK ) {
3275 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3276 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3277 // In that case, let's be naïve and try that instead.
3278 *bufferSize = preferSize;
3279 stream_.bufferSize = *bufferSize;
3280 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3283 if ( result != ASE_OK ) {
3284 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3285 errorText_ = errorStream_.str();
3288 buffersAllocated = true;
3289 stream_.state = STREAM_STOPPED;
3291 // Set flags for buffer conversion.
3292 stream_.doConvertBuffer[mode] = false;
3293 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3294 stream_.doConvertBuffer[mode] = true;
3295 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3296 stream_.nUserChannels[mode] > 1 )
3297 stream_.doConvertBuffer[mode] = true;
3299 // Allocate necessary internal buffers
3300 unsigned long bufferBytes;
3301 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3302 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3303 if ( stream_.userBuffer[mode] == NULL ) {
3304 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3308 if ( stream_.doConvertBuffer[mode] ) {
// Reuse the duplex output's device buffer if it is already big enough.
3310 bool makeBuffer = true;
3311 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3312 if ( isDuplexInput && stream_.deviceBuffer ) {
3313 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3314 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3318 bufferBytes *= *bufferSize;
3319 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3320 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3321 if ( stream_.deviceBuffer == NULL ) {
3322 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3328 // Determine device latencies
3329 long inputLatency, outputLatency;
3330 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3331 if ( result != ASE_OK ) {
3332 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3333 errorText_ = errorStream_.str();
3334 error( RtAudioError::WARNING); // warn but don't fail
3337 stream_.latency[0] = outputLatency;
3338 stream_.latency[1] = inputLatency;
3341 // Setup the buffer conversion information structure. We don't use
3342 // buffers to do channel offsets, so we override that parameter
3344 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// Error cleanup (reached via the dropped "goto error" paths above).
3349 if ( !isDuplexInput ) {
3350 // the cleanup for error in the duplex input, is done by RtApi::openStream
3351 // So we clean up for single channel only
3353 if ( buffersAllocated )
3354 ASIODisposeBuffers();
3356 drivers.removeCurrentDriver();
3359 CloseHandle( handle->condition );
3360 if ( handle->bufferInfos )
3361 free( handle->bufferInfos );
3364 stream_.apiHandle = 0;
3368 if ( stream_.userBuffer[mode] ) {
3369 free( stream_.userBuffer[mode] );
3370 stream_.userBuffer[mode] = 0;
3373 if ( stream_.deviceBuffer ) {
3374 free( stream_.deviceBuffer );
3375 stream_.deviceBuffer = 0;
3380 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Tear down the open stream: stop if running, dispose ASIO buffers,
// unload the driver, and free the AsioHandle plus all user/device buffers.
3382 void RtApiAsio :: closeStream()
3384 if ( stream_.state == STREAM_CLOSED ) {
3385 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3386 error( RtAudioError::WARNING );
3390 if ( stream_.state == STREAM_RUNNING ) {
3391 stream_.state = STREAM_STOPPED;
3394 ASIODisposeBuffers();
3395 drivers.removeCurrentDriver();
3397 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3399 CloseHandle( handle->condition );
3400 if ( handle->bufferInfos )
3401 free( handle->bufferInfos );
3403 stream_.apiHandle = 0;
// Free the per-direction user buffers ([0] = output, [1] = input).
3406 for ( int i=0; i<2; i++ ) {
3407 if ( stream_.userBuffer[i] ) {
3408 free( stream_.userBuffer[i] );
3409 stream_.userBuffer[i] = 0;
3413 if ( stream_.deviceBuffer ) {
3414 free( stream_.deviceBuffer );
3415 stream_.deviceBuffer = 0;
3418 stream_.mode = UNINITIALIZED;
3419 stream_.state = STREAM_CLOSED;
// Cleared on every (re)start below; presumably consulted by the stop path
// to avoid double-stopping — its reader is not visible in this listing.
3422 bool stopThreadCalled = false;
// Start the open stream: reset the drain bookkeeping and the stop-signal
// event, then call ASIOStart().
3424 void RtApiAsio :: startStream()
3427 if ( stream_.state == STREAM_RUNNING ) {
3428 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3429 error( RtAudioError::WARNING );
3433 #if defined( HAVE_GETTIMEOFDAY )
3434 gettimeofday( &stream_.lastTickTimestamp, NULL );
3437 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3438 ASIOError result = ASIOStart();
3439 if ( result != ASE_OK ) {
3440 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3441 errorText_ = errorStream_.str();
3445 handle->drainCounter = 0;
3446 handle->internalDrain = false;
3447 ResetEvent( handle->condition );
3448 stream_.state = STREAM_RUNNING;
3452 stopThreadCalled = false;
3454 if ( result == ASE_OK ) return;
3455 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream.  For output/duplex, first request a drain
// (drainCounter = 2 makes callbackEvent() write zeros) and block on the
// handle's event until the callback signals completion, then ASIOStop().
3458 void RtApiAsio :: stopStream()
3461 if ( stream_.state == STREAM_STOPPED ) {
3462 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3463 error( RtAudioError::WARNING );
3467 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3468 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3469 if ( handle->drainCounter == 0 ) {
3470 handle->drainCounter = 2;
3471 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3475 stream_.state = STREAM_STOPPED;
3477 ASIOError result = ASIOStop();
3478 if ( result != ASE_OK ) {
3479 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3480 errorText_ = errorStream_.str();
3483 if ( result == ASE_OK ) return;
3484 error( RtAudioError::SYSTEM_ERROR );
// Abort is deliberately identical to stop for ASIO (see the retained
// commented-out fast path below); the dropped tail of this function
// presumably calls stopStream().
3487 void RtApiAsio :: abortStream()
3490 if ( stream_.state == STREAM_STOPPED ) {
3491 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3492 error( RtAudioError::WARNING );
3496 // The following lines were commented-out because some behavior was
3497 // noted where the device buffers need to be zeroed to avoid
3498 // continuing sound, even when the device buffers are completely
3499 // disposed. So now, calling abort is the same as calling stop.
3500 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3501 // handle->drainCounter = 2;
3505 // This function will be called by a spawned thread when the user
3506 // callback function signals that the stream should be stopped or
3507 // aborted. It is necessary to handle it this way because the
3508 // callbackEvent() function must return before the ASIOStop()
3509 // function will return.
// Thread entry point: calls stopStream() from outside the ASIO callback
// (ASIOStop() cannot be invoked from within callbackEvent(); see the
// explanatory comment above this function).
3510 static unsigned __stdcall asioStopStream( void *ptr )
3512 CallbackInfo *info = (CallbackInfo *) ptr;
3513 RtApiAsio *object = (RtApiAsio *) info->object;
3515 object->stopStream();
// Per-buffer workhorse invoked from bufferSwitch().  Runs the user
// callback, then copies/converts/byte-swaps between the user buffers and
// the ASIO per-channel device buffers for the half selected by
// 'bufferIndex'.  Also manages stream draining: drainCounter counts
// zero-filled output buffers, and once it exceeds 3 the stop is signaled
// (event) or delegated to a spawned asioStopStream thread.
3520 bool RtApiAsio :: callbackEvent( long bufferIndex )
3522 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3523 if ( stream_.state == STREAM_CLOSED ) {
3524 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3525 error( RtAudioError::WARNING );
3529 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3530 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3532 // Check if we were draining the stream and signal if finished.
3533 if ( handle->drainCounter > 3 ) {
3535 stream_.state = STREAM_STOPPING;
3536 if ( handle->internalDrain == false )
3537 SetEvent( handle->condition );
3538 else { // spawn a thread to stop the stream
3540 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3541 &stream_.callbackInfo, 0, &threadId );
3546 // Invoke user callback to get fresh output data UNLESS we are
// draining (drainCounter != 0).
3548 if ( handle->drainCounter == 0 ) {
3549 RtAudioCallback callback = (RtAudioCallback) info->callback;
3550 double streamTime = getStreamTime();
3551 RtAudioStreamStatus status = 0;
3552 if ( stream_.mode != INPUT && asioXRun == true ) {
3553 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3556 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3557 status |= RTAUDIO_INPUT_OVERFLOW;
3560 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3561 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort immediately; 1 = drain output then stop.
3562 if ( cbReturnValue == 2 ) {
3563 stream_.state = STREAM_STOPPING;
3564 handle->drainCounter = 2;
3566 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3567 &stream_.callbackInfo, 0, &threadId );
3570 else if ( cbReturnValue == 1 ) {
3571 handle->drainCounter = 1;
3572 handle->internalDrain = true;
// --- Output direction: user buffer -> ASIO device buffers ---
3576 unsigned int nChannels, bufferBytes, i, j;
3577 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3578 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3580 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3582 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3584 for ( i=0, j=0; i<nChannels; i++ ) {
3585 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3586 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3590 else if ( stream_.doConvertBuffer[0] ) {
3592 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3593 if ( stream_.doByteSwap[0] )
3594 byteSwapBuffer( stream_.deviceBuffer,
3595 stream_.bufferSize * stream_.nDeviceChannels[0],
3596 stream_.deviceFormat[0] );
3598 for ( i=0, j=0; i<nChannels; i++ ) {
3599 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3600 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3601 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// No conversion needed: copy straight from the user buffer.
3607 if ( stream_.doByteSwap[0] )
3608 byteSwapBuffer( stream_.userBuffer[0],
3609 stream_.bufferSize * stream_.nUserChannels[0],
3610 stream_.userFormat );
3612 for ( i=0, j=0; i<nChannels; i++ ) {
3613 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3614 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3615 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3621 // Don't bother draining input
3622 if ( handle->drainCounter ) {
3623 handle->drainCounter++;
// --- Input direction: ASIO device buffers -> user buffer ---
3627 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3629 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3631 if (stream_.doConvertBuffer[1]) {
3633 // Always interleave ASIO input data.
3634 for ( i=0, j=0; i<nChannels; i++ ) {
3635 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3636 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3637 handle->bufferInfos[i].buffers[bufferIndex],
3641 if ( stream_.doByteSwap[1] )
3642 byteSwapBuffer( stream_.deviceBuffer,
3643 stream_.bufferSize * stream_.nDeviceChannels[1],
3644 stream_.deviceFormat[1] );
3645 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// No conversion needed: copy straight into the user buffer.
3649 for ( i=0, j=0; i<nChannels; i++ ) {
3650 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3651 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3652 handle->bufferInfos[i].buffers[bufferIndex],
3657 if ( stream_.doByteSwap[1] )
3658 byteSwapBuffer( stream_.userBuffer[1],
3659 stream_.bufferSize * stream_.nUserChannels[1],
3660 stream_.userFormat );
3665 // The following call was suggested by Malte Clasen. While the API
3666 // documentation indicates it should not be required, some device
3667 // drivers apparently do not function correctly without it.
// (The ASIOOutputReady() call itself was dropped from this listing.)
3670 RtApi::tickStreamTime();
// ASIO sampleRateDidChange callback: stop the stream (via the globally
// stored RtApi instance) and report the new rate on stderr, since RtAudio
// cannot adapt an open stream to a rate change mid-flight.
3674 static void sampleRateChanged( ASIOSampleRate sRate )
3676 // The ASIO documentation says that this usually only happens during
3677 // external sync. Audio processing is not stopped by the driver,
3678 // actual sample rate might not have even changed, maybe only the
3679 // sample rate status of an AES/EBU or S/PDIF digital input at the
3682 RtApi *object = (RtApi *) asioCallbackInfo->object;
3684 object->stopStream();
3686 catch ( RtAudioError &exception ) {
3687 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3691 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO asioMessage callback: answers driver queries about which selectors
// the host supports and reacts to driver notifications.  (The per-case
// return-value assignments were dropped from this listing.)
3694 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3698 switch( selector ) {
3699 case kAsioSelectorSupported:
3700 if ( value == kAsioResetRequest
3701 || value == kAsioEngineVersion
3702 || value == kAsioResyncRequest
3703 || value == kAsioLatenciesChanged
3704 // The following three were added for ASIO 2.0, you don't
3705 // necessarily have to support them.
3706 || value == kAsioSupportsTimeInfo
3707 || value == kAsioSupportsTimeCode
3708 || value == kAsioSupportsInputMonitor)
3711 case kAsioResetRequest:
3712 // Defer the task and perform the reset of the driver during the
3713 // next "safe" situation. You cannot reset the driver right now,
3714 // as this code is called from the driver. Resetting the driver is
3715 // done by completely destructing it, i.e. ASIOStop(),
3716 // ASIODisposeBuffers(), destruction. Afterwards you initialize the
3718 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3721 case kAsioResyncRequest:
3722 // This informs the application that the driver encountered some
3723 // non-fatal data loss. It is used for synchronization purposes
3724 // of different media. Added mainly to work around the Win16Mutex
3725 // problems in Windows 95/98 with the Windows Multimedia system,
3726 // which could lose data because the Mutex was held too long by
3727 // another thread. However a driver can issue it in other
3729 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3733 case kAsioLatenciesChanged:
3734 // This will inform the host application that the drivers were
3735 // latencies changed. Beware, this does not mean that the
3736 // buffer sizes have changed! You might need to update internal
3738 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3741 case kAsioEngineVersion:
3742 // Return the supported ASIO version of the host application. If
3743 // a host application does not implement this selector, ASIO 1.0
3744 // is assumed by the driver.
3747 case kAsioSupportsTimeInfo:
3748 // Informs the driver whether the
3749 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3750 // For compatibility with ASIO 1.0 drivers the host application
3751 // should always support the "old" bufferSwitch method, too.
3754 case kAsioSupportsTimeCode:
3755 // Informs the driver whether application is interested in time
3756 // code info. If an application does not need to know about time
3757 // code, the driver has less work to do.
// Map an ASIOError code to a human-readable message.
// NOTE(review): the local "struct Messages" declaration (members 'value'
// and 'message', per the usage below) was dropped from this listing.
3764 static const char* getAsioErrorString( ASIOError result )
3772 static const Messages m[] =
3774 { ASE_NotPresent, "Hardware input or output is not present or available." },
3775 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3776 { ASE_InvalidParameter, "Invalid input parameter." },
3777 { ASE_InvalidMode, "Invalid mode." },
3778 { ASE_SPNotAdvancing, "Sample position not advancing." },
3779 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3780 { ASE_NoMemory, "Not enough memory to complete the request." }
3783 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3784 if ( m[i].value == result ) return m[i].message;
3786 return "Unknown error.";
3789 //******************** End of __WINDOWS_ASIO__ *********************//
3793 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3795 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3796 // - Introduces support for the Windows WASAPI API
3797 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3798 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3799 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3806 #include <mferror.h>
3808 #include <mftransform.h>
3809 #include <wmcodecdsp.h>
3811 #include <audioclient.h>
3813 #include <mmdeviceapi.h>
3814 #include <functiondiscoverykeys_devpkey.h>
// Fallback definitions for older Windows SDKs that lack these
// Media Foundation symbols.
#ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
#define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
#endif

#ifndef MFSTARTUP_NOSOCKET
#define MFSTARTUP_NOSOCKET 0x1
#endif
3825 #pragma comment( lib, "ksuser" )
3826 #pragma comment( lib, "mfplat.lib" )
3827 #pragma comment( lib, "mfuuid.lib" )
3828 #pragma comment( lib, "wmcodecdspuuid" )
3831 //=============================================================================
// Release a COM-style interface pointer if non-NULL, then reset it to
// NULL so repeated SAFE_RELEASE calls on the same pointer are no-ops.
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
3840 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3842 //-----------------------------------------------------------------------------
3844 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3845 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3846 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3847 // provide intermediate storage for read / write synchronization.
3861 // sets the length of the internal ring buffer
3862 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3865 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3867 bufferSize_ = bufferSize;
3872 // attempt to push a buffer into the ring buffer at the current "in" index
3873 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3875 if ( !buffer || // incoming buffer is NULL
3876 bufferSize == 0 || // incoming buffer has no data
3877 bufferSize > bufferSize_ ) // incoming buffer too large
3882 unsigned int relOutIndex = outIndex_;
3883 unsigned int inIndexEnd = inIndex_ + bufferSize;
3884 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3885 relOutIndex += bufferSize_;
3888 // the "IN" index CAN BEGIN at the "OUT" index
3889 // the "IN" index CANNOT END at the "OUT" index
3890 if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
3891 return false; // not enough space between "in" index and "out" index
3894 // copy buffer from external to internal
3895 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3896 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3897 int fromInSize = bufferSize - fromZeroSize;
3902 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3903 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3905 case RTAUDIO_SINT16:
3906 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3907 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3909 case RTAUDIO_SINT24:
3910 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3911 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3913 case RTAUDIO_SINT32:
3914 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3915 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3917 case RTAUDIO_FLOAT32:
3918 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3919 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3921 case RTAUDIO_FLOAT64:
3922 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3923 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3927 // update "in" index
3928 inIndex_ += bufferSize;
3929 inIndex_ %= bufferSize_;
3934 // attempt to pull a buffer from the ring buffer from the current "out" index
3935 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3937 if ( !buffer || // incoming buffer is NULL
3938 bufferSize == 0 || // incoming buffer has no data
3939 bufferSize > bufferSize_ ) // incoming buffer too large
3944 unsigned int relInIndex = inIndex_;
3945 unsigned int outIndexEnd = outIndex_ + bufferSize;
3946 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3947 relInIndex += bufferSize_;
3950 // the "OUT" index CANNOT BEGIN at the "IN" index
3951 // the "OUT" index CAN END at the "IN" index
3952 if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3953 return false; // not enough space between "out" index and "in" index
3956 // copy buffer from internal to external
3957 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3958 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3959 int fromOutSize = bufferSize - fromZeroSize;
3964 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3965 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3967 case RTAUDIO_SINT16:
3968 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3969 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3971 case RTAUDIO_SINT24:
3972 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3973 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3975 case RTAUDIO_SINT32:
3976 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3977 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3979 case RTAUDIO_FLOAT32:
3980 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3981 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3983 case RTAUDIO_FLOAT64:
3984 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3985 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3989 // update "out" index
3990 outIndex_ += bufferSize;
3991 outIndex_ %= bufferSize_;
3998 unsigned int bufferSize_;
3999 unsigned int inIndex_;
4000 unsigned int outIndex_;
4003 //-----------------------------------------------------------------------------
4005 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
4006 // between HW and the user. The WasapiResampler class is used to perform this conversion between
4007 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
4008 class WasapiResampler
4011 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
4012 unsigned int inSampleRate, unsigned int outSampleRate )
4013 : _bytesPerSample( bitsPerSample / 8 )
4014 , _channelCount( channelCount )
4015 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
4016 , _transformUnk( NULL )
4017 , _transform( NULL )
4018 , _mediaType( NULL )
4019 , _inputMediaType( NULL )
4020 , _outputMediaType( NULL )
4022 #ifdef __IWMResamplerProps_FWD_DEFINED__
4023 , _resamplerProps( NULL )
4026 // 1. Initialization
4028 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
4030 // 2. Create Resampler Transform Object
4032 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
4033 IID_IUnknown, ( void** ) &_transformUnk );
4035 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
4037 #ifdef __IWMResamplerProps_FWD_DEFINED__
4038 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
4039 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
4042 // 3. Specify input / output format
4044 MFCreateMediaType( &_mediaType );
4045 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4046 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4047 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4048 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4049 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4050 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4051 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4052 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
4054 MFCreateMediaType( &_inputMediaType );
4055 _mediaType->CopyAllItems( _inputMediaType );
4057 _transform->SetInputType( 0, _inputMediaType, 0 );
4059 MFCreateMediaType( &_outputMediaType );
4060 _mediaType->CopyAllItems( _outputMediaType );
4062 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4063 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4065 _transform->SetOutputType( 0, _outputMediaType, 0 );
4067 // 4. Send stream start messages to Resampler
4069 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4070 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4071 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
4076 // 8. Send stream stop messages to Resampler
4078 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4079 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
4085 SAFE_RELEASE( _transformUnk );
4086 SAFE_RELEASE( _transform );
4087 SAFE_RELEASE( _mediaType );
4088 SAFE_RELEASE( _inputMediaType );
4089 SAFE_RELEASE( _outputMediaType );
4091 #ifdef __IWMResamplerProps_FWD_DEFINED__
4092 SAFE_RELEASE( _resamplerProps );
4096 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
4098 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4099 if ( _sampleRatio == 1 )
4101 // no sample rate conversion required
4102 memcpy( outBuffer, inBuffer, inputBufferSize );
4103 outSampleCount = inSampleCount;
4107 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4109 IMFMediaBuffer* rInBuffer;
4110 IMFSample* rInSample;
4111 BYTE* rInByteBuffer = NULL;
4113 // 5. Create Sample object from input data
4115 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4117 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4118 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4119 rInBuffer->Unlock();
4120 rInByteBuffer = NULL;
4122 rInBuffer->SetCurrentLength( inputBufferSize );
4124 MFCreateSample( &rInSample );
4125 rInSample->AddBuffer( rInBuffer );
4127 // 6. Pass input data to Resampler
4129 _transform->ProcessInput( 0, rInSample, 0 );
4131 SAFE_RELEASE( rInBuffer );
4132 SAFE_RELEASE( rInSample );
4134 // 7. Perform sample rate conversion
4136 IMFMediaBuffer* rOutBuffer = NULL;
4137 BYTE* rOutByteBuffer = NULL;
4139 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4141 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4143 // 7.1 Create Sample object for output data
4145 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4146 MFCreateSample( &( rOutDataBuffer.pSample ) );
4147 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4148 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4149 rOutDataBuffer.dwStreamID = 0;
4150 rOutDataBuffer.dwStatus = 0;
4151 rOutDataBuffer.pEvents = NULL;
4153 // 7.2 Get output data from Resampler
4155 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4158 SAFE_RELEASE( rOutBuffer );
4159 SAFE_RELEASE( rOutDataBuffer.pSample );
4163 // 7.3 Write output data to outBuffer
4165 SAFE_RELEASE( rOutBuffer );
4166 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4167 rOutBuffer->GetCurrentLength( &rBytes );
4169 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4170 memcpy( outBuffer, rOutByteBuffer, rBytes );
4171 rOutBuffer->Unlock();
4172 rOutByteBuffer = NULL;
4174 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4175 SAFE_RELEASE( rOutBuffer );
4176 SAFE_RELEASE( rOutDataBuffer.pSample );
4180 unsigned int _bytesPerSample;
4181 unsigned int _channelCount;
4184 IUnknown* _transformUnk;
4185 IMFTransform* _transform;
4186 IMFMediaType* _mediaType;
4187 IMFMediaType* _inputMediaType;
4188 IMFMediaType* _outputMediaType;
4190 #ifdef __IWMResamplerProps_FWD_DEFINED__
4191 IWMResamplerProps* _resamplerProps;
4195 //-----------------------------------------------------------------------------
4197 // A structure to hold various information related to the WASAPI implementation.
4200 IAudioClient* captureAudioClient;
4201 IAudioClient* renderAudioClient;
4202 IAudioCaptureClient* captureClient;
4203 IAudioRenderClient* renderClient;
4204 HANDLE captureEvent;
4208 : captureAudioClient( NULL ),
4209 renderAudioClient( NULL ),
4210 captureClient( NULL ),
4211 renderClient( NULL ),
4212 captureEvent( NULL ),
4213 renderEvent( NULL ) {}
4216 //=============================================================================
4218 RtApiWasapi::RtApiWasapi()
4219 : coInitialized_( false ), deviceEnumerator_( NULL )
4221 // WASAPI can run either apartment or multi-threaded
4222 HRESULT hr = CoInitialize( NULL );
4223 if ( !FAILED( hr ) )
4224 coInitialized_ = true;
4226 // Instantiate device enumerator
4227 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4228 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4229 ( void** ) &deviceEnumerator_ );
4231 // If this runs on an old Windows, it will fail. Ignore and proceed.
4233 deviceEnumerator_ = NULL;
4236 //-----------------------------------------------------------------------------
4238 RtApiWasapi::~RtApiWasapi()
4240 if ( stream_.state != STREAM_CLOSED )
4243 SAFE_RELEASE( deviceEnumerator_ );
4245 // If this object previously called CoInitialize()
4246 if ( coInitialized_ )
4250 //=============================================================================
4252 unsigned int RtApiWasapi::getDeviceCount( void )
4254 unsigned int captureDeviceCount = 0;
4255 unsigned int renderDeviceCount = 0;
4257 IMMDeviceCollection* captureDevices = NULL;
4258 IMMDeviceCollection* renderDevices = NULL;
4260 if ( !deviceEnumerator_ )
4263 // Count capture devices
4265 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4266 if ( FAILED( hr ) ) {
4267 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4271 hr = captureDevices->GetCount( &captureDeviceCount );
4272 if ( FAILED( hr ) ) {
4273 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4277 // Count render devices
4278 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4279 if ( FAILED( hr ) ) {
4280 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4284 hr = renderDevices->GetCount( &renderDeviceCount );
4285 if ( FAILED( hr ) ) {
4286 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4291 // release all references
4292 SAFE_RELEASE( captureDevices );
4293 SAFE_RELEASE( renderDevices );
4295 if ( errorText_.empty() )
4296 return captureDeviceCount + renderDeviceCount;
4298 error( RtAudioError::DRIVER_ERROR );
4302 //-----------------------------------------------------------------------------
4304 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4306 RtAudio::DeviceInfo info;
4307 unsigned int captureDeviceCount = 0;
4308 unsigned int renderDeviceCount = 0;
4309 std::string defaultDeviceName;
4310 bool isCaptureDevice = false;
4312 PROPVARIANT deviceNameProp;
4313 PROPVARIANT defaultDeviceNameProp;
4315 IMMDeviceCollection* captureDevices = NULL;
4316 IMMDeviceCollection* renderDevices = NULL;
4317 IMMDevice* devicePtr = NULL;
4318 IMMDevice* defaultDevicePtr = NULL;
4319 IAudioClient* audioClient = NULL;
4320 IPropertyStore* devicePropStore = NULL;
4321 IPropertyStore* defaultDevicePropStore = NULL;
4323 WAVEFORMATEX* deviceFormat = NULL;
4324 WAVEFORMATEX* closestMatchFormat = NULL;
4327 info.probed = false;
4329 // Count capture devices
4331 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4332 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4333 if ( FAILED( hr ) ) {
4334 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4338 hr = captureDevices->GetCount( &captureDeviceCount );
4339 if ( FAILED( hr ) ) {
4340 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4344 // Count render devices
4345 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4346 if ( FAILED( hr ) ) {
4347 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4351 hr = renderDevices->GetCount( &renderDeviceCount );
4352 if ( FAILED( hr ) ) {
4353 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4357 // validate device index
4358 if ( device >= captureDeviceCount + renderDeviceCount ) {
4359 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4360 errorType = RtAudioError::INVALID_USE;
4364 // determine whether index falls within capture or render devices
4365 if ( device >= renderDeviceCount ) {
4366 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4367 if ( FAILED( hr ) ) {
4368 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4371 isCaptureDevice = true;
4374 hr = renderDevices->Item( device, &devicePtr );
4375 if ( FAILED( hr ) ) {
4376 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4379 isCaptureDevice = false;
4382 // get default device name
4383 if ( isCaptureDevice ) {
4384 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4385 if ( FAILED( hr ) ) {
4386 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4391 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4392 if ( FAILED( hr ) ) {
4393 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4398 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4399 if ( FAILED( hr ) ) {
4400 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4403 PropVariantInit( &defaultDeviceNameProp );
4405 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4406 if ( FAILED( hr ) ) {
4407 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4411 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4414 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4415 if ( FAILED( hr ) ) {
4416 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4420 PropVariantInit( &deviceNameProp );
4422 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4423 if ( FAILED( hr ) ) {
4424 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4428 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4431 if ( isCaptureDevice ) {
4432 info.isDefaultInput = info.name == defaultDeviceName;
4433 info.isDefaultOutput = false;
4436 info.isDefaultInput = false;
4437 info.isDefaultOutput = info.name == defaultDeviceName;
4441 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4442 if ( FAILED( hr ) ) {
4443 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4447 hr = audioClient->GetMixFormat( &deviceFormat );
4448 if ( FAILED( hr ) ) {
4449 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4453 if ( isCaptureDevice ) {
4454 info.inputChannels = deviceFormat->nChannels;
4455 info.outputChannels = 0;
4456 info.duplexChannels = 0;
4459 info.inputChannels = 0;
4460 info.outputChannels = deviceFormat->nChannels;
4461 info.duplexChannels = 0;
4465 info.sampleRates.clear();
4467 // allow support for all sample rates as we have a built-in sample rate converter
4468 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4469 info.sampleRates.push_back( SAMPLE_RATES[i] );
4471 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4474 info.nativeFormats = 0;
4476 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4477 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4478 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4480 if ( deviceFormat->wBitsPerSample == 32 ) {
4481 info.nativeFormats |= RTAUDIO_FLOAT32;
4483 else if ( deviceFormat->wBitsPerSample == 64 ) {
4484 info.nativeFormats |= RTAUDIO_FLOAT64;
4487 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4488 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4489 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4491 if ( deviceFormat->wBitsPerSample == 8 ) {
4492 info.nativeFormats |= RTAUDIO_SINT8;
4494 else if ( deviceFormat->wBitsPerSample == 16 ) {
4495 info.nativeFormats |= RTAUDIO_SINT16;
4497 else if ( deviceFormat->wBitsPerSample == 24 ) {
4498 info.nativeFormats |= RTAUDIO_SINT24;
4500 else if ( deviceFormat->wBitsPerSample == 32 ) {
4501 info.nativeFormats |= RTAUDIO_SINT32;
4509 // release all references
4510 PropVariantClear( &deviceNameProp );
4511 PropVariantClear( &defaultDeviceNameProp );
4513 SAFE_RELEASE( captureDevices );
4514 SAFE_RELEASE( renderDevices );
4515 SAFE_RELEASE( devicePtr );
4516 SAFE_RELEASE( defaultDevicePtr );
4517 SAFE_RELEASE( audioClient );
4518 SAFE_RELEASE( devicePropStore );
4519 SAFE_RELEASE( defaultDevicePropStore );
4521 CoTaskMemFree( deviceFormat );
4522 CoTaskMemFree( closestMatchFormat );
4524 if ( !errorText_.empty() )
4529 //-----------------------------------------------------------------------------
4531 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4533 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4534 if ( getDeviceInfo( i ).isDefaultOutput ) {
4542 //-----------------------------------------------------------------------------
4544 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4546 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4547 if ( getDeviceInfo( i ).isDefaultInput ) {
4555 //-----------------------------------------------------------------------------
4557 void RtApiWasapi::closeStream( void )
4559 if ( stream_.state == STREAM_CLOSED ) {
4560 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4561 error( RtAudioError::WARNING );
4565 if ( stream_.state != STREAM_STOPPED )
4568 // clean up stream memory
4569 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4570 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4572 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4573 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4575 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4576 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4578 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4579 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4581 delete ( WasapiHandle* ) stream_.apiHandle;
4582 stream_.apiHandle = NULL;
4584 for ( int i = 0; i < 2; i++ ) {
4585 if ( stream_.userBuffer[i] ) {
4586 free( stream_.userBuffer[i] );
4587 stream_.userBuffer[i] = 0;
4591 if ( stream_.deviceBuffer ) {
4592 free( stream_.deviceBuffer );
4593 stream_.deviceBuffer = 0;
4596 // update stream state
4597 stream_.state = STREAM_CLOSED;
4600 //-----------------------------------------------------------------------------
4602 void RtApiWasapi::startStream( void )
4606 if ( stream_.state == STREAM_RUNNING ) {
4607 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4608 error( RtAudioError::WARNING );
4612 #if defined( HAVE_GETTIMEOFDAY )
4613 gettimeofday( &stream_.lastTickTimestamp, NULL );
4616 // update stream state
4617 stream_.state = STREAM_RUNNING;
4619 // create WASAPI stream thread
4620 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4622 if ( !stream_.callbackInfo.thread ) {
4623 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4624 error( RtAudioError::THREAD_ERROR );
4627 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4628 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4632 //-----------------------------------------------------------------------------
4634 void RtApiWasapi::stopStream( void )
4638 if ( stream_.state == STREAM_STOPPED ) {
4639 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4640 error( RtAudioError::WARNING );
4644 // inform stream thread by setting stream state to STREAM_STOPPING
4645 stream_.state = STREAM_STOPPING;
4647 // wait until stream thread is stopped
4648 while( stream_.state != STREAM_STOPPED ) {
4652 // Wait for the last buffer to play before stopping.
4653 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4655 // close thread handle
4656 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4657 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4658 error( RtAudioError::THREAD_ERROR );
4662 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4665 //-----------------------------------------------------------------------------
4667 void RtApiWasapi::abortStream( void )
4671 if ( stream_.state == STREAM_STOPPED ) {
4672 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4673 error( RtAudioError::WARNING );
4677 // inform stream thread by setting stream state to STREAM_STOPPING
4678 stream_.state = STREAM_STOPPING;
4680 // wait until stream thread is stopped
4681 while ( stream_.state != STREAM_STOPPED ) {
4685 // close thread handle
4686 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4687 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4688 error( RtAudioError::THREAD_ERROR );
4692 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4695 //-----------------------------------------------------------------------------
4697 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4698 unsigned int firstChannel, unsigned int sampleRate,
4699 RtAudioFormat format, unsigned int* bufferSize,
4700 RtAudio::StreamOptions* options )
4702 bool methodResult = FAILURE;
4703 unsigned int captureDeviceCount = 0;
4704 unsigned int renderDeviceCount = 0;
4706 IMMDeviceCollection* captureDevices = NULL;
4707 IMMDeviceCollection* renderDevices = NULL;
4708 IMMDevice* devicePtr = NULL;
4709 WAVEFORMATEX* deviceFormat = NULL;
4710 unsigned int bufferBytes;
4711 stream_.state = STREAM_STOPPED;
4713 // create API Handle if not already created
4714 if ( !stream_.apiHandle )
4715 stream_.apiHandle = ( void* ) new WasapiHandle();
4717 // Count capture devices
4719 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4720 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4721 if ( FAILED( hr ) ) {
4722 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4726 hr = captureDevices->GetCount( &captureDeviceCount );
4727 if ( FAILED( hr ) ) {
4728 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4732 // Count render devices
4733 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4734 if ( FAILED( hr ) ) {
4735 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4739 hr = renderDevices->GetCount( &renderDeviceCount );
4740 if ( FAILED( hr ) ) {
4741 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4745 // validate device index
4746 if ( device >= captureDeviceCount + renderDeviceCount ) {
4747 errorType = RtAudioError::INVALID_USE;
4748 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4752 // if device index falls within capture devices
4753 if ( device >= renderDeviceCount ) {
4754 if ( mode != INPUT ) {
4755 errorType = RtAudioError::INVALID_USE;
4756 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4760 // retrieve captureAudioClient from devicePtr
4761 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4763 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4764 if ( FAILED( hr ) ) {
4765 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4769 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4770 NULL, ( void** ) &captureAudioClient );
4771 if ( FAILED( hr ) ) {
4772 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4776 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4777 if ( FAILED( hr ) ) {
4778 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4782 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4783 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4786 // if device index falls within render devices and is configured for loopback
4787 if ( device < renderDeviceCount && mode == INPUT )
4789 // if renderAudioClient is not initialised, initialise it now
4790 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4791 if ( !renderAudioClient )
4793 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4796 // retrieve captureAudioClient from devicePtr
4797 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4799 hr = renderDevices->Item( device, &devicePtr );
4800 if ( FAILED( hr ) ) {
4801 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4805 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4806 NULL, ( void** ) &captureAudioClient );
4807 if ( FAILED( hr ) ) {
4808 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4812 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4813 if ( FAILED( hr ) ) {
4814 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4818 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4819 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4822 // if device index falls within render devices and is configured for output
4823 if ( device < renderDeviceCount && mode == OUTPUT )
4825 // if renderAudioClient is already initialised, don't initialise it again
4826 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4827 if ( renderAudioClient )
4829 methodResult = SUCCESS;
4833 hr = renderDevices->Item( device, &devicePtr );
4834 if ( FAILED( hr ) ) {
4835 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4839 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4840 NULL, ( void** ) &renderAudioClient );
4841 if ( FAILED( hr ) ) {
4842 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4846 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4847 if ( FAILED( hr ) ) {
4848 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4852 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4853 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4857 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4858 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4859 stream_.mode = DUPLEX;
4862 stream_.mode = mode;
4865 stream_.device[mode] = device;
4866 stream_.doByteSwap[mode] = false;
4867 stream_.sampleRate = sampleRate;
4868 stream_.bufferSize = *bufferSize;
4869 stream_.nBuffers = 1;
4870 stream_.nUserChannels[mode] = channels;
4871 stream_.channelOffset[mode] = firstChannel;
4872 stream_.userFormat = format;
4873 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4875 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4876 stream_.userInterleaved = false;
4878 stream_.userInterleaved = true;
4879 stream_.deviceInterleaved[mode] = true;
4881 // Set flags for buffer conversion.
4882 stream_.doConvertBuffer[mode] = false;
4883 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4884 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4885 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4886 stream_.doConvertBuffer[mode] = true;
4887 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4888 stream_.nUserChannels[mode] > 1 )
4889 stream_.doConvertBuffer[mode] = true;
4891 if ( stream_.doConvertBuffer[mode] )
4892 setConvertInfo( mode, 0 );
4894 // Allocate necessary internal buffers
4895 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4897 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4898 if ( !stream_.userBuffer[mode] ) {
4899 errorType = RtAudioError::MEMORY_ERROR;
4900 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4904 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4905 stream_.callbackInfo.priority = 15;
4907 stream_.callbackInfo.priority = 0;
4909 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4910 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4912 methodResult = SUCCESS;
4916 SAFE_RELEASE( captureDevices );
4917 SAFE_RELEASE( renderDevices );
4918 SAFE_RELEASE( devicePtr );
4919 CoTaskMemFree( deviceFormat );
4921 // if method failed, close the stream
4922 if ( methodResult == FAILURE )
4925 if ( !errorText_.empty() )
4927 return methodResult;
4930 //=============================================================================
// Static thread entry point for the WASAPI stream-processing thread.
// The void* argument carries the RtApiWasapi instance; the call below
// transfers control into its wasapiThread() member loop.
4932 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4935 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
// Static thread entry point used when the user callback requests a stop
// (return value 1): stopStream() must run on a thread other than the audio
// thread itself, so wasapiThread() spawns this helper via CreateThread.
4940 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4943 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
// Static thread entry point used when the user callback requests an abort
// (return value 2): mirrors stopWasapiThread() but calls abortStream().
4948 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4951 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4956 //-----------------------------------------------------------------------------
// Core WASAPI streaming thread.  Lazily initializes the capture and/or
// render IAudioClient services stored in the WasapiHandle, allocates the
// intermediate conversion buffers, then loops: pull captured audio, run
// the user callback, resample/convert, and push rendered audio, until the
// stream state leaves the running state.  Errors set errorText/errorType
// and are reported after the loop via errorText_.
4958 void RtApiWasapi::wasapiThread()
4960 // as this is a new thread, we must CoInitialize it
4961 CoInitialize( NULL );
4965 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4966 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4967 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4968 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4969 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4970 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4972 WAVEFORMATEX* captureFormat = NULL;
4973 WAVEFORMATEX* renderFormat = NULL;
4974 float captureSrRatio = 0.0f;
4975 float renderSrRatio = 0.0f;
4976 WasapiBuffer captureBuffer;
4977 WasapiBuffer renderBuffer;
4978 WasapiResampler* captureResampler = NULL;
4979 WasapiResampler* renderResampler = NULL;
4981 // declare local stream variables
4982 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4983 BYTE* streamBuffer = NULL;
4984 unsigned long captureFlags = 0;
4985 unsigned int bufferFrameCount = 0;
4986 unsigned int numFramesPadding = 0;
4987 unsigned int convBufferSize = 0;
// Loopback mode is inferred from the same device index being used for both
// input and output directions.
4988 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4989 bool callbackPushed = true;
4990 bool callbackPulled = false;
4991 bool callbackStopped = false;
4992 int callbackResult = 0;
4994 // convBuffer is used to store converted buffers between WASAPI and the user
4995 char* convBuffer = NULL;
4996 unsigned int convBuffSize = 0;
4997 unsigned int deviceBuffSize = 0;
4999 std::string errorText;
5000 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
5002 // Attempt to assign "Pro Audio" characteristic to thread
// NOTE(review): the narrow "AVRT.dll" literal is force-cast to LPCTSTR; in a
// UNICODE build LoadLibrary expects a wide string — confirm this behaves as
// intended.  Also, GetProcAddress's result is presumably non-NULL when the
// DLL loads — verify a NULL check isn't needed before the call below.
5003 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
5005 DWORD taskIndex = 0;
5006 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
5007 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
5008 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
5009 FreeLibrary( AvrtDll );
5012 // start capture stream if applicable
5013 if ( captureAudioClient ) {
5014 hr = captureAudioClient->GetMixFormat( &captureFormat );
5015 if ( FAILED( hr ) ) {
5016 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5020 // init captureResampler
5021 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
5022 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
5023 captureFormat->nSamplesPerSec, stream_.sampleRate );
5025 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
5027 if ( !captureClient ) {
// Loopback capture taps the render endpoint, so the event-callback flag is
// replaced by AUDCLNT_STREAMFLAGS_LOOPBACK in that mode.
5028 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5029 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5034 if ( FAILED( hr ) ) {
5035 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5039 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5040 ( void** ) &captureClient );
5041 if ( FAILED( hr ) ) {
5042 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5046 // don't configure captureEvent if in loopback mode
5047 if ( !loopbackEnabled )
5049 // configure captureEvent to trigger on every available capture buffer
5050 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5051 if ( !captureEvent ) {
5052 errorType = RtAudioError::SYSTEM_ERROR;
5053 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5057 hr = captureAudioClient->SetEventHandle( captureEvent );
5058 if ( FAILED( hr ) ) {
5059 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
5063 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5066 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5068 // reset the capture stream
5069 hr = captureAudioClient->Reset();
5070 if ( FAILED( hr ) ) {
5071 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5075 // start the capture stream
5076 hr = captureAudioClient->Start();
5077 if ( FAILED( hr ) ) {
5078 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5083 unsigned int inBufferSize = 0;
5084 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5085 if ( FAILED( hr ) ) {
5086 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5090 // scale outBufferSize according to stream->user sample rate ratio
5091 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5092 inBufferSize *= stream_.nDeviceChannels[INPUT];
5094 // set captureBuffer size
5095 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5098 // start render stream if applicable
5099 if ( renderAudioClient ) {
5100 hr = renderAudioClient->GetMixFormat( &renderFormat );
5101 if ( FAILED( hr ) ) {
5102 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5106 // init renderResampler
5107 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5108 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5109 stream_.sampleRate, renderFormat->nSamplesPerSec );
5111 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5113 if ( !renderClient ) {
5114 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5115 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5120 if ( FAILED( hr ) ) {
5121 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5125 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5126 ( void** ) &renderClient );
5127 if ( FAILED( hr ) ) {
5128 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5132 // configure renderEvent to trigger on every available render buffer
5133 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5134 if ( !renderEvent ) {
5135 errorType = RtAudioError::SYSTEM_ERROR;
5136 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5140 hr = renderAudioClient->SetEventHandle( renderEvent );
5141 if ( FAILED( hr ) ) {
5142 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5146 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5147 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5149 // reset the render stream
5150 hr = renderAudioClient->Reset();
5151 if ( FAILED( hr ) ) {
5152 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5156 // start the render stream
5157 hr = renderAudioClient->Start();
5158 if ( FAILED( hr ) ) {
5159 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5164 unsigned int outBufferSize = 0;
5165 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5166 if ( FAILED( hr ) ) {
5167 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5171 // scale inBufferSize according to user->stream sample rate ratio
5172 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5173 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5175 // set renderBuffer size
5176 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5179 // malloc buffer memory
// Sizing depends on the stream direction; DUPLEX takes the max of the two
// directions so one shared buffer serves both.
5180 if ( stream_.mode == INPUT )
5182 using namespace std; // for ceilf
5183 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5184 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5186 else if ( stream_.mode == OUTPUT )
5188 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5189 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5191 else if ( stream_.mode == DUPLEX )
5193 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5194 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5195 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5196 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5199 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5200 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5201 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5202 if ( !convBuffer || !stream_.deviceBuffer ) {
5203 errorType = RtAudioError::MEMORY_ERROR;
5204 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5208 // stream process loop
// Runs until an external request (stopStream/abortStream) moves the stream
// state to STREAM_STOPPING.
5209 while ( stream_.state != STREAM_STOPPING ) {
5210 if ( !callbackPulled ) {
5213 // 1. Pull callback buffer from inputBuffer
5214 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5215 // Convert callback buffer to user format
5217 if ( captureAudioClient )
5219 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5220 if ( captureSrRatio != 1 )
5222 // account for remainders
5227 while ( convBufferSize < stream_.bufferSize )
5229 // Pull callback buffer from inputBuffer
5230 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5231 samplesToPull * stream_.nDeviceChannels[INPUT],
5232 stream_.deviceFormat[INPUT] );
5234 if ( !callbackPulled )
5239 // Convert callback buffer to user sample rate
5240 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5241 unsigned int convSamples = 0;
5243 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5248 convBufferSize += convSamples;
5249 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5252 if ( callbackPulled )
5254 if ( stream_.doConvertBuffer[INPUT] ) {
5255 // Convert callback buffer to user format
5256 convertBuffer( stream_.userBuffer[INPUT],
5257 stream_.deviceBuffer,
5258 stream_.convertInfo[INPUT] );
5261 // no further conversion, simple copy deviceBuffer to userBuffer
5262 memcpy( stream_.userBuffer[INPUT],
5263 stream_.deviceBuffer,
5264 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5269 // if there is no capture stream, set callbackPulled flag
5270 callbackPulled = true;
5275 // 1. Execute user callback method
5276 // 2. Handle return value from callback
5278 // if callback has not requested the stream to stop
5279 if ( callbackPulled && !callbackStopped ) {
5280 // Execute user callback method
// The input-overflow status flag is derived from WASAPI's data-discontinuity
// flag returned by the last IAudioCaptureClient::GetBuffer call.
5281 callbackResult = callback( stream_.userBuffer[OUTPUT],
5282 stream_.userBuffer[INPUT],
5285 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5286 stream_.callbackInfo.userData );
5289 RtApi::tickStreamTime();
5291 // Handle return value from callback
// Stop/abort must happen on a separate thread: this thread is joined during
// stopStream(), so calling it directly here would deadlock.
5292 if ( callbackResult == 1 ) {
5293 // instantiate a thread to stop this thread
5294 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5295 if ( !threadHandle ) {
5296 errorType = RtAudioError::THREAD_ERROR;
5297 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5300 else if ( !CloseHandle( threadHandle ) ) {
5301 errorType = RtAudioError::THREAD_ERROR;
5302 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5306 callbackStopped = true;
5308 else if ( callbackResult == 2 ) {
5309 // instantiate a thread to stop this thread
5310 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5311 if ( !threadHandle ) {
5312 errorType = RtAudioError::THREAD_ERROR;
5313 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5316 else if ( !CloseHandle( threadHandle ) ) {
5317 errorType = RtAudioError::THREAD_ERROR;
5318 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5322 callbackStopped = true;
5329 // 1. Convert callback buffer to stream format
5330 // 2. Convert callback buffer to stream sample rate and channel count
5331 // 3. Push callback buffer into outputBuffer
5333 if ( renderAudioClient && callbackPulled )
5335 // if the last call to renderBuffer.PushBuffer() was successful
5336 if ( callbackPushed || convBufferSize == 0 )
5338 if ( stream_.doConvertBuffer[OUTPUT] )
5340 // Convert callback buffer to stream format
5341 convertBuffer( stream_.deviceBuffer,
5342 stream_.userBuffer[OUTPUT],
5343 stream_.convertInfo[OUTPUT] );
5347 // no further conversion, simple copy userBuffer to deviceBuffer
5348 memcpy( stream_.deviceBuffer,
5349 stream_.userBuffer[OUTPUT],
5350 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5353 // Convert callback buffer to stream sample rate
5354 renderResampler->Convert( convBuffer,
5355 stream_.deviceBuffer,
5360 // Push callback buffer into outputBuffer
5361 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5362 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5363 stream_.deviceFormat[OUTPUT] );
5366 // if there is no render stream, set callbackPushed flag
5367 callbackPushed = true;
5372 // 1. Get capture buffer from stream
5373 // 2. Push capture buffer into inputBuffer
5374 // 3. If 2. was successful: Release capture buffer
5376 if ( captureAudioClient ) {
5377 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
// In loopback mode there is no capture event; the render event is used to
// pace the loop instead.
5378 if ( !callbackPulled ) {
5379 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5382 // Get capture buffer from stream
5383 hr = captureClient->GetBuffer( &streamBuffer,
5385 &captureFlags, NULL, NULL );
5386 if ( FAILED( hr ) ) {
5387 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5391 if ( bufferFrameCount != 0 ) {
5392 // Push capture buffer into inputBuffer
5393 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5394 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5395 stream_.deviceFormat[INPUT] ) )
5397 // Release capture buffer
5398 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5399 if ( FAILED( hr ) ) {
5400 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5406 // Inform WASAPI that capture was unsuccessful
5407 hr = captureClient->ReleaseBuffer( 0 );
5408 if ( FAILED( hr ) ) {
5409 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5416 // Inform WASAPI that capture was unsuccessful
5417 hr = captureClient->ReleaseBuffer( 0 );
5418 if ( FAILED( hr ) ) {
5419 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5427 // 1. Get render buffer from stream
5428 // 2. Pull next buffer from outputBuffer
5429 // 3. If 2. was successful: Fill render buffer with next buffer
5430 // Release render buffer
5432 if ( renderAudioClient ) {
5433 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5434 if ( callbackPulled && !callbackPushed ) {
5435 WaitForSingleObject( renderEvent, INFINITE );
5438 // Get render buffer from stream
5439 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5440 if ( FAILED( hr ) ) {
5441 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5445 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5446 if ( FAILED( hr ) ) {
5447 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable frames = total buffer size minus frames already queued.
5451 bufferFrameCount -= numFramesPadding;
5453 if ( bufferFrameCount != 0 ) {
5454 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5455 if ( FAILED( hr ) ) {
5456 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5460 // Pull next buffer from outputBuffer
5461 // Fill render buffer with next buffer
5462 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5463 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5464 stream_.deviceFormat[OUTPUT] ) )
5466 // Release render buffer
5467 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5468 if ( FAILED( hr ) ) {
5469 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5475 // Inform WASAPI that render was unsuccessful
5476 hr = renderClient->ReleaseBuffer( 0, 0 );
5477 if ( FAILED( hr ) ) {
5478 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5485 // Inform WASAPI that render was unsuccessful
5486 hr = renderClient->ReleaseBuffer( 0, 0 );
5487 if ( FAILED( hr ) ) {
5488 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5494 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5495 if ( callbackPushed ) {
5496 // unsetting the callbackPulled flag lets the stream know that
5497 // the audio device is ready for another callback output buffer.
5498 callbackPulled = false;
// Cleanup: release the COM-allocated mix formats and the conversion
// buffers/resamplers allocated earlier in this function.
5505 CoTaskMemFree( captureFormat );
5506 CoTaskMemFree( renderFormat );
5508 free ( convBuffer );
5509 delete renderResampler;
5510 delete captureResampler;
5514 // update stream state
5515 stream_.state = STREAM_STOPPED;
5517 if ( !errorText.empty() )
5519 errorText_ = errorText;
5524 //******************** End of __WINDOWS_WASAPI__ *********************//
5528 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5530 // Modified by Robin Davies, October 2005
5531 // - Improvements to DirectX pointer chasing.
5532 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5533 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5534 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5535 // Changed device query structure for RtAudio 4.0.7, January 2010
5537 #include <windows.h>
5538 #include <process.h>
5539 #include <mmsystem.h>
5543 #include <algorithm>
5545 #if defined(__MINGW32__)
5546 // missing from latest mingw winapi
5547 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5548 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5549 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5550 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5553 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5555 #ifdef _MSC_VER // if Microsoft Visual C++
5556 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5559 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5561 if ( pointer > bufferSize ) pointer -= bufferSize;
5562 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5563 if ( pointer < earlierPointer ) pointer += bufferSize;
5564 return pointer >= earlierPointer && pointer < laterPointer;
5567 // A structure to hold various information related to the DirectSound
5568 // API implementation.
// The two-element arrays hold per-direction state — presumably index 0 is
// playback and index 1 is capture; confirm against the OUTPUT/INPUT use
// elsewhere in this file.
5570 unsigned int drainCounter; // Tracks callback counts when draining
5571 bool internalDrain; // Indicates if stop is initiated from callback or not.
5575 UINT bufferPointer[2]; // Current byte offsets into the DS buffers — presumably the next write/read positions; verify against the buffer-chasing code.
5576 DWORD dsBufferSize[2]; // Size in bytes of each DirectSound buffer.
5577 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero all counters, ids, buffers, and xrun flags.
5581 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5584 // Declarations for utility functions, callbacks, and structures
5585 // specific to the DirectSound implementation.
// Enumeration callback passed to DirectSound(Capture)Enumerate below.
5586 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5587 LPCTSTR description,
// Maps a DirectSound HRESULT/error code to a printable message.
5591 static const char* getErrorString( int code );
// Entry point for the DirectSound callback-processing thread.
5593 static unsigned __stdcall callbackHandler( void *ptr );
// Member-initializer list — presumably the DsDevice default constructor
// (struct header not visible here): device starts unfound with no valid ids.
5602 : found(false) { validId[0] = false; validId[1] = false; }
// Context passed to deviceQueryCallback: which direction is being probed
// and where to accumulate the discovered devices.
5605 struct DsProbeData {
5607 std::vector<struct DsDevice>* dsDevices;
// Constructor: initialize COM for this thread.  A failed CoInitialize is
// tolerated (coInitialized_ stays false) so the destructor knows whether a
// balancing CoUninitialize is owed.
5610 RtApiDs :: RtApiDs()
5612 // Dsound will run both-threaded. If CoInitialize fails, then just
5613 // accept whatever the mainline chose for a threading model.
5614 coInitialized_ = false;
5615 HRESULT hr = CoInitialize( NULL );
5616 if ( !FAILED( hr ) ) coInitialized_ = true;
// Destructor: close any open stream, then undo the constructor's
// CoInitialize only if it actually succeeded.
5619 RtApiDs :: ~RtApiDs()
5621 if ( stream_.state != STREAM_CLOSED ) closeStream();
5622 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5625 // The DirectSound default output is always the first device.
// Returns the fixed index 0 — DirectSound enumeration places the default
// render device first.
5626 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5631 // The DirectSound default input is always the first input device,
5632 // which is the first capture device enumerated.
// Returns the fixed index 0, mirroring getDefaultOutputDevice().
5633 unsigned int RtApiDs :: getDefaultInputDevice( void )
// Re-enumerate DirectSound render and capture devices into dsDevices,
// prune entries that have disappeared since the last query, and return
// the resulting device count.  Enumeration failures are reported as
// warnings and do not abort the query.
5638 unsigned int RtApiDs :: getDeviceCount( void )
5640 // Set query flag for previously found devices to false, so that we
5641 // can check for any devices that have disappeared.
5642 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5643 dsDevices[i].found = false;
5645 // Query DirectSound devices.
5646 struct DsProbeData probeInfo;
5647 probeInfo.isInput = false;
5648 probeInfo.dsDevices = &dsDevices;
5649 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5650 if ( FAILED( result ) ) {
5651 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5652 errorText_ = errorStream_.str();
5653 error( RtAudioError::WARNING );
5656 // Query DirectSoundCapture devices.
5657 probeInfo.isInput = true;
5658 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5659 if ( FAILED( result ) ) {
5660 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5661 errorText_ = errorStream_.str();
5662 error( RtAudioError::WARNING );
5665 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// The loop header deliberately has no increment: presumably the index is
// only advanced when no erase occurs, so each element is examined once —
// confirm against the non-erase branch.
5666 for ( unsigned int i=0; i<dsDevices.size(); ) {
5667 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5671 return static_cast<unsigned int>(dsDevices.size());
// Fill an RtAudio::DeviceInfo for the DirectSound device at index `device`.
// Probes output capabilities first, then falls through (via the probeInput
// label) to probe capture capabilities; all probe failures are reported as
// warnings and return the partially-filled info.
5674 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5676 RtAudio::DeviceInfo info;
5677 info.probed = false;
5679 if ( dsDevices.size() == 0 ) {
5680 // Force a query of all devices
5682 if ( dsDevices.size() == 0 ) {
5683 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5684 error( RtAudioError::INVALID_USE );
5689 if ( device >= dsDevices.size() ) {
5690 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5691 error( RtAudioError::INVALID_USE );
// Devices with no valid output id skip straight to the input probe.
5696 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5698 LPDIRECTSOUND output;
5700 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5701 if ( FAILED( result ) ) {
5702 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5703 errorText_ = errorStream_.str();
5704 error( RtAudioError::WARNING );
5708 outCaps.dwSize = sizeof( outCaps );
5709 result = output->GetCaps( &outCaps );
5710 if ( FAILED( result ) ) {
5712 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5713 errorText_ = errorStream_.str();
5714 error( RtAudioError::WARNING );
5718 // Get output channel information.
5719 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5721 // Get sample rate information.
5722 info.sampleRates.clear();
5723 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5724 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5725 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5726 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate that does not exceed 48 kHz.
5728 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5729 info.preferredSampleRate = SAMPLE_RATES[k];
5733 // Get format information.
5734 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5735 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5739 if ( getDefaultOutputDevice() == device )
5740 info.isDefaultOutput = true;
// No capture id for this device: finish with the name and what we have.
5742 if ( dsDevices[ device ].validId[1] == false ) {
5743 info.name = dsDevices[ device ].name;
5750 LPDIRECTSOUNDCAPTURE input;
5751 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5752 if ( FAILED( result ) ) {
5753 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5754 errorText_ = errorStream_.str();
5755 error( RtAudioError::WARNING );
5760 inCaps.dwSize = sizeof( inCaps );
5761 result = input->GetCaps( &inCaps );
5762 if ( FAILED( result ) ) {
5764 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5765 errorText_ = errorStream_.str();
5766 error( RtAudioError::WARNING );
5770 // Get input channel information.
5771 info.inputChannels = inCaps.dwChannels;
5773 // Get sample rate and format information.
// DSCCAPS reports capture support as a bitmask of fixed WAVE_FORMAT_*
// combinations; translate those into RtAudio formats and rate lists.
5774 std::vector<unsigned int> rates;
5775 if ( inCaps.dwChannels >= 2 ) {
5776 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5777 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5778 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5779 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5780 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5781 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5782 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5783 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5785 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5786 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5787 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5788 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5789 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5791 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5792 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5793 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5794 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5795 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5798 else if ( inCaps.dwChannels == 1 ) {
5799 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5800 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5801 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5802 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5803 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5804 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5805 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5806 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5808 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5809 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5810 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5811 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5812 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5814 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5815 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5816 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5817 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5818 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5821 else info.inputChannels = 0; // technically, this would be an error
5825 if ( info.inputChannels == 0 ) return info;
5827 // Copy the supported rates to the info structure but avoid duplication.
5829 for ( unsigned int i=0; i<rates.size(); i++ ) {
5831 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5832 if ( rates[i] == info.sampleRates[j] ) {
5837 if ( found == false ) info.sampleRates.push_back( rates[i] );
5839 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5841 // If device opens for both playback and capture, we determine the channels.
5842 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5843 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5845 if ( device == 0 ) info.isDefaultInput = true;
5847 // Copy name and return.
5848 info.name = dsDevices[ device ].name;
// Open and configure one direction (OUTPUT or INPUT) of a DirectSound stream.
//
// Validates the device/channel request, creates the DirectSound (playback) or
// DirectSoundCapture object, negotiates an 8- or 16-bit PCM wave format,
// creates and zeroes the hardware buffer, then fills in the shared stream_
// structure, allocates user/device conversion buffers, creates the DsHandle
// (with its manual-reset event), and spawns the callback thread on first open.
//
// Parameters:
//   device       - index into dsDevices (already range-checked by callers,
//                  but re-checked here).
//   mode         - OUTPUT or INPUT; called once per direction for duplex.
//   channels     - number of channels requested by the user (max 2 total
//                  including firstChannel, a DirectSound limitation).
//   firstChannel - channel offset within the device.
//   sampleRate   - requested rate in Hz.
//   format       - user sample format; device format is chosen independently.
//   bufferSize   - in/out: user buffer size in frames, clamped to >= 32.
//   options      - optional stream options (numberOfBuffers, flags).
//
// NOTE(review): this excerpt is lossy — the embedded original line numbers
// show elided lines (e.g. `return FAILURE;` statements, `HRESULT result;`,
// `DSCAPS outCaps;` declarations, closing braces, and the `error:` label that
// the cleanup code near the end of the function presumably follows).
// Return value appears to be bool SUCCESS/FAILURE — confirm against the
// elided lines.
5853 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5854 unsigned int firstChannel, unsigned int sampleRate,
5855 RtAudioFormat format, unsigned int *bufferSize,
5856 RtAudio::StreamOptions *options )
// DirectSound hard limit: at most 2 channels per device.
5858 if ( channels + firstChannel > 2 ) {
5859 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5863 size_t nDevices = dsDevices.size();
5864 if ( nDevices == 0 ) {
5865 // This should not happen because a check is made before this function is called.
5866 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5870 if ( device >= nDevices ) {
5871 // This should not happen because a check is made before this function is called.
5872 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
// validId[0] = playback capable, validId[1] = capture capable.
5876 if ( mode == OUTPUT ) {
5877 if ( dsDevices[ device ].validId[0] == false ) {
5878 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5879 errorText_ = errorStream_.str();
5883 else { // mode == INPUT
5884 if ( dsDevices[ device ].validId[1] == false ) {
5885 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5886 errorText_ = errorStream_.str();
5891 // According to a note in PortAudio, using GetDesktopWindow()
5892 // instead of GetForegroundWindow() is supposed to avoid problems
5893 // that occur when the application's window is not the foreground
5894 // window. Also, if the application window closes before the
5895 // DirectSound buffer, DirectSound can crash. In the past, I had
5896 // problems when using GetDesktopWindow() but it seems fine now
5897 // (January 2010). I'll leave it commented here.
5898 // HWND hWnd = GetForegroundWindow();
5899 HWND hWnd = GetDesktopWindow();
5901 // Check the numberOfBuffers parameter and limit the lowest value to
5902 // two. This is a judgement call and a value of two is probably too
5903 // low for capture, but it should work for playback.
5905 if ( options ) nBuffers = options->numberOfBuffers;
5906 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5907 if ( nBuffers < 2 ) nBuffers = 3;
5909 // Check the lower range of the user-specified buffer size and set
5910 // (arbitrarily) to a lower bound of 32.
5911 if ( *bufferSize < 32 ) *bufferSize = 32;
5913 // Create the wave format structure. The data format setting will
5914 // be determined later.
5915 WAVEFORMATEX waveFormat;
5916 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5917 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5918 waveFormat.nChannels = channels + firstChannel;
5919 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5921 // Determine the device buffer size. By default, we'll use the value
5922 // defined above (32K), but we will grow it to make allowances for
5923 // very large software buffer sizes.
5924 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5925 DWORD dsPointerLeadTime = 0;
// ohandle = DirectSound(Capture) object, bhandle = its (capture) buffer;
// stored as void* so both directions share the same DsHandle slots.
5927 void *ohandle = 0, *bhandle = 0;
5929 if ( mode == OUTPUT ) {
5931 LPDIRECTSOUND output;
5932 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5933 if ( FAILED( result ) ) {
5934 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5935 errorText_ = errorStream_.str();
5940 outCaps.dwSize = sizeof( outCaps );
5941 result = output->GetCaps( &outCaps );
5942 if ( FAILED( result ) ) {
5944 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5945 errorText_ = errorStream_.str();
5949 // Check channel information.
5950 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5951 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5952 errorText_ = errorStream_.str();
5956 // Check format information. Use 16-bit format unless not
5957 // supported or user requests 8-bit.
5958 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5959 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5960 waveFormat.wBitsPerSample = 16;
5961 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5964 waveFormat.wBitsPerSample = 8;
5965 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5967 stream_.userFormat = format;
5969 // Update wave format structure and buffer information.
5970 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5971 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
// Lead time = total bytes of user-side buffering; the device buffer must be
// at least twice this to leave headroom between write and play cursors.
5972 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5974 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5975 while ( dsPointerLeadTime * 2U > dsBufferSize )
5978 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5979 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5980 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5981 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5982 if ( FAILED( result ) ) {
5984 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5985 errorText_ = errorStream_.str();
5989 // Even though we will write to the secondary buffer, we need to
5990 // access the primary buffer to set the correct output format
5991 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5992 // buffer description.
5993 DSBUFFERDESC bufferDescription;
5994 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5995 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5996 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5998 // Obtain the primary buffer
5999 LPDIRECTSOUNDBUFFER buffer;
6000 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6001 if ( FAILED( result ) ) {
6003 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
6004 errorText_ = errorStream_.str();
6008 // Set the primary DS buffer sound format.
6009 result = buffer->SetFormat( &waveFormat );
6010 if ( FAILED( result ) ) {
6012 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
6013 errorText_ = errorStream_.str();
6017 // Setup the secondary DS buffer description.
6018 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6019 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6020 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6021 DSBCAPS_GLOBALFOCUS |
6022 DSBCAPS_GETCURRENTPOSITION2 |
6023 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
6024 bufferDescription.dwBufferBytes = dsBufferSize;
6025 bufferDescription.lpwfxFormat = &waveFormat;
6027 // Try to create the secondary DS buffer. If that doesn't work,
6028 // try to use software mixing. Otherwise, there's a problem.
6029 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6030 if ( FAILED( result ) ) {
6031 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6032 DSBCAPS_GLOBALFOCUS |
6033 DSBCAPS_GETCURRENTPOSITION2 |
6034 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6035 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6036 if ( FAILED( result ) ) {
6038 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6039 errorText_ = errorStream_.str();
6044 // Get the buffer size ... might be different from what we specified.
6046 dsbcaps.dwSize = sizeof( DSBCAPS );
6047 result = buffer->GetCaps( &dsbcaps );
6048 if ( FAILED( result ) ) {
6051 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6052 errorText_ = errorStream_.str();
6056 dsBufferSize = dsbcaps.dwBufferBytes;
6058 // Lock the DS buffer
6061 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6062 if ( FAILED( result ) ) {
6065 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6066 errorText_ = errorStream_.str();
6070 // Zero the DS buffer
6071 ZeroMemory( audioPtr, dataLen );
6073 // Unlock the DS buffer
6074 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6075 if ( FAILED( result ) ) {
6078 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6079 errorText_ = errorStream_.str();
// Stash the playback object/buffer for the DsHandle below.
6083 ohandle = (void *) output;
6084 bhandle = (void *) buffer;
6087 if ( mode == INPUT ) {
6089 LPDIRECTSOUNDCAPTURE input;
6090 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6091 if ( FAILED( result ) ) {
6092 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6093 errorText_ = errorStream_.str();
6098 inCaps.dwSize = sizeof( inCaps );
6099 result = input->GetCaps( &inCaps );
6100 if ( FAILED( result ) ) {
6102 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6103 errorText_ = errorStream_.str();
6107 // Check channel information.
6108 if ( inCaps.dwChannels < channels + firstChannel ) {
6109 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6113 // Check format information. Use 16-bit format unless user
// requests 8-bit AND the device advertises an 8-bit capture format.
6115 DWORD deviceFormats;
6116 if ( channels + firstChannel == 2 ) {
6117 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6118 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6119 waveFormat.wBitsPerSample = 8;
6120 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6122 else { // assume 16-bit is supported
6123 waveFormat.wBitsPerSample = 16;
6124 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6127 else { // channel == 1
6128 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6129 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6130 waveFormat.wBitsPerSample = 8;
6131 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6133 else { // assume 16-bit is supported
6134 waveFormat.wBitsPerSample = 16;
6135 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6138 stream_.userFormat = format;
6140 // Update wave format structure and buffer information.
6141 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6142 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6143 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6145 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6146 while ( dsPointerLeadTime * 2U > dsBufferSize )
6149 // Setup the secondary DS buffer description.
6150 DSCBUFFERDESC bufferDescription;
6151 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6152 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6153 bufferDescription.dwFlags = 0;
6154 bufferDescription.dwReserved = 0;
6155 bufferDescription.dwBufferBytes = dsBufferSize;
6156 bufferDescription.lpwfxFormat = &waveFormat;
6158 // Create the capture buffer.
6159 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6160 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6161 if ( FAILED( result ) ) {
6163 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6164 errorText_ = errorStream_.str();
6168 // Get the buffer size ... might be different from what we specified.
6170 dscbcaps.dwSize = sizeof( DSCBCAPS );
6171 result = buffer->GetCaps( &dscbcaps );
6172 if ( FAILED( result ) ) {
6175 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6176 errorText_ = errorStream_.str();
6180 dsBufferSize = dscbcaps.dwBufferBytes;
6182 // NOTE: We could have a problem here if this is a duplex stream
6183 // and the play and capture hardware buffer sizes are different
6184 // (I'm actually not sure if that is a problem or not).
6185 // Currently, we are not verifying that.
6187 // Lock the capture buffer
6190 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6191 if ( FAILED( result ) ) {
6194 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6195 errorText_ = errorStream_.str();
// Zero the capture buffer so stale data is never delivered.
6200 ZeroMemory( audioPtr, dataLen );
6202 // Unlock the buffer
6203 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6204 if ( FAILED( result ) ) {
6207 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6208 errorText_ = errorStream_.str();
6212 ohandle = (void *) input;
6213 bhandle = (void *) buffer;
6216 // Set various stream parameters
6217 DsHandle *handle = 0;
6218 stream_.nDeviceChannels[mode] = channels + firstChannel;
6219 stream_.nUserChannels[mode] = channels;
6220 stream_.bufferSize = *bufferSize;
6221 stream_.channelOffset[mode] = firstChannel;
// DirectSound buffers are always interleaved on the device side.
6222 stream_.deviceInterleaved[mode] = true;
6223 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6224 else stream_.userInterleaved = true;
6226 // Set flag for buffer conversion
6227 stream_.doConvertBuffer[mode] = false;
6228 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6229 stream_.doConvertBuffer[mode] = true;
6230 if (stream_.userFormat != stream_.deviceFormat[mode])
6231 stream_.doConvertBuffer[mode] = true;
6232 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6233 stream_.nUserChannels[mode] > 1 )
6234 stream_.doConvertBuffer[mode] = true;
6236 // Allocate necessary internal buffers
6237 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6238 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6239 if ( stream_.userBuffer[mode] == NULL ) {
6240 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6244 if ( stream_.doConvertBuffer[mode] ) {
6246 bool makeBuffer = true;
6247 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6248 if ( mode == INPUT ) {
// For duplex streams, reuse the output-side device buffer if it is
// already large enough for the input side.
6249 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6250 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6251 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6256 bufferBytes *= *bufferSize;
6257 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6258 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6259 if ( stream_.deviceBuffer == NULL ) {
6260 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6266 // Allocate our DsHandle structures for the stream.
6267 if ( stream_.apiHandle == 0 ) {
6269 handle = new DsHandle;
6271 catch ( std::bad_alloc& ) {
// NOTE(review): message says "AsioHandle" but this allocates a DsHandle —
// looks like a copy/paste slip from the ASIO backend (message text left
// unchanged here; fixing it is a behavior change).
6272 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6276 // Create a manual-reset event.
6277 handle->condition = CreateEvent( NULL, // no security
6278 TRUE, // manual-reset
6279 FALSE, // non-signaled initially
6281 stream_.apiHandle = (void *) handle;
6284 handle = (DsHandle *) stream_.apiHandle;
6285 handle->id[mode] = ohandle;
6286 handle->buffer[mode] = bhandle;
6287 handle->dsBufferSize[mode] = dsBufferSize;
6288 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6290 stream_.device[mode] = device;
6291 stream_.state = STREAM_STOPPED;
6292 if ( stream_.mode == OUTPUT && mode == INPUT )
6293 // We had already set up an output stream.
6294 stream_.mode = DUPLEX;
6296 stream_.mode = mode;
6297 stream_.nBuffers = nBuffers;
6298 stream_.sampleRate = sampleRate;
6300 // Setup the buffer conversion information structure.
6301 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6303 // Setup the callback thread.
6304 if ( stream_.callbackInfo.isRunning == false ) {
6306 stream_.callbackInfo.isRunning = true;
6307 stream_.callbackInfo.object = (void *) this;
6308 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6309 &stream_.callbackInfo, 0, &threadId );
6310 if ( stream_.callbackInfo.thread == 0 ) {
6311 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6315 // Boost DS thread priority
6316 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// --- Error cleanup path (presumably reached via an elided `error:` label):
// release any DS objects/buffers, close the event handle, free user and
// device buffers, and mark the stream closed.
6322 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6323 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6324 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6325 if ( buffer ) buffer->Release();
6328 if ( handle->buffer[1] ) {
6329 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6330 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6331 if ( buffer ) buffer->Release();
6334 CloseHandle( handle->condition );
6336 stream_.apiHandle = 0;
6339 for ( int i=0; i<2; i++ ) {
6340 if ( stream_.userBuffer[i] ) {
6341 free( stream_.userBuffer[i] );
6342 stream_.userBuffer[i] = 0;
6346 if ( stream_.deviceBuffer ) {
6347 free( stream_.deviceBuffer );
6348 stream_.deviceBuffer = 0;
6351 stream_.state = STREAM_CLOSED;
// Close an open stream: stop the callback thread, release the DirectSound
// objects and buffers, free internal buffers, and reset the stream state.
// Issues a WARNING (not an error) if no stream is open.
//
// NOTE(review): lossy excerpt — the elided lines (e.g. 6373-6387) presumably
// hold the buffer->Stop()/Release() and object->Release() calls between the
// pointer casts shown below; confirm against the full source.
6355 void RtApiDs :: closeStream()
6357 if ( stream_.state == STREAM_CLOSED ) {
6358 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6359 error( RtAudioError::WARNING );
6363 // Stop the callback thread.
// Clearing isRunning signals the thread's loop to exit; then join and close.
6364 stream_.callbackInfo.isRunning = false;
6365 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6366 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6368 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6370 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6371 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6372 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6379 if ( handle->buffer[1] ) {
6380 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6381 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6388 CloseHandle( handle->condition );
6390 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
6393 for ( int i=0; i<2; i++ ) {
6394 if ( stream_.userBuffer[i] ) {
6395 free( stream_.userBuffer[i] );
6396 stream_.userBuffer[i] = 0;
6400 if ( stream_.deviceBuffer ) {
6401 free( stream_.deviceBuffer );
6402 stream_.deviceBuffer = 0;
6405 stream_.mode = UNINITIALIZED;
6406 stream_.state = STREAM_CLOSED;
// Start a stopped stream: begin looping playback on the output buffer and/or
// capture on the input buffer, reset drain bookkeeping, and mark the stream
// RUNNING. Issues a WARNING if the stream is already running; raises
// SYSTEM_ERROR if a DirectSound Play/Start call fails.
6409 void RtApiDs :: startStream()
6412 if ( stream_.state == STREAM_RUNNING ) {
6413 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6414 error( RtAudioError::WARNING );
// Record the start timestamp used by getStreamTime() bookkeeping.
6418 #if defined( HAVE_GETTIMEOFDAY )
6419 gettimeofday( &stream_.lastTickTimestamp, NULL );
6422 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6424 // Increase scheduler frequency on lesser windows (a side-effect of
6425 // increasing timer accuracy). On greater windows (Win2K or later),
6426 // this is already in effect.
6427 timeBeginPeriod( 1 );
6429 buffersRolling = false;
6430 duplexPrerollBytes = 0;
6432 if ( stream_.mode == DUPLEX ) {
6433 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6434 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
6438 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6440 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Looping play: the secondary buffer is treated as a ring buffer.
6441 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6442 if ( FAILED( result ) ) {
6443 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6444 errorText_ = errorStream_.str();
6449 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6451 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6452 result = buffer->Start( DSCBSTART_LOOPING );
6453 if ( FAILED( result ) ) {
6454 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6455 errorText_ = errorStream_.str();
// Reset drain state and the condition event used by stopStream()/drain.
6460 handle->drainCounter = 0;
6461 handle->internalDrain = false;
6462 ResetEvent( handle->condition );
6463 stream_.state = STREAM_RUNNING;
6466 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream. For output/duplex, first lets queued audio drain
// (drainCounter + wait on the handle condition event), then stops each DS
// buffer, locks and zeroes it so a restart does not replay stale data, and
// rewinds the software buffer pointers. Restores normal timer resolution and
// raises SYSTEM_ERROR if any DirectSound call failed.
//
// NOTE(review): lossy excerpt — the elided lines presumably include
// `HRESULT result;`, the `audioPtr`/`dataLen` declarations, and
// `goto unlock`-style error exits; confirm against the full source.
6469 void RtApiDs :: stopStream()
6472 if ( stream_.state == STREAM_STOPPED ) {
6473 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6474 error( RtAudioError::WARNING );
6481 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6482 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Block until the callback thread signals that pending output has drained.
6483 if ( handle->drainCounter == 0 ) {
6484 handle->drainCounter = 2;
6485 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6488 stream_.state = STREAM_STOPPED;
6490 MUTEX_LOCK( &stream_.mutex );
6492 // Stop the buffer and clear memory
6493 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6494 result = buffer->Stop();
6495 if ( FAILED( result ) ) {
6496 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6497 errorText_ = errorStream_.str();
6501 // Lock the buffer and clear it so that if we start to play again,
6502 // we won't have old data playing.
6503 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6504 if ( FAILED( result ) ) {
6505 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6506 errorText_ = errorStream_.str();
6510 // Zero the DS buffer
6511 ZeroMemory( audioPtr, dataLen );
6513 // Unlock the DS buffer
6514 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6515 if ( FAILED( result ) ) {
6516 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6517 errorText_ = errorStream_.str();
6521 // If we start playing again, we must begin at beginning of buffer.
6522 handle->bufferPointer[0] = 0;
6525 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6526 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6530 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above.
6532 if ( stream_.mode != DUPLEX )
6533 MUTEX_LOCK( &stream_.mutex );
6535 result = buffer->Stop();
6536 if ( FAILED( result ) ) {
6537 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6538 errorText_ = errorStream_.str();
6542 // Lock the buffer and clear it so that if we start to play again,
6543 // we won't have old data playing.
6544 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6545 if ( FAILED( result ) ) {
6546 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6547 errorText_ = errorStream_.str();
6551 // Zero the DS buffer
6552 ZeroMemory( audioPtr, dataLen );
6554 // Unlock the DS buffer
6555 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6556 if ( FAILED( result ) ) {
6557 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6558 errorText_ = errorStream_.str();
6562 // If we start recording again, we must begin at beginning of buffer.
6563 handle->bufferPointer[1] = 0;
6567 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6568 MUTEX_UNLOCK( &stream_.mutex );
6570 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream: skip the drain wait by pre-setting drainCounter to
// 2 so stopStream() (presumably called on an elided line after this setup)
// stops immediately without waiting for queued output to play out.
6573 void RtApiDs :: abortStream()
6576 if ( stream_.state == STREAM_STOPPED ) {
6577 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6578 error( RtAudioError::WARNING );
6582 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6583 handle->drainCounter = 2;
6588 void RtApiDs :: callbackEvent()
6590 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6591 Sleep( 50 ); // sleep 50 milliseconds
6595 if ( stream_.state == STREAM_CLOSED ) {
6596 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6597 error( RtAudioError::WARNING );
6601 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6602 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6604 // Check if we were draining the stream and signal is finished.
6605 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6607 stream_.state = STREAM_STOPPING;
6608 if ( handle->internalDrain == false )
6609 SetEvent( handle->condition );
6615 // Invoke user callback to get fresh output data UNLESS we are
6617 if ( handle->drainCounter == 0 ) {
6618 RtAudioCallback callback = (RtAudioCallback) info->callback;
6619 double streamTime = getStreamTime();
6620 RtAudioStreamStatus status = 0;
6621 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6622 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6623 handle->xrun[0] = false;
6625 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6626 status |= RTAUDIO_INPUT_OVERFLOW;
6627 handle->xrun[1] = false;
6629 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6630 stream_.bufferSize, streamTime, status, info->userData );
6631 if ( cbReturnValue == 2 ) {
6632 stream_.state = STREAM_STOPPING;
6633 handle->drainCounter = 2;
6637 else if ( cbReturnValue == 1 ) {
6638 handle->drainCounter = 1;
6639 handle->internalDrain = true;
6644 DWORD currentWritePointer, safeWritePointer;
6645 DWORD currentReadPointer, safeReadPointer;
6646 UINT nextWritePointer;
6648 LPVOID buffer1 = NULL;
6649 LPVOID buffer2 = NULL;
6650 DWORD bufferSize1 = 0;
6651 DWORD bufferSize2 = 0;
6656 MUTEX_LOCK( &stream_.mutex );
6657 if ( stream_.state == STREAM_STOPPED ) {
6658 MUTEX_UNLOCK( &stream_.mutex );
6662 if ( buffersRolling == false ) {
6663 if ( stream_.mode == DUPLEX ) {
6664 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6666 // It takes a while for the devices to get rolling. As a result,
6667 // there's no guarantee that the capture and write device pointers
6668 // will move in lockstep. Wait here for both devices to start
6669 // rolling, and then set our buffer pointers accordingly.
6670 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6671 // bytes later than the write buffer.
6673 // Stub: a serious risk of having a pre-emptive scheduling round
6674 // take place between the two GetCurrentPosition calls... but I'm
6675 // really not sure how to solve the problem. Temporarily boost to
6676 // Realtime priority, maybe; but I'm not sure what priority the
6677 // DirectSound service threads run at. We *should* be roughly
6678 // within a ms or so of correct.
6680 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6681 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6683 DWORD startSafeWritePointer, startSafeReadPointer;
6685 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6686 if ( FAILED( result ) ) {
6687 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6688 errorText_ = errorStream_.str();
6689 MUTEX_UNLOCK( &stream_.mutex );
6690 error( RtAudioError::SYSTEM_ERROR );
6693 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6694 if ( FAILED( result ) ) {
6695 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6696 errorText_ = errorStream_.str();
6697 MUTEX_UNLOCK( &stream_.mutex );
6698 error( RtAudioError::SYSTEM_ERROR );
6702 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6703 if ( FAILED( result ) ) {
6704 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6705 errorText_ = errorStream_.str();
6706 MUTEX_UNLOCK( &stream_.mutex );
6707 error( RtAudioError::SYSTEM_ERROR );
6710 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6711 if ( FAILED( result ) ) {
6712 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6713 errorText_ = errorStream_.str();
6714 MUTEX_UNLOCK( &stream_.mutex );
6715 error( RtAudioError::SYSTEM_ERROR );
6718 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6722 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6724 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6725 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6726 handle->bufferPointer[1] = safeReadPointer;
6728 else if ( stream_.mode == OUTPUT ) {
6730 // Set the proper nextWritePosition after initial startup.
6731 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6732 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6733 if ( FAILED( result ) ) {
6734 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6735 errorText_ = errorStream_.str();
6736 MUTEX_UNLOCK( &stream_.mutex );
6737 error( RtAudioError::SYSTEM_ERROR );
6740 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6741 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6744 buffersRolling = true;
6747 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6749 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6751 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6752 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6753 bufferBytes *= formatBytes( stream_.userFormat );
6754 memset( stream_.userBuffer[0], 0, bufferBytes );
6757 // Setup parameters and do buffer conversion if necessary.
6758 if ( stream_.doConvertBuffer[0] ) {
6759 buffer = stream_.deviceBuffer;
6760 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6761 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6762 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6765 buffer = stream_.userBuffer[0];
6766 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6767 bufferBytes *= formatBytes( stream_.userFormat );
6770 // No byte swapping necessary in DirectSound implementation.
6772 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6773 // unsigned. So, we need to convert our signed 8-bit data here to
6775 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6776 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6778 DWORD dsBufferSize = handle->dsBufferSize[0];
6779 nextWritePointer = handle->bufferPointer[0];
6781 DWORD endWrite, leadPointer;
6783 // Find out where the read and "safe write" pointers are.
6784       result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6785 if ( FAILED( result ) ) {
6786 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6787 errorText_ = errorStream_.str();
6788 MUTEX_UNLOCK( &stream_.mutex );
6789 error( RtAudioError::SYSTEM_ERROR );
6793 // We will copy our output buffer into the region between
6794 // safeWritePointer and leadPointer. If leadPointer is not
6795 // beyond the next endWrite position, wait until it is.
6796 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6797 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6798 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6799 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6800 endWrite = nextWritePointer + bufferBytes;
6802 // Check whether the entire write region is behind the play pointer.
6803 if ( leadPointer >= endWrite ) break;
6805 // If we are here, then we must wait until the leadPointer advances
6806 // beyond the end of our next write region. We use the
6807 // Sleep() function to suspend operation until that happens.
6808 double millis = ( endWrite - leadPointer ) * 1000.0;
6809 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6810 if ( millis < 1.0 ) millis = 1.0;
6811 Sleep( (DWORD) millis );
6814 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6815 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6816 // We've strayed into the forbidden zone ... resync the read pointer.
6817 handle->xrun[0] = true;
6818 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6819 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6820 handle->bufferPointer[0] = nextWritePointer;
6821 endWrite = nextWritePointer + bufferBytes;
6824 // Lock free space in the buffer
6825 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6826 &bufferSize1, &buffer2, &bufferSize2, 0 );
6827 if ( FAILED( result ) ) {
6828 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6829 errorText_ = errorStream_.str();
6830 MUTEX_UNLOCK( &stream_.mutex );
6831 error( RtAudioError::SYSTEM_ERROR );
6835 // Copy our buffer into the DS buffer
6836 CopyMemory( buffer1, buffer, bufferSize1 );
6837 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6839 // Update our buffer offset and unlock sound buffer
6840 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6841 if ( FAILED( result ) ) {
6842 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6843 errorText_ = errorStream_.str();
6844 MUTEX_UNLOCK( &stream_.mutex );
6845 error( RtAudioError::SYSTEM_ERROR );
6848 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6849 handle->bufferPointer[0] = nextWritePointer;
6852 // Don't bother draining input
6853 if ( handle->drainCounter ) {
6854 handle->drainCounter++;
6858 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6860 // Setup parameters.
6861 if ( stream_.doConvertBuffer[1] ) {
6862 buffer = stream_.deviceBuffer;
6863 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6864 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6867 buffer = stream_.userBuffer[1];
6868 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6869 bufferBytes *= formatBytes( stream_.userFormat );
6872 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6873 long nextReadPointer = handle->bufferPointer[1];
6874 DWORD dsBufferSize = handle->dsBufferSize[1];
6876 // Find out where the write and "safe read" pointers are.
6877     result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6878 if ( FAILED( result ) ) {
6879 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6880 errorText_ = errorStream_.str();
6881 MUTEX_UNLOCK( &stream_.mutex );
6882 error( RtAudioError::SYSTEM_ERROR );
6886 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6887 DWORD endRead = nextReadPointer + bufferBytes;
6889 // Handling depends on whether we are INPUT or DUPLEX.
6890 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6891 // then a wait here will drag the write pointers into the forbidden zone.
6893 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6894 // it's in a safe position. This causes dropouts, but it seems to be the only
6895 // practical way to sync up the read and write pointers reliably, given the
6896 // the very complex relationship between phase and increment of the read and write
6899 // In order to minimize audible dropouts in DUPLEX mode, we will
6900 // provide a pre-roll period of 0.5 seconds in which we return
6901 // zeros from the read buffer while the pointers sync up.
6903 if ( stream_.mode == DUPLEX ) {
6904 if ( safeReadPointer < endRead ) {
6905 if ( duplexPrerollBytes <= 0 ) {
6906           // Pre-roll time over. Be more aggressive.
6907 int adjustment = endRead-safeReadPointer;
6909 handle->xrun[1] = true;
6911 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6912 // and perform fine adjustments later.
6913 // - small adjustments: back off by twice as much.
6914 if ( adjustment >= 2*bufferBytes )
6915 nextReadPointer = safeReadPointer-2*bufferBytes;
6917 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6919 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6923           // In pre-roll time. Just do it.
6924 nextReadPointer = safeReadPointer - bufferBytes;
6925 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6927 endRead = nextReadPointer + bufferBytes;
6930 else { // mode == INPUT
6931 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6932 // See comments for playback.
6933 double millis = (endRead - safeReadPointer) * 1000.0;
6934 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6935 if ( millis < 1.0 ) millis = 1.0;
6936 Sleep( (DWORD) millis );
6938 // Wake up and find out where we are now.
6939         result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6940 if ( FAILED( result ) ) {
6941 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6942 errorText_ = errorStream_.str();
6943 MUTEX_UNLOCK( &stream_.mutex );
6944 error( RtAudioError::SYSTEM_ERROR );
6948 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6952 // Lock free space in the buffer
6953 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6954 &bufferSize1, &buffer2, &bufferSize2, 0 );
6955 if ( FAILED( result ) ) {
6956 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6957 errorText_ = errorStream_.str();
6958 MUTEX_UNLOCK( &stream_.mutex );
6959 error( RtAudioError::SYSTEM_ERROR );
6963 if ( duplexPrerollBytes <= 0 ) {
6964 // Copy our buffer into the DS buffer
6965 CopyMemory( buffer, buffer1, bufferSize1 );
6966 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6969 memset( buffer, 0, bufferSize1 );
6970 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6971 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6974 // Update our buffer offset and unlock sound buffer
6975 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6976 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6977 if ( FAILED( result ) ) {
6978 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6979 errorText_ = errorStream_.str();
6980 MUTEX_UNLOCK( &stream_.mutex );
6981 error( RtAudioError::SYSTEM_ERROR );
6984 handle->bufferPointer[1] = nextReadPointer;
6986 // No byte swapping necessary in DirectSound implementation.
6988 // If necessary, convert 8-bit data from unsigned to signed.
6989 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6990 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6992 // Do buffer conversion if necessary.
6993 if ( stream_.doConvertBuffer[1] )
6994 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6998 MUTEX_UNLOCK( &stream_.mutex );
6999 RtApi::tickStreamTime();
7002 // Definitions for utility functions and callbacks
7003 // specific to the DirectSound implementation.
// Thread entry point for the DirectSound callback loop.  The void* argument
// is a CallbackInfo carrying the owning RtApiDs object and the shared
// isRunning flag; callbackEvent() is invoked repeatedly until another thread
// clears that flag.
// NOTE(review): intervening original source lines are missing from this view,
// so the loop tail and thread-exit code are not shown here.
7005 static unsigned __stdcall callbackHandler( void *ptr )
7007 CallbackInfo *info = (CallbackInfo *) ptr;
7008 RtApiDs *object = (RtApiDs *) info->object;
7009 bool* isRunning = &info->isRunning; // polled each iteration; cleared externally to stop the thread
7011 while ( *isRunning == true ) {
7012 object->callbackEvent();
// DirectSound device-enumeration callback (passed to DirectSoundEnumerate /
// DirectSoundCaptureEnumerate).  For each enumerated device it verifies the
// device is usable (by opening it and querying its caps), then records the
// device's name and GUID in the shared DsDevice vector referenced through
// lpContext.  Returning TRUE continues enumeration.
// NOTE(review): intervening original source lines are missing from this view
// (several closing braces / else-branches are not shown).
7019 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
7020 LPCTSTR description,
7024 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
7025 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
7028 bool validDevice = false;
7029 if ( probeInfo.isInput == true ) {
7031 LPDIRECTSOUNDCAPTURE object;
7033 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
7034 if ( hr != DS_OK ) return TRUE; // can't open: skip, but keep enumerating
7036 caps.dwSize = sizeof(caps);
7037 hr = object->GetCaps( &caps );
7038 if ( hr == DS_OK ) {
7039 if ( caps.dwChannels > 0 && caps.dwFormats > 0 ) // capture device must report channels and formats
7046 LPDIRECTSOUND object;
7047 hr = DirectSoundCreate( lpguid, &object, NULL );
7048 if ( hr != DS_OK ) return TRUE; // can't open: skip, but keep enumerating
7050 caps.dwSize = sizeof(caps);
7051 hr = object->GetCaps( &caps );
7052 if ( hr == DS_OK ) {
7053 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO ) // output device must support a primary buffer
7059 // If good device, then save its name and guid.
7060 std::string name = convertCharPointerToStdString( description );
7061 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7062 if ( lpguid == NULL ) // a null GUID marks the system default device
7063 name = "Default Device";
7064 if ( validDevice ) {
7065 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7066 if ( dsDevices[i].name == name ) {
7067 dsDevices[i].found = true;
7068 if ( probeInfo.isInput ) {
7069 dsDevices[i].id[1] = lpguid; // index 1 = capture id for an already-listed device
7070 dsDevices[i].validId[1] = true;
7073 dsDevices[i].id[0] = lpguid; // index 0 = playback id
7074 dsDevices[i].validId[0] = true;
7082 device.found = true;
7083 if ( probeInfo.isInput ) {
7084 device.id[1] = lpguid;
7085 device.validId[1] = true;
7088 device.id[0] = lpguid;
7089 device.validId[0] = true;
7091 dsDevices.push_back( device ); // first time this name is seen: add a new entry
// Map a DirectSound error code (DSERR_*) to a short human-readable string.
// Unrecognized codes yield the generic "DirectSound unknown error" text.
// NOTE(review): the switch header, some case labels and closing braces fall
// on source lines missing from this view.
7097 static const char* getErrorString( int code )
7101 case DSERR_ALLOCATED:
7102 return "Already allocated";
7104 case DSERR_CONTROLUNAVAIL:
7105 return "Control unavailable";
7107 case DSERR_INVALIDPARAM:
7108 return "Invalid parameter";
7110 case DSERR_INVALIDCALL:
7111 return "Invalid call";
7114 return "Generic error";
7116 case DSERR_PRIOLEVELNEEDED:
7117 return "Priority level needed";
7119 case DSERR_OUTOFMEMORY:
7120 return "Out of memory";
7122 case DSERR_BADFORMAT:
7123 return "The sample rate or the channel format is not supported";
7125 case DSERR_UNSUPPORTED:
7126 return "Not supported";
7128 case DSERR_NODRIVER:
7131 case DSERR_ALREADYINITIALIZED:
7132 return "Already initialized";
7134 case DSERR_NOAGGREGATION:
7135 return "No aggregation";
7137 case DSERR_BUFFERLOST:
7138 return "Buffer lost";
7140 case DSERR_OTHERAPPHASPRIO:
7141 return "Another application already has priority";
7143 case DSERR_UNINITIALIZED:
7144 return "Uninitialized";
7147 return "DirectSound unknown error";
7150 //******************** End of __WINDOWS_DS__ *********************//
7154 #if defined(__LINUX_ALSA__)
7156 #include <alsa/asoundlib.h>
7159 // A structure to hold various information related to the ALSA API
7162 snd_pcm_t *handles[2]; // PCM handles; presumably [0]=playback, [1]=capture to match the file's mode indexing — TODO confirm
7165 pthread_cond_t runnable_cv; // condition variable used with the runnable flag below
7169 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7172 static void *alsaCallbackHandler( void * ptr ); // forward declaration of the ALSA callback thread routine
// Default constructor: all real setup happens later in probeDeviceOpen().
7174 RtApiAlsa :: RtApiAlsa()
7176 // Nothing to do here.
// Destructor: make sure any open stream is closed so PCM handles and
// buffers allocated by probeDeviceOpen() are released.
7179 RtApiAlsa :: ~RtApiAlsa()
7181 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count the ALSA PCM devices available on this system by walking every
// sound card ("hw:N") and, within each card, every PCM subdevice.  Also
// checks whether the "default" virtual device can be opened.  Probe
// failures are reported as warnings and the scan continues.
// NOTE(review): intervening original source lines are missing from this
// view (loop increments, closing braces and the return are not shown).
7184 unsigned int RtApiAlsa :: getDeviceCount( void )
7186 unsigned nDevices = 0;
7187 int result, subdevice, card;
7189 snd_ctl_t *handle = 0;
7191 // Count cards and devices
7193 snd_card_next( &card ); // card < 0 terminates the scan
7194 while ( card >= 0 ) {
7195 sprintf( name, "hw:%d", card );
7196 result = snd_ctl_open( &handle, name, 0 );
7199 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7200 errorText_ = errorStream_.str();
7201 error( RtAudioError::WARNING );
7206 result = snd_ctl_pcm_next_device( handle, &subdevice ); // advance to the next PCM device on this card
7208 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7209 errorText_ = errorStream_.str();
7210 error( RtAudioError::WARNING );
7213 if ( subdevice < 0 )
7219 snd_ctl_close( handle );
7220 snd_card_next( &card );
7223 result = snd_ctl_open( &handle, "default", 0 ); // the "default" device counts as one more if it opens
7226 snd_ctl_close( handle );
// Probe ALSA device `device` (an index into the hw:card,subdevice scan
// order, with "default" last) and fill in an RtAudio::DeviceInfo record.
// This first section locates the device name, validates the index, and
// falls back to cached results if the device is part of an open stream.
// NOTE(review): intervening original source lines are missing from this
// view (braces, else-branches and some statements are not shown).
7232 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7234 RtAudio::DeviceInfo info;
7235 info.probed = false; // remains false on any early error return
7237 unsigned nDevices = 0;
7238 int result, subdevice, card;
7240 snd_ctl_t *chandle = 0;
7242 // Count cards and devices
7245 snd_card_next( &card );
7246 while ( card >= 0 ) {
7247 sprintf( name, "hw:%d", card );
7248 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7251 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7252 errorText_ = errorStream_.str();
7253 error( RtAudioError::WARNING );
7258 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7260 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7261 errorText_ = errorStream_.str();
7262 error( RtAudioError::WARNING );
7265 if ( subdevice < 0 ) break;
7266 if ( nDevices == device ) {
7267 sprintf( name, "hw:%d,%d", card, subdevice ); // found the requested device; remember its hw name
7274 snd_ctl_close( chandle );
7275 snd_card_next( &card );
7278 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK ); // "default" is the last enumerated device
7279 if ( result == 0 ) {
7280 if ( nDevices == device ) {
7281 strcpy( name, "default" );
7287 if ( nDevices == 0 ) {
7288 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7289 error( RtAudioError::INVALID_USE );
7293 if ( device >= nDevices ) {
7294 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7295 error( RtAudioError::INVALID_USE );
7301 // If a stream is already open, we cannot probe the stream devices.
7302 // Thus, use the saved results.
7303 if ( stream_.state != STREAM_CLOSED &&
7304 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7305 snd_ctl_close( chandle );
7306 if ( device >= devices_.size() ) {
7307 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7308 error( RtAudioError::WARNING );
7311 return devices_[ device ]; // serve the snapshot taken by saveDeviceInfo()
7314 int openMode = SND_PCM_ASYNC;
7315 snd_pcm_stream_t stream;
7316 snd_pcm_info_t *pcminfo;
7317 snd_pcm_info_alloca( &pcminfo );
7319 snd_pcm_hw_params_t *params;
7320   snd_pcm_hw_params_alloca( &params );
// Second section of RtApiAlsa::getDeviceInfo(): probe playback channels,
// then capture channels, then (re)open the device in the wider direction
// to enumerate supported sample rates and native data formats, and finally
// resolve the card's display name.  Warnings (not fatal errors) are raised
// on individual probe failures.
// NOTE(review): intervening original source lines are missing from this
// view (braces, else-branches and the final return are not shown).
7322 // First try for playback unless default device (which has subdev -1)
7323 stream = SND_PCM_STREAM_PLAYBACK;
7324 snd_pcm_info_set_stream( pcminfo, stream );
7325 if ( subdevice != -1 ) {
7326 snd_pcm_info_set_device( pcminfo, subdevice );
7327 snd_pcm_info_set_subdevice( pcminfo, 0 );
7329 result = snd_ctl_pcm_info( chandle, pcminfo );
7331 // Device probably doesn't support playback.
7336 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7338 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7339 errorText_ = errorStream_.str();
7340 error( RtAudioError::WARNING );
7344 // The device is open ... fill the parameter structure.
7345 result = snd_pcm_hw_params_any( phandle, params );
7347 snd_pcm_close( phandle );
7348 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7349 errorText_ = errorStream_.str();
7350 error( RtAudioError::WARNING );
7354 // Get output channel information.
7356 result = snd_pcm_hw_params_get_channels_max( params, &value );
7358 snd_pcm_close( phandle );
7359 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7360 errorText_ = errorStream_.str();
7361 error( RtAudioError::WARNING );
7364 info.outputChannels = value;
7365 snd_pcm_close( phandle );
7368 stream = SND_PCM_STREAM_CAPTURE;
7369 snd_pcm_info_set_stream( pcminfo, stream );
7371 // Now try for capture unless default device (with subdev = -1)
7372 if ( subdevice != -1 ) {
7373 result = snd_ctl_pcm_info( chandle, pcminfo );
7374 snd_ctl_close( chandle );
7376 // Device probably doesn't support capture.
7377 if ( info.outputChannels == 0 ) return info; // neither direction usable: give up
7378 goto probeParameters; // playback-only: still probe rates/formats
7382 snd_ctl_close( chandle );
7384 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7386 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7387 errorText_ = errorStream_.str();
7388 error( RtAudioError::WARNING );
7389 if ( info.outputChannels == 0 ) return info;
7390 goto probeParameters;
7393 // The device is open ... fill the parameter structure.
7394 result = snd_pcm_hw_params_any( phandle, params );
7396 snd_pcm_close( phandle );
7397 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7398 errorText_ = errorStream_.str();
7399 error( RtAudioError::WARNING );
7400 if ( info.outputChannels == 0 ) return info;
7401 goto probeParameters;
7404 result = snd_pcm_hw_params_get_channels_max( params, &value );
7406 snd_pcm_close( phandle );
7407 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7408 errorText_ = errorStream_.str();
7409 error( RtAudioError::WARNING );
7410 if ( info.outputChannels == 0 ) return info;
7411 goto probeParameters;
7413 info.inputChannels = value;
7414 snd_pcm_close( phandle );
7416 // If device opens for both playback and capture, we determine the channels.
7417 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7418 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7420 // ALSA doesn't provide default devices so we'll use the first available one.
7421 if ( device == 0 && info.outputChannels > 0 )
7422 info.isDefaultOutput = true;
7423 if ( device == 0 && info.inputChannels > 0 )
7424 info.isDefaultInput = true;
7427 // At this point, we just need to figure out the supported data
7428 // formats and sample rates. We'll proceed by opening the device in
7429 // the direction with the maximum number of channels, or playback if
7430 // they are equal. This might limit our sample rate options, but so
7433 if ( info.outputChannels >= info.inputChannels )
7434 stream = SND_PCM_STREAM_PLAYBACK;
7436 stream = SND_PCM_STREAM_CAPTURE;
7437 snd_pcm_info_set_stream( pcminfo, stream );
7439 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7441 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7442 errorText_ = errorStream_.str();
7443 error( RtAudioError::WARNING );
7447 // The device is open ... fill the parameter structure.
7448 result = snd_pcm_hw_params_any( phandle, params );
7450 snd_pcm_close( phandle );
7451 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7452 errorText_ = errorStream_.str();
7453 error( RtAudioError::WARNING );
7457 // Test our discrete set of sample rate values.
7458 info.sampleRates.clear();
7459 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7460 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7461 info.sampleRates.push_back( SAMPLE_RATES[i] );
7463 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7464 info.preferredSampleRate = SAMPLE_RATES[i]; // prefer the highest supported rate <= 48 kHz
7467 if ( info.sampleRates.size() == 0 ) {
7468 snd_pcm_close( phandle );
7469 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7470 errorText_ = errorStream_.str();
7471 error( RtAudioError::WARNING );
7475 // Probe the supported data formats ... we don't care about endian-ness just yet
7476 snd_pcm_format_t format;
7477 info.nativeFormats = 0;
7478 format = SND_PCM_FORMAT_S8;
7479 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7480 info.nativeFormats |= RTAUDIO_SINT8;
7481 format = SND_PCM_FORMAT_S16;
7482 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7483 info.nativeFormats |= RTAUDIO_SINT16;
7484 format = SND_PCM_FORMAT_S24;
7485 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7486 info.nativeFormats |= RTAUDIO_SINT24;
7487 format = SND_PCM_FORMAT_S32;
7488 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7489 info.nativeFormats |= RTAUDIO_SINT32;
7490 format = SND_PCM_FORMAT_FLOAT;
7491 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7492 info.nativeFormats |= RTAUDIO_FLOAT32;
7493 format = SND_PCM_FORMAT_FLOAT64;
7494 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7495 info.nativeFormats |= RTAUDIO_FLOAT64;
7497 // Check that we have at least one supported format
7498 if ( info.nativeFormats == 0 ) {
7499 snd_pcm_close( phandle );
7500 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7501 errorText_ = errorStream_.str();
7502 error( RtAudioError::WARNING );
7506 // Get the device name
7508 result = snd_card_get_name( card, &cardname );
7509 if ( result >= 0 ) {
7510 sprintf( name, "hw:%s,%d", cardname, subdevice ); // replace numeric card id with its readable name
7515 // That's all ... close the device and return
7516 snd_pcm_close( phandle );
// Snapshot the info for every currently-visible device into devices_.
// Called before opening a stream because getDeviceInfo() cannot probe a
// device that is already open; it serves these cached results instead.
7521 void RtApiAlsa :: saveDeviceInfo( void )
7525 unsigned int nDevices = getDeviceCount();
7526 devices_.resize( nDevices );
7527 for ( unsigned int i=0; i<nDevices; i++ )
7528 devices_[i] = getDeviceInfo( i );
7531 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7532 unsigned int firstChannel, unsigned int sampleRate,
7533 RtAudioFormat format, unsigned int *bufferSize,
7534 RtAudio::StreamOptions *options )
7537 #if defined(__RTAUDIO_DEBUG__)
7539 snd_output_stdio_attach(&out, stderr, 0);
7542 // I'm not using the "plug" interface ... too much inconsistent behavior.
7544 unsigned nDevices = 0;
7545 int result, subdevice, card;
7549 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7550 snprintf(name, sizeof(name), "%s", "default");
7552 // Count cards and devices
7554 snd_card_next( &card );
7555 while ( card >= 0 ) {
7556 sprintf( name, "hw:%d", card );
7557 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7559 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7560 errorText_ = errorStream_.str();
7565 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7566 if ( result < 0 ) break;
7567 if ( subdevice < 0 ) break;
7568 if ( nDevices == device ) {
7569 sprintf( name, "hw:%d,%d", card, subdevice );
7570 snd_ctl_close( chandle );
7575 snd_ctl_close( chandle );
7576 snd_card_next( &card );
7579 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7580 if ( result == 0 ) {
7581 if ( nDevices == device ) {
7582 strcpy( name, "default" );
7583 snd_ctl_close( chandle );
7588 snd_ctl_close( chandle );
7590 if ( nDevices == 0 ) {
7591 // This should not happen because a check is made before this function is called.
7592 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7596 if ( device >= nDevices ) {
7597 // This should not happen because a check is made before this function is called.
7598 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7605 // The getDeviceInfo() function will not work for a device that is
7606 // already open. Thus, we'll probe the system before opening a
7607 // stream and save the results for use by getDeviceInfo().
7608 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7609 this->saveDeviceInfo();
7611 snd_pcm_stream_t stream;
7612 if ( mode == OUTPUT )
7613 stream = SND_PCM_STREAM_PLAYBACK;
7615 stream = SND_PCM_STREAM_CAPTURE;
7618 int openMode = SND_PCM_ASYNC;
7619 result = snd_pcm_open( &phandle, name, stream, openMode );
7621 if ( mode == OUTPUT )
7622 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7624 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7625 errorText_ = errorStream_.str();
7629 // Fill the parameter structure.
7630 snd_pcm_hw_params_t *hw_params;
7631 snd_pcm_hw_params_alloca( &hw_params );
7632 result = snd_pcm_hw_params_any( phandle, hw_params );
7634 snd_pcm_close( phandle );
7635 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7636 errorText_ = errorStream_.str();
7640 #if defined(__RTAUDIO_DEBUG__)
7641 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7642 snd_pcm_hw_params_dump( hw_params, out );
7645 // Set access ... check user preference.
7646 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7647 stream_.userInterleaved = false;
7648 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7650 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7651 stream_.deviceInterleaved[mode] = true;
7654 stream_.deviceInterleaved[mode] = false;
7657 stream_.userInterleaved = true;
7658 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7660 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7661 stream_.deviceInterleaved[mode] = false;
7664 stream_.deviceInterleaved[mode] = true;
7668 snd_pcm_close( phandle );
7669 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7670 errorText_ = errorStream_.str();
7674 // Determine how to set the device format.
7675 stream_.userFormat = format;
7676 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7678 if ( format == RTAUDIO_SINT8 )
7679 deviceFormat = SND_PCM_FORMAT_S8;
7680 else if ( format == RTAUDIO_SINT16 )
7681 deviceFormat = SND_PCM_FORMAT_S16;
7682 else if ( format == RTAUDIO_SINT24 )
7683 deviceFormat = SND_PCM_FORMAT_S24;
7684 else if ( format == RTAUDIO_SINT32 )
7685 deviceFormat = SND_PCM_FORMAT_S32;
7686 else if ( format == RTAUDIO_FLOAT32 )
7687 deviceFormat = SND_PCM_FORMAT_FLOAT;
7688 else if ( format == RTAUDIO_FLOAT64 )
7689 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7691 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7692 stream_.deviceFormat[mode] = format;
7696 // The user requested format is not natively supported by the device.
7697 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7698 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7699 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7703 deviceFormat = SND_PCM_FORMAT_FLOAT;
7704 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7705 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7709 deviceFormat = SND_PCM_FORMAT_S32;
7710 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7711 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7715 deviceFormat = SND_PCM_FORMAT_S24;
7716 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7717 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7721 deviceFormat = SND_PCM_FORMAT_S16;
7722 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7723 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7727 deviceFormat = SND_PCM_FORMAT_S8;
7728 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7729 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7733 // If we get here, no supported format was found.
7734 snd_pcm_close( phandle );
7735 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7736 errorText_ = errorStream_.str();
7740 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7742 snd_pcm_close( phandle );
7743 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7744 errorText_ = errorStream_.str();
7748   // Determine whether byte-swapping is necessary.
7749 stream_.doByteSwap[mode] = false;
7750 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7751 result = snd_pcm_format_cpu_endian( deviceFormat );
7753 stream_.doByteSwap[mode] = true;
7754 else if (result < 0) {
7755 snd_pcm_close( phandle );
7756 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7757 errorText_ = errorStream_.str();
7762 // Set the sample rate.
7763 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7765 snd_pcm_close( phandle );
7766 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7767 errorText_ = errorStream_.str();
7771 // Determine the number of channels for this device. We support a possible
7772 // minimum device channel number > than the value requested by the user.
7773 stream_.nUserChannels[mode] = channels;
7775 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7776 unsigned int deviceChannels = value;
7777 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7778 snd_pcm_close( phandle );
7779 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7780 errorText_ = errorStream_.str();
7784 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7786 snd_pcm_close( phandle );
7787 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7788 errorText_ = errorStream_.str();
7791 deviceChannels = value;
7792 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7793 stream_.nDeviceChannels[mode] = deviceChannels;
7795 // Set the device channels.
7796 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7798 snd_pcm_close( phandle );
7799 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7800 errorText_ = errorStream_.str();
7804 // Set the buffer (or period) size.
7806 snd_pcm_uframes_t periodSize = *bufferSize;
7807 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7809 snd_pcm_close( phandle );
7810 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7811 errorText_ = errorStream_.str();
7814 *bufferSize = periodSize;
7816 // Set the buffer number, which in ALSA is referred to as the "period".
7817 unsigned int periods = 0;
7818 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7819 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7820 if ( periods < 2 ) periods = 4; // a fairly safe default value
7821 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7823 snd_pcm_close( phandle );
7824 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7825 errorText_ = errorStream_.str();
7829 // If attempting to setup a duplex stream, the bufferSize parameter
7830 // MUST be the same in both directions!
7831 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7832 snd_pcm_close( phandle );
7833 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7834 errorText_ = errorStream_.str();
7838 stream_.bufferSize = *bufferSize;
7840 // Install the hardware configuration
7841 result = snd_pcm_hw_params( phandle, hw_params );
7843 snd_pcm_close( phandle );
7844 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7845 errorText_ = errorStream_.str();
7849 #if defined(__RTAUDIO_DEBUG__)
7850 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7851 snd_pcm_hw_params_dump( hw_params, out );
7854 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7855 snd_pcm_sw_params_t *sw_params = NULL;
7856 snd_pcm_sw_params_alloca( &sw_params );
7857 snd_pcm_sw_params_current( phandle, sw_params );
7858 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7859 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7860 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7862 // The following two settings were suggested by Theo Veenker
7863 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7864 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7866 // here are two options for a fix
7867 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7868 snd_pcm_uframes_t val;
7869 snd_pcm_sw_params_get_boundary( sw_params, &val );
7870 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7872 result = snd_pcm_sw_params( phandle, sw_params );
7874 snd_pcm_close( phandle );
7875 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7876 errorText_ = errorStream_.str();
7880 #if defined(__RTAUDIO_DEBUG__)
7881 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7882 snd_pcm_sw_params_dump( sw_params, out );
7885 // Set flags for buffer conversion
7886 stream_.doConvertBuffer[mode] = false;
7887 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7888 stream_.doConvertBuffer[mode] = true;
7889 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7890 stream_.doConvertBuffer[mode] = true;
7891 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7892 stream_.nUserChannels[mode] > 1 )
7893 stream_.doConvertBuffer[mode] = true;
7895 // Allocate the ApiHandle if necessary and then save.
7896 AlsaHandle *apiInfo = 0;
7897 if ( stream_.apiHandle == 0 ) {
7899 apiInfo = (AlsaHandle *) new AlsaHandle;
7901 catch ( std::bad_alloc& ) {
7902 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7906 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7907 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7911 stream_.apiHandle = (void *) apiInfo;
7912 apiInfo->handles[0] = 0;
7913 apiInfo->handles[1] = 0;
7916 apiInfo = (AlsaHandle *) stream_.apiHandle;
7918 apiInfo->handles[mode] = phandle;
7921 // Allocate necessary internal buffers.
7922 unsigned long bufferBytes;
7923 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7924 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7925 if ( stream_.userBuffer[mode] == NULL ) {
7926 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7930 if ( stream_.doConvertBuffer[mode] ) {
7932 bool makeBuffer = true;
7933 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7934 if ( mode == INPUT ) {
7935 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7936 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7937 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7942 bufferBytes *= *bufferSize;
7943 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7944 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7945 if ( stream_.deviceBuffer == NULL ) {
7946 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7952 stream_.sampleRate = sampleRate;
7953 stream_.nBuffers = periods;
7954 stream_.device[mode] = device;
7955 stream_.state = STREAM_STOPPED;
7957 // Setup the buffer conversion information structure.
7958 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7960 // Setup thread if necessary.
7961 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7962 // We had already set up an output stream.
7963 stream_.mode = DUPLEX;
7964 // Link the streams if possible.
7965 apiInfo->synchronized = false;
7966 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7967 apiInfo->synchronized = true;
7969 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7970 error( RtAudioError::WARNING );
7974 stream_.mode = mode;
7976 // Setup callback thread.
7977 stream_.callbackInfo.object = (void *) this;
7979 // Set the thread attributes for joinable and realtime scheduling
7980 // priority (optional). The higher priority will only take affect
7981 // if the program is run as root or suid. Note, under Linux
7982 // processes with CAP_SYS_NICE privilege, a user can change
7983 // scheduling policy and priority (thus need not be root). See
7984 // POSIX "capabilities".
7985 pthread_attr_t attr;
7986 pthread_attr_init( &attr );
7987 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7988 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7989 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7990 stream_.callbackInfo.doRealtime = true;
7991 struct sched_param param;
7992 int priority = options->priority;
7993 int min = sched_get_priority_min( SCHED_RR );
7994 int max = sched_get_priority_max( SCHED_RR );
7995 if ( priority < min ) priority = min;
7996 else if ( priority > max ) priority = max;
7997 param.sched_priority = priority;
7999 // Set the policy BEFORE the priority. Otherwise it fails.
8000 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8001 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8002 // This is definitely required. Otherwise it fails.
8003 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8004 pthread_attr_setschedparam(&attr, ¶m);
8007 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8009 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8012 stream_.callbackInfo.isRunning = true;
8013 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
8014 pthread_attr_destroy( &attr );
8016 // Failed. Try instead with default attributes.
8017 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
8019 stream_.callbackInfo.isRunning = false;
8020 errorText_ = "RtApiAlsa::error creating callback thread!";
8030 pthread_cond_destroy( &apiInfo->runnable_cv );
8031 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8032 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8034 stream_.apiHandle = 0;
8037 if ( phandle) snd_pcm_close( phandle );
8039 for ( int i=0; i<2; i++ ) {
8040 if ( stream_.userBuffer[i] ) {
8041 free( stream_.userBuffer[i] );
8042 stream_.userBuffer[i] = 0;
8046 if ( stream_.deviceBuffer ) {
8047 free( stream_.deviceBuffer );
8048 stream_.deviceBuffer = 0;
8051 stream_.state = STREAM_CLOSED;
// Close an open ALSA stream: wake and join the callback thread, drop any
// pending PCM data, close both PCM handles, and free all stream buffers.
// Issues a WARNING (does not throw) if no stream is open.
8055 void RtApiAlsa :: closeStream()
8057 if ( stream_.state == STREAM_CLOSED ) {
8058 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8059 error( RtAudioError::WARNING );
// Clear the run flag, then wake the callback thread (it may be blocked on
// runnable_cv while stopped) so pthread_join below cannot deadlock.
8063 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8064 stream_.callbackInfo.isRunning = false;
8065 MUTEX_LOCK( &stream_.mutex );
8066 if ( stream_.state == STREAM_STOPPED ) {
8067 apiInfo->runnable = true;
8068 pthread_cond_signal( &apiInfo->runnable_cv );
8070 MUTEX_UNLOCK( &stream_.mutex );
8071 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, abort playback/capture immediately via snd_pcm_drop.
8073 if ( stream_.state == STREAM_RUNNING ) {
8074 stream_.state = STREAM_STOPPED;
8075 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8076 snd_pcm_drop( apiInfo->handles[0] );
8077 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8078 snd_pcm_drop( apiInfo->handles[1] );
// Tear down the condition variable and close whichever PCM handles exist.
8082 pthread_cond_destroy( &apiInfo->runnable_cv );
8083 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8084 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8086 stream_.apiHandle = 0;
// Release the user buffers (index 0 = output, 1 = input) and the shared
// device conversion buffer.
8089 for ( int i=0; i<2; i++ ) {
8090 if ( stream_.userBuffer[i] ) {
8091 free( stream_.userBuffer[i] );
8092 stream_.userBuffer[i] = 0;
8096 if ( stream_.deviceBuffer ) {
8097 free( stream_.deviceBuffer );
8098 stream_.deviceBuffer = 0;
8101 stream_.mode = UNINITIALIZED;
8102 stream_.state = STREAM_CLOSED;
// Start a stopped stream: prepare the PCM device(s) if they are not already
// in the PREPARED state, then wake the callback thread via runnable_cv.
// Issues a WARNING if the stream is already running; raises SYSTEM_ERROR if
// device preparation failed.
8105 void RtApiAlsa :: startStream()
8107 // This method calls snd_pcm_prepare if the device isn't already in that state.
8110 if ( stream_.state == STREAM_RUNNING ) {
8111 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8112 error( RtAudioError::WARNING );
8116 MUTEX_LOCK( &stream_.mutex );
8118 #if defined( HAVE_GETTIMEOFDAY )
// Reset the reference timestamp used for stream-time bookkeeping.
8119 gettimeofday( &stream_.lastTickTimestamp, NULL );
8123 snd_pcm_state_t state;
8124 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8125 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback handle (index 0) when output is in use.
8126 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8127 state = snd_pcm_state( handle[0] );
8128 if ( state != SND_PCM_STATE_PREPARED ) {
8129 result = snd_pcm_prepare( handle[0] );
8131 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8132 errorText_ = errorStream_.str();
// Prepare the capture handle (index 1) only when it is not linked
// (synchronized) to the output handle, which would start it implicitly.
8138 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8139 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8140 state = snd_pcm_state( handle[1] );
8141 if ( state != SND_PCM_STATE_PREPARED ) {
8142 result = snd_pcm_prepare( handle[1] );
8144 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8145 errorText_ = errorStream_.str();
8151 stream_.state = STREAM_RUNNING;
// Release the callback thread blocked in callbackEvent().
8154 apiInfo->runnable = true;
8155 pthread_cond_signal( &apiInfo->runnable_cv );
8156 MUTEX_UNLOCK( &stream_.mutex );
8158 if ( result >= 0 ) return;
8159 error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream gracefully: queued output is drained
// (snd_pcm_drain) unless the handles are linked, in which case it is
// dropped; capture is always dropped. Issues a WARNING if already stopped;
// raises SYSTEM_ERROR on an ALSA failure.
8162 void RtApiAlsa :: stopStream()
8165 if ( stream_.state == STREAM_STOPPED ) {
8166 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8167 error( RtAudioError::WARNING );
8171 stream_.state = STREAM_STOPPED;
8172 MUTEX_LOCK( &stream_.mutex );
8175 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8176 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8177 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked (synchronized) handles must be dropped, not drained.
8178 if ( apiInfo->synchronized )
8179 result = snd_pcm_drop( handle[0] );
8181 result = snd_pcm_drain( handle[0] );
8183 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8184 errorText_ = errorStream_.str();
// Drop the capture handle unless it is linked to the output handle.
8189 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8190 result = snd_pcm_drop( handle[1] );
8192 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8193 errorText_ = errorStream_.str();
8199 apiInfo->runnable = false; // fixes high CPU usage when stopped
8200 MUTEX_UNLOCK( &stream_.mutex );
8202 if ( result >= 0 ) return;
8203 error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream immediately, discarding pending samples in both
// directions (snd_pcm_drop only — no drain). Issues a WARNING if already
// stopped; raises SYSTEM_ERROR on an ALSA failure.
8206 void RtApiAlsa :: abortStream()
8209 if ( stream_.state == STREAM_STOPPED ) {
8210 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8211 error( RtAudioError::WARNING );
8215 stream_.state = STREAM_STOPPED;
8216 MUTEX_LOCK( &stream_.mutex );
8219 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8220 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8221 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8222 result = snd_pcm_drop( handle[0] );
8224 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8225 errorText_ = errorStream_.str();
// Drop the capture handle unless it is linked to the output handle.
8230 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8231 result = snd_pcm_drop( handle[1] );
8233 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8234 errorText_ = errorStream_.str();
8240 apiInfo->runnable = false; // fixes high CPU usage when stopped
8241 MUTEX_UNLOCK( &stream_.mutex );
8243 if ( result >= 0 ) return;
8244 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the audio processing loop, run repeatedly by the
// callback thread (see alsaCallbackHandler): blocks on runnable_cv while
// stopped, invokes the user callback with xrun status, then performs the
// actual PCM read (input) and write (output) with optional byte-swap and
// format/channel conversion. A user-callback return of 2 aborts the
// stream; 1 stops it after this cycle.
8247 void RtApiAlsa :: callbackEvent()
8249 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Park the thread while the stream is stopped; startStream()/closeStream()
// set runnable and signal the condition variable to release it.
8250 if ( stream_.state == STREAM_STOPPED ) {
8251 MUTEX_LOCK( &stream_.mutex );
8252 while ( !apiInfo->runnable )
8253 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8255 if ( stream_.state != STREAM_RUNNING ) {
8256 MUTEX_UNLOCK( &stream_.mutex );
8259 MUTEX_UNLOCK( &stream_.mutex );
8262 if ( stream_.state == STREAM_CLOSED ) {
8263 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8264 error( RtAudioError::WARNING );
// Report any under/overflow detected on the previous cycle to the user
// callback, then clear the latched xrun flags.
8268 int doStopStream = 0;
8269 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8270 double streamTime = getStreamTime();
8271 RtAudioStreamStatus status = 0;
8272 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8273 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8274 apiInfo->xrun[0] = false;
8276 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8277 status |= RTAUDIO_INPUT_OVERFLOW;
8278 apiInfo->xrun[1] = false;
8280 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8281 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
8283 if ( doStopStream == 2 ) {
8288 MUTEX_LOCK( &stream_.mutex );
8290 // The state might change while waiting on a mutex.
8291 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8297 snd_pcm_sframes_t frames;
8298 RtAudioFormat format;
8299 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Input (capture) side: read one buffer from handle[1]. ----
8301 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8303 // Setup parameters.
// When converting, read into the device buffer at device channel count
// and format; otherwise read straight into the user buffer.
8304 if ( stream_.doConvertBuffer[1] ) {
8305 buffer = stream_.deviceBuffer;
8306 channels = stream_.nDeviceChannels[1];
8307 format = stream_.deviceFormat[1];
8310 buffer = stream_.userBuffer[1];
8311 channels = stream_.nUserChannels[1];
8312 format = stream_.userFormat;
8315 // Read samples from device in interleaved/non-interleaved format.
8316 if ( stream_.deviceInterleaved[1] )
8317 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the planar buffer.
8319 void *bufs[channels];
8320 size_t offset = stream_.bufferSize * formatBytes( format );
8321 for ( int i=0; i<channels; i++ )
8322 bufs[i] = (void *) (buffer + (i * offset));
8323 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8326 if ( result < (int) stream_.bufferSize ) {
8327 // Either an error or overrun occured.
// -EPIPE signals an xrun: latch the flag and re-prepare the device so
// the stream keeps going rather than dying on a single overrun.
8328 if ( result == -EPIPE ) {
8329 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8330 if ( state == SND_PCM_STATE_XRUN ) {
8331 apiInfo->xrun[1] = true;
8332 result = snd_pcm_prepare( handle[1] );
8334 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8335 errorText_ = errorStream_.str();
8339 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8340 errorText_ = errorStream_.str();
8344 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8345 errorText_ = errorStream_.str();
8347 error( RtAudioError::WARNING );
8351 // Do byte swapping if necessary.
8352 if ( stream_.doByteSwap[1] )
8353 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8355 // Do buffer conversion if necessary.
8356 if ( stream_.doConvertBuffer[1] )
8357 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8359 // Check stream latency
8360 result = snd_pcm_delay( handle[1], &frames );
8361 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Output (playback) side: write one buffer to handle[0]. ----
8366 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8368 // Setup parameters and do buffer conversion if necessary.
8369 if ( stream_.doConvertBuffer[0] ) {
8370 buffer = stream_.deviceBuffer;
8371 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8372 channels = stream_.nDeviceChannels[0];
8373 format = stream_.deviceFormat[0];
8376 buffer = stream_.userBuffer[0];
8377 channels = stream_.nUserChannels[0];
8378 format = stream_.userFormat;
8381 // Do byte swapping if necessary.
8382 if ( stream_.doByteSwap[0] )
8383 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8385 // Write samples to device in interleaved/non-interleaved format.
8386 if ( stream_.deviceInterleaved[0] )
8387 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
// Non-interleaved: one pointer per channel, planar layout.
8389 void *bufs[channels];
8390 size_t offset = stream_.bufferSize * formatBytes( format );
8391 for ( int i=0; i<channels; i++ )
8392 bufs[i] = (void *) (buffer + (i * offset));
8393 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8396 if ( result < (int) stream_.bufferSize ) {
8397 // Either an error or underrun occured.
// -EPIPE = underrun: latch xrun[0] and re-prepare so playback resumes.
8398 if ( result == -EPIPE ) {
8399 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8400 if ( state == SND_PCM_STATE_XRUN ) {
8401 apiInfo->xrun[0] = true;
8402 result = snd_pcm_prepare( handle[0] );
8404 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8405 errorText_ = errorStream_.str();
8408 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8411 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8412 errorText_ = errorStream_.str();
8416 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8417 errorText_ = errorStream_.str();
8419 error( RtAudioError::WARNING );
8423 // Check stream latency
8424 result = snd_pcm_delay( handle[0], &frames );
8425 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8429 MUTEX_UNLOCK( &stream_.mutex );
// Advance stream time; honor a deferred stop request from the callback.
8431 RtApi::tickStreamTime();
8432 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread: loops calling
// callbackEvent() until closeStream() clears CallbackInfo::isRunning.
// pthread_testcancel() provides a cancellation point each iteration.
8435 static void *alsaCallbackHandler( void *ptr )
8437 CallbackInfo *info = (CallbackInfo *) ptr;
8438 RtApiAlsa *object = (RtApiAlsa *) info->object;
8439 bool *isRunning = &info->isRunning;
8441 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8442 if ( info->doRealtime ) {
// Report whether the requested realtime (SCHED_RR) policy actually took
// effect — it silently fails without the needed privileges.
8443 std::cerr << "RtAudio alsa: " <<
8444 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8445 "running realtime scheduling" << std::endl;
8449 while ( *isRunning == true ) {
8450 pthread_testcancel();
8451 object->callbackEvent();
8454 pthread_exit( NULL );
8457 //******************** End of __LINUX_ALSA__ *********************//
8460 #if defined(__LINUX_PULSE__)
8462 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8463 // and Tristan Matthews.
8465 #include <pulse/error.h>
8466 #include <pulse/simple.h>
// Zero-terminated list of sample rates the PulseAudio backend advertises
// (iterated with a *sr != 0 sentinel in getDeviceInfo/probeDeviceOpen).
8469 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8470 44100, 48000, 96000, 0};
// Mapping between RtAudio sample formats and PulseAudio sample formats.
8472 struct rtaudio_pa_format_mapping_t {
8473 RtAudioFormat rtaudio_format;
8474 pa_sample_format_t pa_format;
// Table of natively supported format pairs, terminated by the
// {0, PA_SAMPLE_INVALID} sentinel; other formats go through internal
// conversion (see RtApiPulse::probeDeviceOpen).
8477 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8478 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8479 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8480 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8481 {0, PA_SAMPLE_INVALID}};
// Per-stream PulseAudio state: the simple-API playback/record connections
// (s_play/s_rec), the callback thread, and the runnable flag + condition
// variable used to park the thread while the stream is stopped.
// NOTE(review): some member declarations are not visible in this excerpt;
// the constructor initializes s_play, s_rec and runnable.
8483 struct PulseAudioHandle {
8487 pthread_cond_t runnable_cv;
8489 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: ensure any open stream is closed before the object dies.
8492 RtApiPulse::~RtApiPulse()
8494 if ( stream_.state != STREAM_CLOSED )
// Device count for the PulseAudio backend. This backend exposes a single
// virtual device (device index 0); body not shown in this excerpt —
// TODO(review): confirm against the full source.
8498 unsigned int RtApiPulse::getDeviceCount( void )
// Describe the single virtual PulseAudio device: stereo in/out, default
// for both directions, all rates from SUPPORTED_SAMPLERATES, and the three
// natively supported sample formats. The device index is ignored.
8503 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8505 RtAudio::DeviceInfo info;
8507 info.name = "PulseAudio";
8508 info.outputChannels = 2;
8509 info.inputChannels = 2;
8510 info.duplexChannels = 2;
8511 info.isDefaultOutput = true;
8512 info.isDefaultInput = true;
// Copy the zero-terminated static rate table into the info structure.
8514 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8515 info.sampleRates.push_back( *sr );
8517 info.preferredSampleRate = 48000;
8518 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread: loops calling
// callbackEvent() until closeStream() clears CallbackInfo::isRunning.
8523 static void *pulseaudio_callback( void * user )
8525 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8526 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8527 volatile bool *isRunning = &cbi->isRunning;
8529 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8530 if (cbi->doRealtime) {
// Report whether the requested realtime (SCHED_RR) policy took effect.
8531 std::cerr << "RtAudio pulse: " <<
8532 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8533 "running realtime scheduling" << std::endl;
8537 while ( *isRunning ) {
8538 pthread_testcancel();
8539 context->callbackEvent();
8542 pthread_exit( NULL );
// Close an open PulseAudio stream: stop and join the callback thread,
// flush and free the pa_simple connections, and release user buffers.
8545 void RtApiPulse::closeStream( void )
8547 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8549 stream_.callbackInfo.isRunning = false;
// Wake the callback thread if it is parked on runnable_cv so the join
// below cannot deadlock.
8551 MUTEX_LOCK( &stream_.mutex );
8552 if ( stream_.state == STREAM_STOPPED ) {
8553 pah->runnable = true;
8554 pthread_cond_signal( &pah->runnable_cv );
8556 MUTEX_UNLOCK( &stream_.mutex );
8558 pthread_join( pah->thread, 0 );
// Flush remaining playback data, then free both simple-API connections.
8559 if ( pah->s_play ) {
8560 pa_simple_flush( pah->s_play, NULL );
8561 pa_simple_free( pah->s_play );
8564 pa_simple_free( pah->s_rec );
8566 pthread_cond_destroy( &pah->runnable_cv );
8568 stream_.apiHandle = 0;
// Release the user buffers (0 = output, 1 = input).
8571 if ( stream_.userBuffer[0] ) {
8572 free( stream_.userBuffer[0] );
8573 stream_.userBuffer[0] = 0;
8575 if ( stream_.userBuffer[1] ) {
8576 free( stream_.userBuffer[1] );
8577 stream_.userBuffer[1] = 0;
8580 stream_.state = STREAM_CLOSED;
8581 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio processing loop, run repeatedly by the
// callback thread (see pulseaudio_callback): parks on runnable_cv while
// stopped, invokes the user callback, then performs the blocking
// pa_simple_write / pa_simple_read with optional format conversion.
// A user-callback return of 2 aborts the stream; 1 stops it afterwards.
8584 void RtApiPulse::callbackEvent( void )
8586 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Park the thread while the stream is stopped; startStream()/closeStream()
// set runnable and signal the condition variable.
8588 if ( stream_.state == STREAM_STOPPED ) {
8589 MUTEX_LOCK( &stream_.mutex );
8590 while ( !pah->runnable )
8591 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8593 if ( stream_.state != STREAM_RUNNING ) {
8594 MUTEX_UNLOCK( &stream_.mutex );
8597 MUTEX_UNLOCK( &stream_.mutex );
8600 if ( stream_.state == STREAM_CLOSED ) {
8601 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8602 "this shouldn't happen!";
8603 error( RtAudioError::WARNING );
8607 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8608 double streamTime = getStreamTime();
8609 RtAudioStreamStatus status = 0;
8610 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8611 stream_.bufferSize, streamTime, status,
8612 stream_.callbackInfo.userData );
8614 if ( doStopStream == 2 ) {
8619 MUTEX_LOCK( &stream_.mutex );
// When converting, the server-side transfer uses the shared device buffer;
// otherwise it goes straight to/from the user buffers.
8620 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8621 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8623 if ( stream_.state != STREAM_RUNNING )
// ---- Playback: convert if needed, then blocking write to the server. ----
8628 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8629 if ( stream_.doConvertBuffer[OUTPUT] ) {
8630 convertBuffer( stream_.deviceBuffer,
8631 stream_.userBuffer[OUTPUT],
8632 stream_.convertInfo[OUTPUT] );
8633 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8634 formatBytes( stream_.deviceFormat[OUTPUT] );
8636 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8637 formatBytes( stream_.userFormat );
8639 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8640 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8641 pa_strerror( pa_error ) << ".";
8642 errorText_ = errorStream_.str();
8643 error( RtAudioError::WARNING );
// ---- Capture: blocking read from the server, then convert if needed. ----
8647 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8648 if ( stream_.doConvertBuffer[INPUT] )
8649 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8650 formatBytes( stream_.deviceFormat[INPUT] );
8652 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8653 formatBytes( stream_.userFormat );
8655 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8656 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8657 pa_strerror( pa_error ) << ".";
8658 errorText_ = errorStream_.str();
8659 error( RtAudioError::WARNING );
8661 if ( stream_.doConvertBuffer[INPUT] ) {
8662 convertBuffer( stream_.userBuffer[INPUT],
8663 stream_.deviceBuffer,
8664 stream_.convertInfo[INPUT] );
8669 MUTEX_UNLOCK( &stream_.mutex );
// Advance stream time; honor a deferred stop request from the callback.
8670 RtApi::tickStreamTime();
8672 if ( doStopStream == 1 )
// Start a stopped PulseAudio stream by flipping the state to RUNNING and
// waking the callback thread parked on runnable_cv. INVALID_USE if no
// stream is open; WARNING if already running.
8676 void RtApiPulse::startStream( void )
8678 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8680 if ( stream_.state == STREAM_CLOSED ) {
8681 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8682 error( RtAudioError::INVALID_USE );
8685 if ( stream_.state == STREAM_RUNNING ) {
8686 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8687 error( RtAudioError::WARNING );
8691 MUTEX_LOCK( &stream_.mutex );
8693 #if defined( HAVE_GETTIMEOFDAY )
// Reset the reference timestamp used for stream-time bookkeeping.
8694 gettimeofday( &stream_.lastTickTimestamp, NULL );
8697 stream_.state = STREAM_RUNNING;
// Release the callback thread blocked in callbackEvent().
8699 pah->runnable = true;
8700 pthread_cond_signal( &pah->runnable_cv );
8701 MUTEX_UNLOCK( &stream_.mutex );
// Stop a running PulseAudio stream gracefully, draining queued playback
// data (pa_simple_drain) before marking the stream STOPPED. INVALID_USE if
// no stream is open; WARNING if already stopped; SYSTEM_ERROR if the drain
// fails.
8704 void RtApiPulse::stopStream( void )
8706 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8708 if ( stream_.state == STREAM_CLOSED ) {
8709 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8710 error( RtAudioError::INVALID_USE );
8713 if ( stream_.state == STREAM_STOPPED ) {
8714 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8715 error( RtAudioError::WARNING );
// Set STOPPED before draining so the callback thread parks on its next
// iteration instead of issuing further transfers.
8719 stream_.state = STREAM_STOPPED;
8720 MUTEX_LOCK( &stream_.mutex );
8722 if ( pah && pah->s_play ) {
// Let any queued playback samples finish before returning.
8724 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8725 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8726 pa_strerror( pa_error ) << ".";
8727 errorText_ = errorStream_.str();
8728 MUTEX_UNLOCK( &stream_.mutex );
8729 error( RtAudioError::SYSTEM_ERROR );
8734 stream_.state = STREAM_STOPPED;
8735 MUTEX_UNLOCK( &stream_.mutex );
// Stop a running PulseAudio stream immediately, discarding queued playback
// data (pa_simple_flush rather than drain). INVALID_USE if no stream is
// open; WARNING if already stopped; SYSTEM_ERROR if the flush fails.
8738 void RtApiPulse::abortStream( void )
8740 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8742 if ( stream_.state == STREAM_CLOSED ) {
8743 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8744 error( RtAudioError::INVALID_USE );
8747 if ( stream_.state == STREAM_STOPPED ) {
8748 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8749 error( RtAudioError::WARNING );
// Set STOPPED before flushing so the callback thread parks on its next
// iteration instead of issuing further transfers.
8753 stream_.state = STREAM_STOPPED;
8754 MUTEX_LOCK( &stream_.mutex );
8756 if ( pah && pah->s_play ) {
// Discard any queued playback samples instead of waiting for them.
8758 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8759 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8760 pa_strerror( pa_error ) << ".";
8761 errorText_ = errorStream_.str();
8762 MUTEX_UNLOCK( &stream_.mutex );
8763 error( RtAudioError::SYSTEM_ERROR );
8768 stream_.state = STREAM_STOPPED;
8769 MUTEX_UNLOCK( &stream_.mutex );
8772 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8773 unsigned int channels, unsigned int firstChannel,
8774 unsigned int sampleRate, RtAudioFormat format,
8775 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8777 PulseAudioHandle *pah = 0;
8778 unsigned long bufferBytes = 0;
8781 if ( device != 0 ) return false;
8782 if ( mode != INPUT && mode != OUTPUT ) return false;
8783 if ( channels != 1 && channels != 2 ) {
8784 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8787 ss.channels = channels;
8789 if ( firstChannel != 0 ) return false;
8791 bool sr_found = false;
8792 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8793 if ( sampleRate == *sr ) {
8795 stream_.sampleRate = sampleRate;
8796 ss.rate = sampleRate;
8801 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8806 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8807 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8808 if ( format == sf->rtaudio_format ) {
8810 stream_.userFormat = sf->rtaudio_format;
8811 stream_.deviceFormat[mode] = stream_.userFormat;
8812 ss.format = sf->pa_format;
8816 if ( !sf_found ) { // Use internal data format conversion.
8817 stream_.userFormat = format;
8818 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8819 ss.format = PA_SAMPLE_FLOAT32LE;
8822 // Set other stream parameters.
8823 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8824 else stream_.userInterleaved = true;
8825 stream_.deviceInterleaved[mode] = true;
8826 stream_.nBuffers = 1;
8827 stream_.doByteSwap[mode] = false;
8828 stream_.nUserChannels[mode] = channels;
8829 stream_.nDeviceChannels[mode] = channels + firstChannel;
8830 stream_.channelOffset[mode] = 0;
8831 std::string streamName = "RtAudio";
8833 // Set flags for buffer conversion.
8834 stream_.doConvertBuffer[mode] = false;
8835 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8836 stream_.doConvertBuffer[mode] = true;
8837 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8838 stream_.doConvertBuffer[mode] = true;
8840 // Allocate necessary internal buffers.
8841 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8842 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8843 if ( stream_.userBuffer[mode] == NULL ) {
8844 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8847 stream_.bufferSize = *bufferSize;
8849 if ( stream_.doConvertBuffer[mode] ) {
8851 bool makeBuffer = true;
8852 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8853 if ( mode == INPUT ) {
8854 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8855 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8856 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8861 bufferBytes *= *bufferSize;
8862 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8863 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8864 if ( stream_.deviceBuffer == NULL ) {
8865 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8871 stream_.device[mode] = device;
8873 // Setup the buffer conversion information structure.
8874 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8876 if ( !stream_.apiHandle ) {
8877 PulseAudioHandle *pah = new PulseAudioHandle;
8879 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8883 stream_.apiHandle = pah;
8884 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8885 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8889 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8892 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8895 pa_buffer_attr buffer_attr;
8896 buffer_attr.fragsize = bufferBytes;
8897 buffer_attr.maxlength = -1;
8899 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8900 if ( !pah->s_rec ) {
8901 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8906 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8907 if ( !pah->s_play ) {
8908 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8916 if ( stream_.mode == UNINITIALIZED )
8917 stream_.mode = mode;
8918 else if ( stream_.mode == mode )
8921 stream_.mode = DUPLEX;
8923 if ( !stream_.callbackInfo.isRunning ) {
8924 stream_.callbackInfo.object = this;
8926 stream_.state = STREAM_STOPPED;
8927 // Set the thread attributes for joinable and realtime scheduling
8928 // priority (optional). The higher priority will only take affect
8929 // if the program is run as root or suid. Note, under Linux
8930 // processes with CAP_SYS_NICE privilege, a user can change
8931 // scheduling policy and priority (thus need not be root). See
8932 // POSIX "capabilities".
8933 pthread_attr_t attr;
8934 pthread_attr_init( &attr );
8935 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8936 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8937 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8938 stream_.callbackInfo.doRealtime = true;
8939 struct sched_param param;
8940 int priority = options->priority;
8941 int min = sched_get_priority_min( SCHED_RR );
8942 int max = sched_get_priority_max( SCHED_RR );
8943 if ( priority < min ) priority = min;
8944 else if ( priority > max ) priority = max;
8945 param.sched_priority = priority;
8947 // Set the policy BEFORE the priority. Otherwise it fails.
8948 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8949 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8950 // This is definitely required. Otherwise it fails.
8951 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8952 pthread_attr_setschedparam(&attr, ¶m);
8955 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8957 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8960 stream_.callbackInfo.isRunning = true;
8961 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8962 pthread_attr_destroy(&attr);
8964 // Failed. Try instead with default attributes.
8965 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8967 stream_.callbackInfo.isRunning = false;
8968 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8977 if ( pah && stream_.callbackInfo.isRunning ) {
8978 pthread_cond_destroy( &pah->runnable_cv );
8980 stream_.apiHandle = 0;
8983 for ( int i=0; i<2; i++ ) {
8984 if ( stream_.userBuffer[i] ) {
8985 free( stream_.userBuffer[i] );
8986 stream_.userBuffer[i] = 0;
8990 if ( stream_.deviceBuffer ) {
8991 free( stream_.deviceBuffer );
8992 stream_.deviceBuffer = 0;
8995 stream_.state = STREAM_CLOSED;
8999 //******************** End of __LINUX_PULSE__ *********************//
9002 #if defined(__LINUX_OSS__)
9005 #include <sys/ioctl.h>
9008 #include <sys/soundcard.h>
9012 static void *ossCallbackHandler(void * ptr);
// A structure to hold various information related to the OSS API
// NOTE(review): this listing omits the enclosing 'struct OssHandle {' line,
// the xrun[2]/triggered members, the constructor header, and the closing
// '};' — confirm against the full source before editing.
  int id[2]; // device ids (playback fd in [0], capture fd in [1])
  pthread_cond_t runnable;  // signaled to wake the callback thread when the stream (re)starts
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }  // constructor initializer: zero fds, clear xrun flags
9026 RtApiOss :: RtApiOss()
9028 // Nothing to do here.
9031 RtApiOss :: ~RtApiOss()
9033 if ( stream_.state != STREAM_CLOSED ) closeStream();
9036 unsigned int RtApiOss :: getDeviceCount( void )
9038 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9039 if ( mixerfd == -1 ) {
9040 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9041 error( RtAudioError::WARNING );
9045 oss_sysinfo sysinfo;
9046 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9048 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9049 error( RtAudioError::WARNING );
9054 return sysinfo.numaudios;
// Query the OSS mixer for information about audio device 'device': channel
// counts, supported sample formats/rates, and a preferred rate (<= 48 kHz
// when possible). Returns 'info' with probed == false on any failure.
// NOTE(review): this listing elides the opening brace, several early
// 'return info;' / close(mixerfd) lines on the error paths, and closing
// braces — confirm the cleanup/return structure against the full source.
RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = false;
  int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
  if ( mixerfd == -1 ) {
    errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
    error( RtAudioError::WARNING );
  oss_sysinfo sysinfo;
  int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
  if ( result == -1 ) {
    // SNDCTL_SYSINFO only exists in OSS v4+.
    errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
    error( RtAudioError::WARNING );
  unsigned nDevices = sysinfo.numaudios;
  if ( nDevices == 0 ) {
    errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
  if ( device >= nDevices ) {
    errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
  oss_audioinfo ainfo;
  // presumably ainfo.dev is set to 'device' on an elided line before this
  // ioctl — TODO confirm.
  result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
  if ( result == -1 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // Capability flags: max_channels applies to whichever directions the
  // device supports; duplex channel count is the min of the two.
  if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
  if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
  if ( ainfo.caps & PCM_CAP_DUPLEX ) {
    if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
      info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
  // Probe data formats ... do for input
  unsigned long mask = ainfo.iformats;
  if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
    info.nativeFormats |= RTAUDIO_SINT16;
  if ( mask & AFMT_S8 )
    info.nativeFormats |= RTAUDIO_SINT8;
  if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
    info.nativeFormats |= RTAUDIO_SINT32;
  if ( mask & AFMT_FLOAT )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
    info.nativeFormats |= RTAUDIO_SINT24;
  // Check that we have at least one supported format
  if ( info.nativeFormats == 0 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // Probe the supported sample rates.
  info.sampleRates.clear();
  if ( ainfo.nrates ) {
    // Device reports a discrete rate list: intersect it with RtAudio's
    // canonical SAMPLE_RATES table.
    for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
      for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
        if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
          info.sampleRates.push_back( SAMPLE_RATES[k] );
          // Prefer the highest rate not exceeding 48 kHz.
          if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
            info.preferredSampleRate = SAMPLE_RATES[k];
    // Check min and max rate values;
    // Device reports a continuous range instead of a list.
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );
        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];
  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  info.name = ainfo.name;
// Open OSS device 'device' for the given mode (OUTPUT or INPUT), negotiate
// channels, sample format, fragment/buffer size and rate, allocate the user
// and (if needed) conversion buffers, and spawn the callback thread on the
// first open. Returns SUCCESS/FAILURE (elided return statements in this
// listing). Called once per direction; a second call on the same device
// upgrades the stream to DUPLEX.
// NOTE(review): this listing elides opening/closing braces, 'return FAILURE;'
// lines, the 'int flags'/'int fd'/'int mask'/'int buffers' declarations, the
// 'error:' cleanup label, and a 'try {' — confirm against the full source.
bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                  unsigned int firstChannel, unsigned int sampleRate,
                                  RtAudioFormat format, unsigned int *bufferSize,
                                  RtAudio::StreamOptions *options )
  int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
  if ( mixerfd == -1 ) {
    errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
  oss_sysinfo sysinfo;
  int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
  if ( result == -1 ) {
    errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
  unsigned nDevices = sysinfo.numaudios;
  if ( nDevices == 0 ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
  if ( device >= nDevices ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
  oss_audioinfo ainfo;
  result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
  if ( result == -1 ) {
    // NOTE(review): message says getDeviceInfo but we are in probeDeviceOpen
    // — likely a copy/paste slip in the original; left as-is (runtime string).
    errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
    errorText_ = errorStream_.str();
  // Check if device supports input or output
  if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
       ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
    if ( mode == OUTPUT )
      errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
      errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
    errorText_ = errorStream_.str();
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( mode == OUTPUT )
  else { // mode == INPUT
    if (stream_.mode == OUTPUT && stream_.device[0] == device) {
      // We just set the same device for playback ... close and reopen for duplex (OSS only).
      close( handle->id[0] );
    if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
      errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
      errorText_ = errorStream_.str();
    // Check that the number previously set channels is the same.
    if ( stream_.nUserChannels[0] != channels ) {
      errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
      errorText_ = errorStream_.str();
  // Set exclusive access if specified.
  if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
  // Try to open the device.
  fd = open( ainfo.devnode, flags, 0 );
    if ( errno == EBUSY )
      errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
      errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
  // For duplex operation, specifically set this mode (this doesn't seem to work).
  // NOTE(review): 'flags | O_RDWR' is always true (bitwise-OR, not a test of
  // flags == O_RDWR) — present upstream too; flagging, not changing, here.
  if ( flags | O_RDWR ) {
    result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
    if ( result == -1) {
      errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
      errorText_ = errorStream_.str();
  // Check the device channel support.
  stream_.nUserChannels[mode] = channels;
  if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
    errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
    errorText_ = errorStream_.str();
  // Set the number of channels.
  int deviceChannels = channels + firstChannel;
  result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
  if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
    errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
  stream_.nDeviceChannels[mode] = deviceChannels;
  // Get the data format mask
  result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
  if ( result == -1 ) {
    errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
    errorText_ = errorStream_.str();
  // Determine how to set the device format.
  // Prefer the native-endian OSS format matching the user's request; fall
  // back to the opposite-endian variant with byte swapping enabled.
  stream_.userFormat = format;
  int deviceFormat = -1;
  stream_.doByteSwap[mode] = false;
  if ( format == RTAUDIO_SINT8 ) {
    if ( mask & AFMT_S8 ) {
      deviceFormat = AFMT_S8;
      stream_.deviceFormat[mode] = RTAUDIO_SINT8;
  else if ( format == RTAUDIO_SINT16 ) {
    if ( mask & AFMT_S16_NE ) {
      deviceFormat = AFMT_S16_NE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT16;
    else if ( mask & AFMT_S16_OE ) {
      deviceFormat = AFMT_S16_OE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT16;
      stream_.doByteSwap[mode] = true;
  else if ( format == RTAUDIO_SINT24 ) {
    if ( mask & AFMT_S24_NE ) {
      deviceFormat = AFMT_S24_NE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT24;
    else if ( mask & AFMT_S24_OE ) {
      deviceFormat = AFMT_S24_OE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT24;
      stream_.doByteSwap[mode] = true;
  else if ( format == RTAUDIO_SINT32 ) {
    if ( mask & AFMT_S32_NE ) {
      deviceFormat = AFMT_S32_NE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT32;
    else if ( mask & AFMT_S32_OE ) {
      deviceFormat = AFMT_S32_OE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT32;
      stream_.doByteSwap[mode] = true;
  if ( deviceFormat == -1 ) {
    // The user requested format is not natively supported by the device.
    // Pick the best available device format; RtAudio's buffer conversion
    // will translate to/from the user format.
    if ( mask & AFMT_S16_NE ) {
      deviceFormat = AFMT_S16_NE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT16;
    else if ( mask & AFMT_S32_NE ) {
      deviceFormat = AFMT_S32_NE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT32;
    else if ( mask & AFMT_S24_NE ) {
      deviceFormat = AFMT_S24_NE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT24;
    else if ( mask & AFMT_S16_OE ) {
      deviceFormat = AFMT_S16_OE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT16;
      stream_.doByteSwap[mode] = true;
    else if ( mask & AFMT_S32_OE ) {
      deviceFormat = AFMT_S32_OE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT32;
      stream_.doByteSwap[mode] = true;
    else if ( mask & AFMT_S24_OE ) {
      deviceFormat = AFMT_S24_OE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT24;
      stream_.doByteSwap[mode] = true;
    else if ( mask & AFMT_S8) {
      deviceFormat = AFMT_S8;
      stream_.deviceFormat[mode] = RTAUDIO_SINT8;
  if ( stream_.deviceFormat[mode] == 0 ) {
    // This really shouldn't happen ...
    errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();
  // Set the data format.
  int temp = deviceFormat;
  result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
  if ( result == -1 || deviceFormat != temp ) {
    errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
  // Attempt to set the buffer size. According to OSS, the minimum
  // number of buffers is two. The supposed minimum buffer size is 16
  // bytes, so that will be our lower bound. The argument to this
  // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
  // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
  // We'll check the actual value used near the end of the setup
  int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
  if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
  if ( options ) buffers = options->numberOfBuffers;
  if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
  if ( buffers < 2 ) buffers = 3;
  // Encode buffer count (high 16 bits) and log2(bytes) (low 16 bits).
  temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
  result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
  if ( result == -1 ) {
    errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
  stream_.nBuffers = buffers;
  // Save buffer size (in sample frames).
  *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
  stream_.bufferSize = *bufferSize;
  // Set the sample rate.
  int srate = sampleRate;
  result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
  if ( result == -1 ) {
    errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
  // Verify the sample rate setup worked.
  // OSS may deliver a nearby rate; tolerate up to 100 Hz of deviation.
  if ( abs( srate - (int)sampleRate ) > 100 ) {
    errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
    errorText_ = errorStream_.str();
  stream_.sampleRate = sampleRate;
  if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
    // We're doing duplex setup here.
    stream_.deviceFormat[0] = stream_.deviceFormat[1];
    stream_.nDeviceChannels[0] = deviceChannels;
  // Set interleaving parameters.
  stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true;
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
    stream_.userInterleaved = false;
  // Set flags for buffer conversion
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;
  // Allocate the stream handles if necessary and then save.
  if ( stream_.apiHandle == 0 ) {
      handle = new OssHandle;
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
    if ( pthread_cond_init( &handle->runnable, NULL ) ) {
      errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
    stream_.apiHandle = (void *) handle;
    handle = (OssHandle *) stream_.apiHandle;
  handle->id[mode] = fd;
  // Allocate necessary internal buffers.
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
  if ( stream_.doConvertBuffer[mode] ) {
    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( mode == INPUT ) {
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        // Reuse the output-side device buffer when it is already big enough.
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
        if ( bufferBytes <= bytesOut ) makeBuffer = false;
      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
  stream_.device[mode] = device;
  stream_.state = STREAM_STOPPED;
  // Setup the buffer conversion information structure.
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
  // Setup thread if necessary.
  if ( stream_.mode == OUTPUT && mode == INPUT ) {
    // We had already set up an output stream.
    stream_.mode = DUPLEX;
    if ( stream_.device[0] == device ) handle->id[0] = fd;
    stream_.mode = mode;
    // Setup callback thread.
    stream_.callbackInfo.object = (void *) this;
    // Set the thread attributes for joinable and realtime scheduling
    // priority. The higher priority will only take affect if the
    // program is run as root or suid.
    pthread_attr_t attr;
    pthread_attr_init( &attr );
    pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
#ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
    if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
      stream_.callbackInfo.doRealtime = true;
      struct sched_param param;
      int priority = options->priority;
      // Clamp the requested priority into the valid SCHED_RR range.
      int min = sched_get_priority_min( SCHED_RR );
      int max = sched_get_priority_max( SCHED_RR );
      if ( priority < min ) priority = min;
      else if ( priority > max ) priority = max;
      param.sched_priority = priority;
      // Set the policy BEFORE the priority. Otherwise it fails.
      pthread_attr_setschedpolicy(&attr, SCHED_RR);
      pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
      // This is definitely required. Otherwise it fails.
      pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
      // NOTE(review): '¶m' below is mojibake for '&param' (HTML entity
      // '&para;' leaked into the source) — fix the encoding in the file.
      pthread_attr_setschedparam(&attr, ¶m);
      pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
    pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
    stream_.callbackInfo.isRunning = true;
    result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
    pthread_attr_destroy( &attr );
      // Failed. Try instead with default attributes.
      result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
        stream_.callbackInfo.isRunning = false;
        errorText_ = "RtApiOss::error creating callback thread!";
  // Error-path cleanup (the 'error:' label is elided in this listing):
  // destroy the condition variable, close any open device fds, and free
  // the handle and all allocated buffers.
  pthread_cond_destroy( &handle->runnable );
  if ( handle->id[0] ) close( handle->id[0] );
  if ( handle->id[1] ) close( handle->id[1] );
  stream_.apiHandle = 0;
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  stream_.state = STREAM_CLOSED;
// Close the stream: stop the callback thread (waking it first if the stream
// is merely stopped), halt any running I/O, close the device fds, and free
// the handle and all buffers. Safe to call only when a stream is open.
// NOTE(review): opening/closing braces and a 'return;' after the warning are
// elided in this listing — confirm against the full source.
void RtApiOss :: closeStream()
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  stream_.callbackInfo.isRunning = false;
  MUTEX_LOCK( &stream_.mutex );
  // A stopped callback thread is blocked on the condition variable; signal
  // it so it can observe isRunning == false and exit.
  if ( stream_.state == STREAM_STOPPED )
    pthread_cond_signal( &handle->runnable );
  MUTEX_UNLOCK( &stream_.mutex );
  pthread_join( stream_.callbackInfo.thread, NULL );
  if ( stream_.state == STREAM_RUNNING ) {
    // Halt in-flight I/O on the playback and/or capture fds.
    if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
      ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
      ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    stream_.state = STREAM_STOPPED;
    pthread_cond_destroy( &handle->runnable );
    if ( handle->id[0] ) close( handle->id[0] );
    if ( handle->id[1] ) close( handle->id[1] );
    stream_.apiHandle = 0;
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
9670 void RtApiOss :: startStream()
9673 if ( stream_.state == STREAM_RUNNING ) {
9674 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9675 error( RtAudioError::WARNING );
9679 MUTEX_LOCK( &stream_.mutex );
9681 #if defined( HAVE_GETTIMEOFDAY )
9682 gettimeofday( &stream_.lastTickTimestamp, NULL );
9685 stream_.state = STREAM_RUNNING;
9687 // No need to do anything else here ... OSS automatically starts
9688 // when fed samples.
9690 MUTEX_UNLOCK( &stream_.mutex );
9692 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9693 pthread_cond_signal( &handle->runnable );
// Stop the stream gracefully: flush the playback device with a few buffers
// of silence (so queued audio drains rather than clicks), halt both
// directions, and mark the stream stopped. Reports SYSTEM_ERROR if any
// ioctl failed.
// NOTE(review): this listing elides braces, early 'return's, the
// 'int result = 0;' and buffer/sample declarations, and 'goto unlock'-style
// exits — confirm against the full source.
void RtApiOss :: stopStream()
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
  MUTEX_LOCK( &stream_.mutex );
  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_UNLOCK( &stream_.mutex );
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    // Flush the output with zeros a few times.
    RtAudioFormat format;
    // Use whichever buffer actually feeds the device (converted or user).
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    memset( buffer, 0, samples * formatBytes(format) );
    for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      if ( result == -1 ) {
        errorText_ = "RtApiOss::stopStream: audio write error.";
        error( RtAudioError::WARNING );
    result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
    handle->triggered = false;
  // Halt the capture fd too, unless duplex shares a single fd.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
    result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );
  if ( result != -1 ) return;
  error( RtAudioError::SYSTEM_ERROR );
// Stop the stream immediately: halt both directions without draining queued
// output (unlike stopStream, no zero-flush). Reports SYSTEM_ERROR if any
// ioctl failed.
// NOTE(review): this listing elides braces, early 'return's and the
// 'int result = 0;' declaration — confirm against the full source.
void RtApiOss :: abortStream()
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
  MUTEX_LOCK( &stream_.mutex );
  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_UNLOCK( &stream_.mutex );
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
    handle->triggered = false;
  // Halt the capture fd too, unless duplex shares a single fd.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
    result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );
  if ( result != -1 ) return;
  error( RtAudioError::SYSTEM_ERROR );
// One iteration of the callback thread: wait while stopped, invoke the user
// callback with xrun status, then write the produced output to the playback
// fd and/or read input from the capture fd (with format conversion and byte
// swapping as configured). Blocking write/read paces the callback loop.
// NOTE(review): this listing elides braces, early 'return's, the 'unlock:'
// label, and the 'int result;'/'char *buffer;'/'int samples;'/'int trig'
// declarations — confirm against the full source.
void RtApiOss :: callbackEvent()
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    // Park here until startStream() signals runnable.
    MUTEX_LOCK( &stream_.mutex );
    pthread_cond_wait( &handle->runnable, &stream_.mutex );
    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
    MUTEX_UNLOCK( &stream_.mutex );
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
  // Invoke user callback to get fresh output data.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  // Report and clear any under/overflow flags set by earlier I/O errors.
  if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    handle->xrun[0] = false;
  if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    handle->xrun[1] = false;
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
  if ( doStopStream == 2 ) {
    this->abortStream();
  MUTEX_LOCK( &stream_.mutex );
  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;
  RtAudioFormat format;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer( buffer, samples, format );
    if ( stream_.mode == DUPLEX && handle->triggered == false ) {
      // First duplex pass: prime the output, then enable input and output
      // triggers together so both directions start in sync.
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      handle->triggered = true;
      // Write samples to device.
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
    if ( result == -1 ) {
      // We'll assume this is an underrun, though there isn't a
      // specific means for determining that.
      handle->xrun[0] = true;
      errorText_ = "RtApiOss::callbackEvent: audio write error.";
      error( RtAudioError::WARNING );
      // Continue on to input section.
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    // Setup parameters.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
      buffer = stream_.userBuffer[1];
      samples = stream_.bufferSize * stream_.nUserChannels[1];
      format = stream_.userFormat;
    // Read samples from device.
    result = read( handle->id[1], buffer, samples * formatBytes(format) );
    if ( result == -1 ) {
      // We'll assume this is an overrun, though there isn't a
      // specific means for determining that.
      handle->xrun[1] = true;
      errorText_ = "RtApiOss::callbackEvent: audio read error.";
      error( RtAudioError::WARNING );
    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, samples, format );
    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
  MUTEX_UNLOCK( &stream_.mutex );
  RtApi::tickStreamTime();
  if ( doStopStream == 1 ) this->stopStream();
9946 static void *ossCallbackHandler( void *ptr )
9948 CallbackInfo *info = (CallbackInfo *) ptr;
9949 RtApiOss *object = (RtApiOss *) info->object;
9950 bool *isRunning = &info->isRunning;
9952 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9953 if (info->doRealtime) {
9954 std::cerr << "RtAudio oss: " <<
9955 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9956 "running realtime scheduling" << std::endl;
9960 while ( *isRunning == true ) {
9961 pthread_testcancel();
9962 object->callbackEvent();
9965 pthread_exit( NULL );
9968 //******************** End of __LINUX_OSS__ *********************//
9972 // *************************************************** //
9974 // Protected common (OS-independent) RtAudio methods.
9976 // *************************************************** //
9978 // This method can be modified to control the behavior of error
9979 // message printing.
9980 RtAudioErrorType RtApi :: error( RtAudioErrorType type )
9982 errorStream_.str(""); // clear the ostringstream to avoid repeated messages
9984 // Don't output warnings if showWarnings_ is false
9985 if ( type == RTAUDIO_WARNING && showWarnings_ == false ) return type;
9987 if ( errorCallback_ ) {
9988 const std::string errorMessage = errorText_;
9989 errorCallback_( type, errorMessage );
9992 std::cerr << '\n' << errorText_ << "\n\n";
9997 void RtApi :: verifyStream()
9999 if ( stream_.state == STREAM_CLOSED ) {
10000 errorText_ = "RtApi:: a stream is not open!";
10001 error( RtAudioError::INVALID_USE );
10006 void RtApi :: clearStreamInfo()
10008 stream_.mode = UNINITIALIZED;
10009 stream_.state = STREAM_CLOSED;
10010 stream_.sampleRate = 0;
10011 stream_.bufferSize = 0;
10012 stream_.nBuffers = 0;
10013 stream_.userFormat = 0;
10014 stream_.userInterleaved = true;
10015 stream_.streamTime = 0.0;
10016 stream_.apiHandle = 0;
10017 stream_.deviceBuffer = 0;
10018 stream_.callbackInfo.callback = 0;
10019 stream_.callbackInfo.userData = 0;
10020 stream_.callbackInfo.isRunning = false;
10021 for ( int i=0; i<2; i++ ) {
10022 stream_.device[i] = 11111;
10023 stream_.doConvertBuffer[i] = false;
10024 stream_.deviceInterleaved[i] = true;
10025 stream_.doByteSwap[i] = false;
10026 stream_.nUserChannels[i] = 0;
10027 stream_.nDeviceChannels[i] = 0;
10028 stream_.channelOffset[i] = 0;
10029 stream_.deviceFormat[i] = 0;
10030 stream_.latency[i] = 0;
10031 stream_.userBuffer[i] = 0;
10032 stream_.convertInfo[i].channels = 0;
10033 stream_.convertInfo[i].inJump = 0;
10034 stream_.convertInfo[i].outJump = 0;
10035 stream_.convertInfo[i].inFormat = 0;
10036 stream_.convertInfo[i].outFormat = 0;
10037 stream_.convertInfo[i].inOffset.clear();
10038 stream_.convertInfo[i].outOffset.clear();
10042 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10044 if ( format == RTAUDIO_SINT16 )
10046 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10048 else if ( format == RTAUDIO_FLOAT64 )
10050 else if ( format == RTAUDIO_SINT24 )
10052 else if ( format == RTAUDIO_SINT8 )
10055 errorText_ = "RtApi::formatBytes: undefined format.";
10056 error( RTAUDIO_WARNING );
10061 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
10063 if ( mode == INPUT ) { // convert device to user buffer
10064 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10065 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10066 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10067 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10069 else { // convert user to device buffer
10070 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10071 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10072 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10073 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
10076 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10077 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10079 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10081 // Set up the interleave/deinterleave offsets.
10082 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10083 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10084 ( mode == INPUT && stream_.userInterleaved ) ) {
10085 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10086 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10087 stream_.convertInfo[mode].outOffset.push_back( k );
10088 stream_.convertInfo[mode].inJump = 1;
10092 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10093 stream_.convertInfo[mode].inOffset.push_back( k );
10094 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10095 stream_.convertInfo[mode].outJump = 1;
10099 else { // no (de)interleaving
10100 if ( stream_.userInterleaved ) {
10101 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10102 stream_.convertInfo[mode].inOffset.push_back( k );
10103 stream_.convertInfo[mode].outOffset.push_back( k );
10107 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10108 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10109 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10110 stream_.convertInfo[mode].inJump = 1;
10111 stream_.convertInfo[mode].outJump = 1;
10116 // Add channel offset.
10117 if ( firstChannel > 0 ) {
10118 if ( stream_.deviceInterleaved[mode] ) {
10119 if ( mode == OUTPUT ) {
10120 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10121 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10124 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10125 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10129 if ( mode == OUTPUT ) {
10130 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10131 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10134 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10135 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10141 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10143 // This function does format conversion, input/output channel compensation, and
10144 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10145 // the lower three bytes of a 32-bit integer.
10147 // Clear our device buffer when in/out duplex device channels are different
10148 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10149 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10150 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10153 if (info.outFormat == RTAUDIO_FLOAT64) {
10155 Float64 *out = (Float64 *)outBuffer;
10157 if (info.inFormat == RTAUDIO_SINT8) {
10158 signed char *in = (signed char *)inBuffer;
10159 scale = 1.0 / 127.5;
10160 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10161 for (j=0; j<info.channels; j++) {
10162 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10163 out[info.outOffset[j]] += 0.5;
10164 out[info.outOffset[j]] *= scale;
10167 out += info.outJump;
10170 else if (info.inFormat == RTAUDIO_SINT16) {
10171 Int16 *in = (Int16 *)inBuffer;
10172 scale = 1.0 / 32767.5;
10173 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10174 for (j=0; j<info.channels; j++) {
10175 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10176 out[info.outOffset[j]] += 0.5;
10177 out[info.outOffset[j]] *= scale;
10180 out += info.outJump;
10183 else if (info.inFormat == RTAUDIO_SINT24) {
10184 Int24 *in = (Int24 *)inBuffer;
10185 scale = 1.0 / 8388607.5;
10186 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10187 for (j=0; j<info.channels; j++) {
10188 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10189 out[info.outOffset[j]] += 0.5;
10190 out[info.outOffset[j]] *= scale;
10193 out += info.outJump;
10196 else if (info.inFormat == RTAUDIO_SINT32) {
10197 Int32 *in = (Int32 *)inBuffer;
10198 scale = 1.0 / 2147483647.5;
10199 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10200 for (j=0; j<info.channels; j++) {
10201 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10202 out[info.outOffset[j]] += 0.5;
10203 out[info.outOffset[j]] *= scale;
10206 out += info.outJump;
10209 else if (info.inFormat == RTAUDIO_FLOAT32) {
10210 Float32 *in = (Float32 *)inBuffer;
10211 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10212 for (j=0; j<info.channels; j++) {
10213 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10216 out += info.outJump;
10219 else if (info.inFormat == RTAUDIO_FLOAT64) {
10220 // Channel compensation and/or (de)interleaving only.
10221 Float64 *in = (Float64 *)inBuffer;
10222 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10223 for (j=0; j<info.channels; j++) {
10224 out[info.outOffset[j]] = in[info.inOffset[j]];
10227 out += info.outJump;
10231 else if (info.outFormat == RTAUDIO_FLOAT32) {
10233 Float32 *out = (Float32 *)outBuffer;
10235 if (info.inFormat == RTAUDIO_SINT8) {
10236 signed char *in = (signed char *)inBuffer;
10237 scale = (Float32) ( 1.0 / 127.5 );
10238 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10239 for (j=0; j<info.channels; j++) {
10240 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10241 out[info.outOffset[j]] += 0.5;
10242 out[info.outOffset[j]] *= scale;
10245 out += info.outJump;
10248 else if (info.inFormat == RTAUDIO_SINT16) {
10249 Int16 *in = (Int16 *)inBuffer;
10250 scale = (Float32) ( 1.0 / 32767.5 );
10251 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10252 for (j=0; j<info.channels; j++) {
10253 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10254 out[info.outOffset[j]] += 0.5;
10255 out[info.outOffset[j]] *= scale;
10258 out += info.outJump;
10261 else if (info.inFormat == RTAUDIO_SINT24) {
10262 Int24 *in = (Int24 *)inBuffer;
10263 scale = (Float32) ( 1.0 / 8388607.5 );
10264 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10265 for (j=0; j<info.channels; j++) {
10266 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10267 out[info.outOffset[j]] += 0.5;
10268 out[info.outOffset[j]] *= scale;
10271 out += info.outJump;
10274 else if (info.inFormat == RTAUDIO_SINT32) {
10275 Int32 *in = (Int32 *)inBuffer;
10276 scale = (Float32) ( 1.0 / 2147483647.5 );
10277 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10278 for (j=0; j<info.channels; j++) {
10279 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10280 out[info.outOffset[j]] += 0.5;
10281 out[info.outOffset[j]] *= scale;
10284 out += info.outJump;
10287 else if (info.inFormat == RTAUDIO_FLOAT32) {
10288 // Channel compensation and/or (de)interleaving only.
10289 Float32 *in = (Float32 *)inBuffer;
10290 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10291 for (j=0; j<info.channels; j++) {
10292 out[info.outOffset[j]] = in[info.inOffset[j]];
10295 out += info.outJump;
10298 else if (info.inFormat == RTAUDIO_FLOAT64) {
10299 Float64 *in = (Float64 *)inBuffer;
10300 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10301 for (j=0; j<info.channels; j++) {
10302 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10305 out += info.outJump;
10309 else if (info.outFormat == RTAUDIO_SINT32) {
10310 Int32 *out = (Int32 *)outBuffer;
10311 if (info.inFormat == RTAUDIO_SINT8) {
10312 signed char *in = (signed char *)inBuffer;
10313 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10314 for (j=0; j<info.channels; j++) {
10315 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10316 out[info.outOffset[j]] <<= 24;
10319 out += info.outJump;
10322 else if (info.inFormat == RTAUDIO_SINT16) {
10323 Int16 *in = (Int16 *)inBuffer;
10324 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10325 for (j=0; j<info.channels; j++) {
10326 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10327 out[info.outOffset[j]] <<= 16;
10330 out += info.outJump;
10333 else if (info.inFormat == RTAUDIO_SINT24) {
10334 Int24 *in = (Int24 *)inBuffer;
10335 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10336 for (j=0; j<info.channels; j++) {
10337 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10338 out[info.outOffset[j]] <<= 8;
10341 out += info.outJump;
10344 else if (info.inFormat == RTAUDIO_SINT32) {
10345 // Channel compensation and/or (de)interleaving only.
10346 Int32 *in = (Int32 *)inBuffer;
10347 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10348 for (j=0; j<info.channels; j++) {
10349 out[info.outOffset[j]] = in[info.inOffset[j]];
10352 out += info.outJump;
10355 else if (info.inFormat == RTAUDIO_FLOAT32) {
10356 Float32 *in = (Float32 *)inBuffer;
10357 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10358 for (j=0; j<info.channels; j++) {
10359 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10362 out += info.outJump;
10365 else if (info.inFormat == RTAUDIO_FLOAT64) {
10366 Float64 *in = (Float64 *)inBuffer;
10367 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10368 for (j=0; j<info.channels; j++) {
10369 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10372 out += info.outJump;
10376 else if (info.outFormat == RTAUDIO_SINT24) {
10377 Int24 *out = (Int24 *)outBuffer;
10378 if (info.inFormat == RTAUDIO_SINT8) {
10379 signed char *in = (signed char *)inBuffer;
10380 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10381 for (j=0; j<info.channels; j++) {
10382 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10383 //out[info.outOffset[j]] <<= 16;
10386 out += info.outJump;
10389 else if (info.inFormat == RTAUDIO_SINT16) {
10390 Int16 *in = (Int16 *)inBuffer;
10391 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10392 for (j=0; j<info.channels; j++) {
10393 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10394 //out[info.outOffset[j]] <<= 8;
10397 out += info.outJump;
10400 else if (info.inFormat == RTAUDIO_SINT24) {
10401 // Channel compensation and/or (de)interleaving only.
10402 Int24 *in = (Int24 *)inBuffer;
10403 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10404 for (j=0; j<info.channels; j++) {
10405 out[info.outOffset[j]] = in[info.inOffset[j]];
10408 out += info.outJump;
10411 else if (info.inFormat == RTAUDIO_SINT32) {
10412 Int32 *in = (Int32 *)inBuffer;
10413 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10414 for (j=0; j<info.channels; j++) {
10415 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10416 //out[info.outOffset[j]] >>= 8;
10419 out += info.outJump;
10422 else if (info.inFormat == RTAUDIO_FLOAT32) {
10423 Float32 *in = (Float32 *)inBuffer;
10424 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10425 for (j=0; j<info.channels; j++) {
10426 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10429 out += info.outJump;
10432 else if (info.inFormat == RTAUDIO_FLOAT64) {
10433 Float64 *in = (Float64 *)inBuffer;
10434 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10435 for (j=0; j<info.channels; j++) {
10436 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10439 out += info.outJump;
10443 else if (info.outFormat == RTAUDIO_SINT16) {
10444 Int16 *out = (Int16 *)outBuffer;
10445 if (info.inFormat == RTAUDIO_SINT8) {
10446 signed char *in = (signed char *)inBuffer;
10447 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10448 for (j=0; j<info.channels; j++) {
10449 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10450 out[info.outOffset[j]] <<= 8;
10453 out += info.outJump;
10456 else if (info.inFormat == RTAUDIO_SINT16) {
10457 // Channel compensation and/or (de)interleaving only.
10458 Int16 *in = (Int16 *)inBuffer;
10459 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10460 for (j=0; j<info.channels; j++) {
10461 out[info.outOffset[j]] = in[info.inOffset[j]];
10464 out += info.outJump;
10467 else if (info.inFormat == RTAUDIO_SINT24) {
10468 Int24 *in = (Int24 *)inBuffer;
10469 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10470 for (j=0; j<info.channels; j++) {
10471 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10474 out += info.outJump;
10477 else if (info.inFormat == RTAUDIO_SINT32) {
10478 Int32 *in = (Int32 *)inBuffer;
10479 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10480 for (j=0; j<info.channels; j++) {
10481 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10484 out += info.outJump;
10487 else if (info.inFormat == RTAUDIO_FLOAT32) {
10488 Float32 *in = (Float32 *)inBuffer;
10489 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10490 for (j=0; j<info.channels; j++) {
10491 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10494 out += info.outJump;
10497 else if (info.inFormat == RTAUDIO_FLOAT64) {
10498 Float64 *in = (Float64 *)inBuffer;
10499 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10500 for (j=0; j<info.channels; j++) {
10501 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10504 out += info.outJump;
10508 else if (info.outFormat == RTAUDIO_SINT8) {
10509 signed char *out = (signed char *)outBuffer;
10510 if (info.inFormat == RTAUDIO_SINT8) {
10511 // Channel compensation and/or (de)interleaving only.
10512 signed char *in = (signed char *)inBuffer;
10513 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10514 for (j=0; j<info.channels; j++) {
10515 out[info.outOffset[j]] = in[info.inOffset[j]];
10518 out += info.outJump;
10521 if (info.inFormat == RTAUDIO_SINT16) {
10522 Int16 *in = (Int16 *)inBuffer;
10523 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10524 for (j=0; j<info.channels; j++) {
10525 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10528 out += info.outJump;
10531 else if (info.inFormat == RTAUDIO_SINT24) {
10532 Int24 *in = (Int24 *)inBuffer;
10533 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10534 for (j=0; j<info.channels; j++) {
10535 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10538 out += info.outJump;
10541 else if (info.inFormat == RTAUDIO_SINT32) {
10542 Int32 *in = (Int32 *)inBuffer;
10543 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10544 for (j=0; j<info.channels; j++) {
10545 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10548 out += info.outJump;
10551 else if (info.inFormat == RTAUDIO_FLOAT32) {
10552 Float32 *in = (Float32 *)inBuffer;
10553 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10554 for (j=0; j<info.channels; j++) {
10555 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10558 out += info.outJump;
10561 else if (info.inFormat == RTAUDIO_FLOAT64) {
10562 Float64 *in = (Float64 *)inBuffer;
10563 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10564 for (j=0; j<info.channels; j++) {
10565 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10568 out += info.outJump;
10574 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10575 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10576 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10578 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10584 if ( format == RTAUDIO_SINT16 ) {
10585 for ( unsigned int i=0; i<samples; i++ ) {
10586 // Swap 1st and 2nd bytes.
10591 // Increment 2 bytes.
10595 else if ( format == RTAUDIO_SINT32 ||
10596 format == RTAUDIO_FLOAT32 ) {
10597 for ( unsigned int i=0; i<samples; i++ ) {
10598 // Swap 1st and 4th bytes.
10603 // Swap 2nd and 3rd bytes.
10609 // Increment 3 more bytes.
10613 else if ( format == RTAUDIO_SINT24 ) {
10614 for ( unsigned int i=0; i<samples; i++ ) {
10615 // Swap 1st and 3rd bytes.
10620 // Increment 2 more bytes.
10624 else if ( format == RTAUDIO_FLOAT64 ) {
10625 for ( unsigned int i=0; i<samples; i++ ) {
10626 // Swap 1st and 8th bytes
10631 // Swap 2nd and 7th bytes
10637 // Swap 3rd and 6th bytes
10643 // Swap 4th and 5th bytes
10649 // Increment 5 more bytes.
10655 // Indentation settings for Vim and Emacs
10657 // Local Variables:
10658 // c-basic-offset: 2
10659 // indent-tabs-mode: nil
10662 // vim: et sts=2 sw=2