1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio GitHub site: https://github.com/thestk/rtaudio
11 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
13 RtAudio: realtime audio i/o C++ classes
14 Copyright (c) 2001-2019 Gary P. Scavone
16 Permission is hereby granted, free of charge, to any person
17 obtaining a copy of this software and associated documentation files
18 (the "Software"), to deal in the Software without restriction,
19 including without limitation the rights to use, copy, modify, merge,
20 publish, distribute, sublicense, and/or sell copies of the Software,
21 and to permit persons to whom the Software is furnished to do so,
22 subject to the following conditions:
24 The above copyright notice and this permission notice shall be
25 included in all copies or substantial portions of the Software.
27 Any person wishing to distribute modifications to the Software is
28 asked to send the modifications to the original developer so that
29 they can be incorporated into the canonical version. This is,
30 however, not a binding provision of this license.
32 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
33 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
34 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
35 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
36 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
37 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
38 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 /************************************************************************/
42 // RtAudio: Version 5.1.0
52 // Static variable definitions.
// Candidate sample rates (in Hz) that the API backends probe for device
// support. MAX_SAMPLE_RATES must equal the element count of SAMPLE_RATES.
53 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
54 const unsigned int RtApi::SAMPLE_RATES[] = {
55 4000, 5512, 8000, 9600, 11025, 16000, 22050,
56 32000, 44100, 48000, 88200, 96000, 176400, 192000
// NOTE(review): the closing "};" is elided in this excerpt — confirm in full source.
59 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
// Windows backends: map the portable MUTEX_* macros onto Win32 critical
// sections (the argument A is a CRITICAL_SECTION pointer).
60 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
61 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
62 #define MUTEX_LOCK(A) EnterCriticalSection(A)
63 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Narrow-string overload: wrap the C string directly (no conversion).
// NOTE(review): the enclosing "{" / "}" lines are elided in this excerpt.
67 static std::string convertCharPointerToStdString(const char *text)
69 return std::string(text);
// Wide-string overload (Windows): convert UTF-16 to UTF-8 via
// WideCharToMultiByte, called twice — first with a NULL buffer to measure
// the required size (which includes the terminating NUL, hence the
// length-1 when sizing the std::string), then again to fill s in place.
72 static std::string convertCharPointerToStdString(const wchar_t *text)
74 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
75 std::string s( length-1, '\0' );
76 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
80 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
// POSIX backends: map MUTEX_* onto pthread mutexes
// (the argument A is a pthread_mutex_t pointer).
82 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
83 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
84 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
85 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
// No-API (dummy) build: harmless placeholders so call sites still compile.
// NOTE(review): the intervening "#else" and trailing "#endif" lines are
// elided in this excerpt.
87 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
88 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
91 // *************************************************** //
93 // RtAudio definitions.
95 // *************************************************** //
// Return the RtAudio library version string (the RTAUDIO_VERSION macro).
97 std::string RtAudio :: getVersion( void )
99 return RTAUDIO_VERSION;
102 // Define API names and display names.
103 // Must be in same order as API enum.
// Each row is { machine-readable short name, human-readable display name },
// indexed directly by the RtAudio::Api enum value.
// NOTE(review): several rows (apparently jack/alsa/asio, per the gaps in the
// original line numbering) are elided in this excerpt.
105 const char* rtaudio_api_names[][2] = {
106 { "unspecified" , "Unknown" },
108 { "pulse" , "Pulse" },
109 { "oss" , "OpenSoundSystem" },
111 { "core" , "CoreAudio" },
112 { "wasapi" , "WASAPI" },
114 { "ds" , "DirectSound" },
115 { "dummy" , "Dummy" },
// Number of rows in the table above; compared against RtAudio::NUM_APIS
// by the StaticAssert check further below.
118 const unsigned int rtaudio_num_api_names =
119 sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
121 // The order here will control the order of RtAudio's API search in
// the constructor: the first compiled-in entry is tried first.
// The array ends with an UNSPECIFIED sentinel, which is why the element
// count below subtracts one.
// NOTE(review): the "#endif" lines (and some enumerators) are elided in
// this excerpt.
123 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
124 #if defined(__UNIX_JACK__)
127 #if defined(__LINUX_PULSE__)
128 RtAudio::LINUX_PULSE,
130 #if defined(__LINUX_ALSA__)
133 #if defined(__LINUX_OSS__)
136 #if defined(__WINDOWS_ASIO__)
137 RtAudio::WINDOWS_ASIO,
139 #if defined(__WINDOWS_WASAPI__)
140 RtAudio::WINDOWS_WASAPI,
142 #if defined(__WINDOWS_DS__)
145 #if defined(__MACOSX_CORE__)
146 RtAudio::MACOSX_CORE,
148 #if defined(__RTAUDIO_DUMMY__)
149 RtAudio::RTAUDIO_DUMMY,
151 RtAudio::UNSPECIFIED,
// Count of compiled-in APIs, excluding the UNSPECIFIED sentinel.
154 extern "C" const unsigned int rtaudio_num_compiled_apis =
155 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
158 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
159 // If the build breaks here, check that they match.
// Pre-C++11 static_assert emulation: only the StaticAssert<true>
// specialization has a public constructor, so instantiating
// StaticAssert<false> is a compile error.
160 template<bool b> class StaticAssert { private: StaticAssert() {} };
161 template<> class StaticAssert<true>{ public: StaticAssert() {} };
// Never instantiated at runtime; exists only to trigger the check above.
// NOTE(review): the closing braces of StaticAssertions are elided in this excerpt.
162 class StaticAssertions { StaticAssertions() {
163 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
// Fill the caller's vector with the APIs compiled into this build,
// copied from the rtaudio_compiled_apis table (sentinel excluded).
166 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
168 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
169 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
// Return the short machine-readable name (e.g. "core", "wasapi") for an
// API enum value, via column 0 of rtaudio_api_names.
// NOTE(review): the return statement for the out-of-range branch is elided
// in this excerpt — confirm it returns an empty string in the full source.
172 std::string RtAudio :: getApiName( RtAudio::Api api )
174 if (api < 0 || api >= RtAudio::NUM_APIS)
176 return rtaudio_api_names[api][0];
// Return the human-readable display name (e.g. "CoreAudio") for an API
// enum value, via column 1 of rtaudio_api_names.
// NOTE(review): the return statement for the out-of-range branch is elided
// in this excerpt.
179 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
181 if (api < 0 || api >= RtAudio::NUM_APIS)
183 return rtaudio_api_names[api][1];
// Look up a compiled-in API by its short name (as returned by getApiName).
// Returns UNSPECIFIED when the name is unknown or that API was not
// compiled into this build.
// NOTE(review): the declaration of loop variable i is elided in this excerpt.
186 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
189 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
190 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
191 return rtaudio_compiled_apis[i];
192 return RtAudio::UNSPECIFIED;
// Instantiate the concrete RtApi subclass for the requested API and store
// it in rtapi_. Each branch exists only when the corresponding backend
// support macro was defined at compile time, so an unsupported api value
// leaves rtapi_ unchanged (the caller checks for that).
// NOTE(review): the leading "{", the "#endif" lines, and the closing "}"
// are elided in this excerpt.
195 void RtAudio :: openRtApi( RtAudio::Api api )
201 #if defined(__UNIX_JACK__)
202 if ( api == UNIX_JACK )
203 rtapi_ = new RtApiJack();
205 #if defined(__LINUX_ALSA__)
206 if ( api == LINUX_ALSA )
207 rtapi_ = new RtApiAlsa();
209 #if defined(__LINUX_PULSE__)
210 if ( api == LINUX_PULSE )
211 rtapi_ = new RtApiPulse();
213 #if defined(__LINUX_OSS__)
214 if ( api == LINUX_OSS )
215 rtapi_ = new RtApiOss();
217 #if defined(__WINDOWS_ASIO__)
218 if ( api == WINDOWS_ASIO )
219 rtapi_ = new RtApiAsio();
221 #if defined(__WINDOWS_WASAPI__)
222 if ( api == WINDOWS_WASAPI )
223 rtapi_ = new RtApiWasapi();
225 #if defined(__WINDOWS_DS__)
226 if ( api == WINDOWS_DS )
227 rtapi_ = new RtApiDs();
229 #if defined(__MACOSX_CORE__)
230 if ( api == MACOSX_CORE )
231 rtapi_ = new RtApiCore();
233 #if defined(__RTAUDIO_DUMMY__)
234 if ( api == RTAUDIO_DUMMY )
235 rtapi_ = new RtApiDummy();
// Construct an RtAudio instance. If a specific API was requested, try to
// open it; on failure (no compiled support) warn and fall back to probing
// every compiled API in rtaudio_compiled_apis order, stopping at the first
// one that reports at least one device. If nothing can be opened at all,
// throw an RtAudioError — this should be unreachable because
// __RTAUDIO_DUMMY__ guarantees at least one backend.
// NOTE(review): the openRtApi( api ) call inside the first branch appears
// to be elided in this excerpt (original line numbering gap at 245).
239 RtAudio :: RtAudio( RtAudio::Api api )
243 if ( api != UNSPECIFIED ) {
244 // Attempt to open the specified API.
246 if ( rtapi_ ) return;
248 // No compiled support for specified API value. Issue a debug
249 // warning and continue as if no API was specified.
250 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
253 // Iterate through the compiled APIs and return as soon as we find
254 // one with at least one device or we reach the end of the list.
255 std::vector< RtAudio::Api > apis;
256 getCompiledApi( apis );
257 for ( unsigned int i=0; i<apis.size(); i++ ) {
258 openRtApi( apis[i] );
259 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
262 if ( rtapi_ ) return;
264 // It should not be possible to get here because the preprocessor
265 // definition __RTAUDIO_DUMMY__ is automatically defined in RtAudio.h
266 // if no API-specific definitions are passed to the compiler. But just
267 // in case something weird happens, we'll throw an error.
268 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
269 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
// Destructor — body elided in this excerpt; presumably deletes rtapi_.
// TODO(review): confirm against the full source.
272 RtAudio :: ~RtAudio()
278 //void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
// Thin forwarding wrapper: delegates stream creation entirely to the
// currently selected API object (rtapi_), passing all arguments through
// unchanged and returning its error code.
279 RtAudioError::Type RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
280 RtAudio::StreamParameters *inputParameters,
281 RtAudioFormat format, unsigned int sampleRate,
282 unsigned int *bufferFrames,
283 RtAudioCallback callback, void *userData,
284 RtAudio::StreamOptions *options ) //, RtAudioErrorCallback errorCallback )
286 return rtapi_->openStream( outputParameters, inputParameters, format,
287 sampleRate, bufferFrames, callback,
288 userData, options ); //, errorCallback );
291 // *************************************************** //
293 // Public RtApi definitions (see end of file for
294 // private or protected utility functions).
296 // *************************************************** //
// Fragments of RtApi's constructor and destructor — the function headers
// and surrounding lines are elided in this excerpt. The constructor
// initializes the per-stream mutex and enables warnings by default; the
// destructor releases the mutex.
301 MUTEX_INITIALIZE( &stream_.mutex );
303 showWarnings_ = true;
304 //firstErrorOccurred_ = false;
// (Destructor body — see note above.)
309 MUTEX_DESTROY( &stream_.mutex );
312 //void RtApi :: openStream( RtAudio::StreamParameters *oParams,
// Validate all user-supplied stream parameters, then delegate the actual
// device setup to the backend's probeDeviceOpen() (output first, then
// input). On success, records the callback/user data, publishes the
// realized buffer count back through options, and marks the stream
// STOPPED. Returns NO_ERROR or an error code via error().
313 RtAudioError::Type RtApi :: openStream( RtAudio::StreamParameters *oParams,
314 RtAudio::StreamParameters *iParams,
315 RtAudioFormat format, unsigned int sampleRate,
316 unsigned int *bufferFrames,
317 RtAudioCallback callback, void *userData,
318 RtAudio::StreamOptions *options ) //, RtAudioErrorCallback errorCallback )
320 //RtAudioError::Type type = RtAudioError::NO_ERROR;
// Refuse to open on top of an already-open stream.
321 if ( stream_.state != STREAM_CLOSED ) {
322 //type = RtAudioError::INVALID_USE;
323 errorText_ = "RtApi::openStream: a stream is already open!";
324 return error( RtAudioError::INVALID_USE );
327 // Clear stream information potentially left from a previously open stream.
// Parameter sanity checks: non-NULL parameter structs need >= 1 channel,
// and at least one of output/input must be provided.
330 if ( oParams && oParams->nChannels < 1 ) {
331 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
332 return error( RtAudioError::INVALID_USE );
336 if ( iParams && iParams->nChannels < 1 ) {
337 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
338 return error( RtAudioError::INVALID_USE );
342 if ( oParams == NULL && iParams == NULL ) {
343 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
344 return error( RtAudioError::INVALID_USE );
// formatBytes() returns 0 for unknown sample formats.
348 if ( formatBytes(format) == 0 ) {
349 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
350 return error( RtAudioError::INVALID_USE );
// Validate device indices against the backend's device count.
354 unsigned int nDevices = getDeviceCount();
355 unsigned int oChannels = 0;
357 oChannels = oParams->nChannels;
358 if ( oParams->deviceId >= nDevices ) {
359 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
360 return error( RtAudioError::INVALID_USE );
365 unsigned int iChannels = 0;
367 iChannels = iParams->nChannels;
368 if ( iParams->deviceId >= nDevices ) {
369 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
370 return error( RtAudioError::INVALID_USE );
// Open the output side first; probeDeviceOpen sets errorText_ on failure.
377 if ( oChannels > 0 ) {
379 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
380 sampleRate, format, bufferFrames, options );
381 if ( result == false ) {
382 return error( RtAudioError::SYSTEM_ERROR );
// Then the input side; on failure, undo the output open before returning.
387 if ( iChannels > 0 ) {
389 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
390 sampleRate, format, bufferFrames, options );
391 if ( result == false ) {
392 if ( oChannels > 0 ) closeStream();
393 return error( RtAudioError::SYSTEM_ERROR );
// Record the callback and report the realized buffer count to the caller.
398 stream_.callbackInfo.callback = (void *) callback;
399 stream_.callbackInfo.userData = userData;
400 //stream_.callbackInfo.errorCallback = (void *) errorCallback;
402 if ( options ) options->numberOfBuffers = stream_.nBuffers;
403 stream_.state = STREAM_STOPPED;
404 return RtAudioError::NO_ERROR;
// Base-class fallbacks for the default-device queries and closeStream.
// NOTE(review): the function bodies' return/brace lines are elided in this
// excerpt; per the comments, subclasses are expected to override these.
407 unsigned int RtApi :: getDefaultInputDevice( void )
409 // Should be implemented in subclasses if possible.
413 unsigned int RtApi :: getDefaultOutputDevice( void )
415 // Should be implemented in subclasses if possible.
419 void RtApi :: closeStream( void )
421 // MUST be implemented in subclasses!
// Base-class placeholder for the per-backend device-open routine; every
// API subclass must override this. Parameter names are commented out
// because they are unused here.
425 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
426 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
427 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
428 RtAudio::StreamOptions * /*options*/ )
430 // MUST be implemented in subclasses!
// Advance the stream-time counter by one buffer's duration (in seconds).
434 void RtApi :: tickStreamTime( void )
436 // Subclasses that do not provide their own implementation of
437 // getStreamTime should call this function once per buffer I/O to
438 // provide basic stream time support.
440 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
// Record the wall-clock instant of this tick so getStreamTime() can
// interpolate between buffer boundaries.
443 #if defined( HAVE_GETTIMEOFDAY )
444 gettimeofday( &stream_.lastTickTimestamp, NULL );
// Total stream latency in frames: output latency (latency[0]) plus input
// latency (latency[1]), depending on the stream mode.
// NOTE(review): the final "return totalLatency;" line is elided in this excerpt.
449 long RtApi :: getStreamLatency( void )
451 long totalLatency = 0;
452 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
453 totalLatency = stream_.latency[0];
454 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
455 totalLatency += stream_.latency[1];
// Elapsed stream time in seconds. When gettimeofday is available, refine
// the buffer-granular streamTime by adding the wall-clock time elapsed
// since the last tickStreamTime() call.
461 double RtApi :: getStreamTime( void )
463 #if defined( HAVE_GETTIMEOFDAY )
464 // Return a very accurate estimate of the stream time by
465 // adding in the elapsed time since the last tick.
// A stopped stream (or one that has never ticked) returns the raw value.
469 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
470 return stream_.streamTime;
// NOTE(review): the declarations of `now` and `then` (struct timeval)
// appear elided in this excerpt.
472 gettimeofday( &now, NULL );
473 then = stream_.lastTickTimestamp;
474 return stream_.streamTime +
475 ((now.tv_sec + 0.000001 * now.tv_usec) -
476 (then.tv_sec + 0.000001 * then.tv_usec));
// Without gettimeofday, fall back to the buffer-granular counter.
478 return stream_.streamTime;
// Reset the stream-time counter to a caller-supplied value (seconds) and
// restamp the tick time so subsequent interpolation starts from now.
// NOTE(review): lines between the signature and the assignment are elided
// in this excerpt — possibly a validity check on `time`; confirm.
483 void RtApi :: setStreamTime( double time )
488 stream_.streamTime = time;
490 #if defined( HAVE_GETTIMEOFDAY )
491 gettimeofday( &stream_.lastTickTimestamp, NULL );
// Return the open stream's sample rate.
// NOTE(review): the not-open branch (likely an error path) is elided in
// this excerpt.
496 unsigned int RtApi :: getStreamSampleRate( void )
499 if ( isStreamOpen() ) return stream_.sampleRate;
504 // *************************************************** //
506 // OS/API-specific methods.
508 // *************************************************** //
510 #if defined(__MACOSX_CORE__)
512 // The OS X CoreAudio API is designed to use a separate callback
513 // procedure for each of its audio devices. A single RtAudio duplex
514 // stream using two different devices is supported here, though it
515 // cannot be guaranteed to always behave correctly because we cannot
516 // synchronize these two callbacks.
518 // A property listener is installed for over/underrun information.
519 // However, no functionality is currently provided to allow property
520 // listeners to trigger user handlers because it is unclear what could
521 // be done if a critical stream parameter (buffer size, sample rate,
522 // device disconnect) notification arrived. The listeners entail
523 // quite a bit of extra code and most likely, a user program wouldn't
524 // be prepared for the result anyway. However, we do provide a flag
525 // to the client callback function to inform of an over/underrun.
527 // A structure to hold various information related to the CoreAudio API
// implementation. Two-element arrays are indexed [0] = output,
// [1] = input (consistent with xrunListener below, which sets xrun[1]
// for the input scope).
// NOTE(review): the struct header line, some members (e.g. the xrun
// flags initialized in the ctor), and the closing "};" are elided in
// this excerpt.
530 AudioDeviceID id[2]; // device ids
531 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
532 AudioDeviceIOProcID procId[2];
534 UInt32 iStream[2]; // device stream index (or first if using multiple)
535 UInt32 nStreams[2]; // number of streams to use
538 pthread_cond_t condition;
539 int drainCounter; // Tracks callback counts when draining
540 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Constructor initializer: zero the buffers/counters and default to one
// stream per direction.
543 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// CoreAudio backend constructor. On OS X 10.6+ it must pin the HAL's run
// loop (by setting kAudioHardwarePropertyRunLoop to NULL) so that device
// property queries and updates are serviced; failure is only a warning.
546 RtApiCore:: RtApiCore()
548 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
549 // This is a largely undocumented but absolutely necessary
550 // requirement starting with OS-X 10.6. If not called, queries and
551 // updates to various audio device properties are not handled
553 CFRunLoopRef theRunLoop = NULL;
554 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
555 kAudioObjectPropertyScopeGlobal,
556 kAudioObjectPropertyElementMaster };
557 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
558 if ( result != noErr ) {
559 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
560 error( RtAudioError::WARNING );
// Close any open stream before the base-class destructor runs, so that
// API-specific state is torn down while this subclass is still alive.
565 RtApiCore :: ~RtApiCore()
567 // The subclass destructor gets called before the base class
568 // destructor, so close an existing stream before deallocating
569 // apiDeviceId memory.
570 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count audio devices by asking the HAL for the size of the device-ID
// array and dividing by sizeof(AudioDeviceID). A HAL error is reported
// as a warning.
// NOTE(review): the declaration of dataSize and the warning-branch return
// appear elided in this excerpt.
573 unsigned int RtApiCore :: getDeviceCount( void )
575 // Find out how many audio devices there are, if any.
577 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
578 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
579 if ( result != noErr ) {
580 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
581 error( RtAudioError::WARNING );
585 return dataSize / sizeof( AudioDeviceID );
// Map the system default *input* device to RtAudio's device index: fetch
// the default device's AudioDeviceID, fetch the full device-ID list, and
// return the position of the default ID within that list. All failures
// are warnings (index 0 is the implicit fallback for <= 1 device).
// NOTE(review): the declaration of `id` and the warning-branch returns are
// elided in this excerpt.
588 unsigned int RtApiCore :: getDefaultInputDevice( void )
590 unsigned int nDevices = getDeviceCount();
591 if ( nDevices <= 1 ) return 0;
594 UInt32 dataSize = sizeof( AudioDeviceID );
595 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
596 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
597 if ( result != noErr ) {
598 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
599 error( RtAudioError::WARNING );
// Fetch all device IDs, then search for the default ID's index.
603 dataSize *= nDevices;
604 AudioDeviceID deviceList[ nDevices ];
605 property.mSelector = kAudioHardwarePropertyDevices;
606 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
607 if ( result != noErr ) {
608 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
609 error( RtAudioError::WARNING );
613 for ( unsigned int i=0; i<nDevices; i++ )
614 if ( id == deviceList[i] ) return i;
616 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
617 error( RtAudioError::WARNING );
// Map the system default *output* device to RtAudio's device index.
// Mirrors getDefaultInputDevice() above, but queries
// kAudioHardwarePropertyDefaultOutputDevice.
// NOTE(review): the declaration of `id` and the warning-branch returns are
// elided in this excerpt.
621 unsigned int RtApiCore :: getDefaultOutputDevice( void )
623 unsigned int nDevices = getDeviceCount();
624 if ( nDevices <= 1 ) return 0;
627 UInt32 dataSize = sizeof( AudioDeviceID );
628 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
629 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
630 if ( result != noErr ) {
631 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
632 error( RtAudioError::WARNING );
// Fetch all device IDs, then search for the default ID's index.
636 dataSize = sizeof( AudioDeviceID ) * nDevices;
637 AudioDeviceID deviceList[ nDevices ];
638 property.mSelector = kAudioHardwarePropertyDevices;
639 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
640 if ( result != noErr ) {
641 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
642 error( RtAudioError::WARNING );
646 for ( unsigned int i=0; i<nDevices; i++ )
647 if ( id == deviceList[i] ) return i;
649 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
650 error( RtAudioError::WARNING );
// Build a DeviceInfo record for device index `device`: resolve the
// AudioDeviceID, read the manufacturer/name strings, count input/output
// channels from the stream configurations, enumerate supported sample
// rates, and flag default input/output status. Most HAL failures are
// reported as warnings.
// NOTE(review): this excerpt elides many lines (closing braces, early
// returns, CFStringRef/free()/bufferList cleanup, `probed` handling) —
// comments below describe only what is visible.
654 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
656 RtAudio::DeviceInfo info;
// Validate the device index against the current device count.
660 unsigned int nDevices = getDeviceCount();
661 if ( nDevices == 0 ) {
662 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
663 error( RtAudioError::INVALID_USE );
667 if ( device >= nDevices ) {
668 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
669 error( RtAudioError::INVALID_USE );
// Translate the RtAudio index into a CoreAudio AudioDeviceID.
673 AudioDeviceID deviceList[ nDevices ];
674 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
675 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
676 kAudioObjectPropertyScopeGlobal,
677 kAudioObjectPropertyElementMaster };
678 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
679 0, NULL, &dataSize, (void *) &deviceList );
680 if ( result != noErr ) {
681 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
682 error( RtAudioError::WARNING );
686 AudioDeviceID id = deviceList[ device ];
688 // Get the device name.
// Device name is "<manufacturer>: <name>". The CFString is converted with
// a worst-case 3-bytes-per-character buffer (+1 for the NUL).
691 dataSize = sizeof( CFStringRef );
692 property.mSelector = kAudioObjectPropertyManufacturer;
693 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
694 if ( result != noErr ) {
695 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
696 errorText_ = errorStream_.str();
697 error( RtAudioError::WARNING );
701 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
702 long length = CFStringGetLength(cfname);
703 char *mname = (char *)malloc(length * 3 + 1);
704 #if defined( UNICODE ) || defined( _UNICODE )
705 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
707 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
709 info.name.append( (const char *)mname, strlen(mname) );
710 info.name.append( ": " );
// Append the device's own name after the manufacturer prefix.
714 property.mSelector = kAudioObjectPropertyName;
715 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
716 if ( result != noErr ) {
717 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
718 errorText_ = errorStream_.str();
719 error( RtAudioError::WARNING );
723 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
724 length = CFStringGetLength(cfname);
725 char *name = (char *)malloc(length * 3 + 1);
726 #if defined( UNICODE ) || defined( _UNICODE )
727 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
729 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
731 info.name.append( (const char *)name, strlen(name) );
735 // Get the output stream "configuration".
// Output channel count = sum of mNumberChannels over all buffers in the
// device's output-scope stream configuration.
736 AudioBufferList *bufferList = nil;
737 property.mSelector = kAudioDevicePropertyStreamConfiguration;
738 property.mScope = kAudioDevicePropertyScopeOutput;
739 // property.mElement = kAudioObjectPropertyElementWildcard;
741 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
742 if ( result != noErr || dataSize == 0 ) {
743 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
744 errorText_ = errorStream_.str();
745 error( RtAudioError::WARNING );
749 // Allocate the AudioBufferList.
750 bufferList = (AudioBufferList *) malloc( dataSize );
751 if ( bufferList == NULL ) {
752 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
753 error( RtAudioError::WARNING );
757 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
758 if ( result != noErr || dataSize == 0 ) {
760 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
761 errorText_ = errorStream_.str();
762 error( RtAudioError::WARNING );
766 // Get output channel information.
767 unsigned int i, nStreams = bufferList->mNumberBuffers;
768 for ( i=0; i<nStreams; i++ )
769 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
772 // Get the input stream "configuration".
// Same procedure as above, but for the input scope.
773 property.mScope = kAudioDevicePropertyScopeInput;
774 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
775 if ( result != noErr || dataSize == 0 ) {
776 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
777 errorText_ = errorStream_.str();
778 error( RtAudioError::WARNING );
782 // Allocate the AudioBufferList.
783 bufferList = (AudioBufferList *) malloc( dataSize );
784 if ( bufferList == NULL ) {
785 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
786 error( RtAudioError::WARNING );
790 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
791 if (result != noErr || dataSize == 0) {
793 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
794 errorText_ = errorStream_.str();
795 error( RtAudioError::WARNING );
799 // Get input channel information.
800 nStreams = bufferList->mNumberBuffers;
801 for ( i=0; i<nStreams; i++ )
802 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
805 // If device opens for both playback and capture, we determine the channels.
806 if ( info.outputChannels > 0 && info.inputChannels > 0 )
807 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
809 // Probe the device sample rates.
810 bool isInput = false;
811 if ( info.outputChannels == 0 ) isInput = true;
813 // Determine the supported sample rates.
814 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
815 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
816 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
817 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
818 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
819 errorText_ = errorStream_.str();
820 error( RtAudioError::WARNING );
824 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
825 AudioValueRange rangeList[ nRanges ];
826 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
827 if ( result != kAudioHardwareNoError ) {
828 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
829 errorText_ = errorStream_.str();
830 error( RtAudioError::WARNING );
834 // The sample rate reporting mechanism is a bit of a mystery. It
835 // seems that it can either return individual rates or a range of
836 // rates. I assume that if the min / max range values are the same,
837 // then that represents a single supported rate and if the min / max
838 // range values are different, the device supports an arbitrary
839 // range of values (though there might be multiple ranges, so we'll
840 // use the most conservative range).
841 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
842 bool haveValueRange = false;
843 info.sampleRates.clear();
844 for ( UInt32 i=0; i<nRanges; i++ ) {
845 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
846 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
847 info.sampleRates.push_back( tmpSr );
// Preferred rate: the largest discrete rate that does not exceed 48 kHz.
849 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
850 info.preferredSampleRate = tmpSr;
853 haveValueRange = true;
// Intersect all reported ranges (most conservative min/max).
854 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
855 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
// For a continuous range, offer the standard SAMPLE_RATES that fall
// within the intersected [minimumRate, maximumRate] interval.
859 if ( haveValueRange ) {
860 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
861 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
862 info.sampleRates.push_back( SAMPLE_RATES[k] );
864 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
865 info.preferredSampleRate = SAMPLE_RATES[k];
870 // Sort and remove any redundant values
871 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
872 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
874 if ( info.sampleRates.size() == 0 ) {
875 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
876 errorText_ = errorStream_.str();
877 error( RtAudioError::WARNING );
881 // Probe the currently configured sample rate
// NOTE(review): the declaration of `nominalRate` (Float64) is elided here.
883 dataSize = sizeof( Float64 );
884 property.mSelector = kAudioDevicePropertyNominalSampleRate;
885 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
886 if ( result == noErr ) info.currentSampleRate = (unsigned int) nominalRate;
888 // CoreAudio always uses 32-bit floating point data for PCM streams.
889 // Thus, any other "physical" formats supported by the device are of
890 // no interest to the client.
891 info.nativeFormats = RTAUDIO_FLOAT32;
// Flag whether this device is the system default for either direction.
893 if ( info.outputChannels > 0 )
894 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
895 if ( info.inputChannels > 0 )
896 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// CoreAudio IOProc trampoline: recover the RtApiCore instance from the
// client-data pointer and forward the buffers to callbackEvent(). A false
// return from callbackEvent is mapped to kAudioHardwareUnspecifiedError.
// NOTE(review): the final parameter declaration (the client-data
// `infoPointer`) is elided from the signature in this excerpt.
902 static OSStatus callbackHandler( AudioDeviceID inDevice,
903 const AudioTimeStamp* /*inNow*/,
904 const AudioBufferList* inInputData,
905 const AudioTimeStamp* /*inInputTime*/,
906 AudioBufferList* outOutputData,
907 const AudioTimeStamp* /*inOutputTime*/,
910 CallbackInfo *info = (CallbackInfo *) infoPointer;
912 RtApiCore *object = (RtApiCore *) info->object;
913 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
914 return kAudioHardwareUnspecifiedError;
916 return kAudioHardwareNoError;
// Property listener for device disconnection: when a
// kAudioDevicePropertyDeviceIsAlive notification arrives, mark the
// device as disconnected and close the stream.
// NOTE(review): the nAddresses parameter declaration is elided from the
// signature in this excerpt.
919 static OSStatus disconnectListener( AudioObjectID /*inDevice*/,
921 const AudioObjectPropertyAddress properties[],
924 for ( UInt32 i=0; i<nAddresses; i++ ) {
925 if ( properties[i].mSelector == kAudioDevicePropertyDeviceIsAlive ) {
926 CallbackInfo *info = (CallbackInfo *) infoPointer;
927 RtApiCore *object = (RtApiCore *) info->object;
928 info->deviceDisconnected = true;
929 object->closeStream();
930 return kAudioHardwareUnspecifiedError;
934 return kAudioHardwareNoError;
// Property listener for processor-overload (xrun) notifications: set the
// appropriate CoreHandle xrun flag — [1] for input scope, [0] otherwise
// (output).
// NOTE(review): the nAddresses parameter declaration and the `else` line
// between the two assignments are elided in this excerpt.
937 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
939 const AudioObjectPropertyAddress properties[],
940 void* handlePointer )
942 CoreHandle *handle = (CoreHandle *) handlePointer;
943 for ( UInt32 i=0; i<nAddresses; i++ ) {
944 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
945 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
946 handle->xrun[1] = true;
948 handle->xrun[0] = true;
952 return kAudioHardwareNoError;
// Probe and configure CoreAudio device `device` for one stream
// direction (`mode`), with the requested channel count/offset, sample
// rate, user sample format and buffer size (*bufferSize is adjusted in
// place to what the device accepts).  On success this fills in the
// stream_ structure, allocates the CoreHandle and any conversion
// buffers, registers the IOProc callback and the xrun/disconnect
// property listeners, and returns true.  On failure it sets errorText_
// and falls through to the cleanup tail at the bottom of the function.
955 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
956 unsigned int firstChannel, unsigned int sampleRate,
957 RtAudioFormat format, unsigned int *bufferSize,
958 RtAudio::StreamOptions *options )
961 unsigned int nDevices = getDeviceCount();
962 if ( nDevices == 0 ) {
963 // This should not happen because a check is made before this function is called.
964 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
968 if ( device >= nDevices ) {
969 // This should not happen because a check is made before this function is called.
970 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// Translate the RtAudio device index into a CoreAudio AudioDeviceID by
// re-fetching the system device list.
974 AudioDeviceID deviceList[ nDevices ];
975 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
976 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
977 kAudioObjectPropertyScopeGlobal,
978 kAudioObjectPropertyElementMaster };
979 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
980 0, NULL, &dataSize, (void *) &deviceList );
981 if ( result != noErr ) {
982 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
986 AudioDeviceID id = deviceList[ device ];
988 // Setup for stream mode.
989 bool isInput = false;
990 if ( mode == INPUT ) {
992 property.mScope = kAudioDevicePropertyScopeInput;
995 property.mScope = kAudioDevicePropertyScopeOutput;
997 // Get the stream "configuration".
998 AudioBufferList *bufferList = nil;
1000 property.mSelector = kAudioDevicePropertyStreamConfiguration;
1001 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
1002 if ( result != noErr || dataSize == 0 ) {
1003 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
1004 errorText_ = errorStream_.str();
1008 // Allocate the AudioBufferList.
1009 bufferList = (AudioBufferList *) malloc( dataSize );
1010 if ( bufferList == NULL ) {
1011 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1015 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1016 if (result != noErr || dataSize == 0) {
1018 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1019 errorText_ = errorStream_.str();
1023 // Search for one or more streams that contain the desired number of
1024 // channels. CoreAudio devices can have an arbitrary number of
1025 // streams and each stream can have an arbitrary number of channels.
1026 // For each stream, a single buffer of interleaved samples is
1027 // provided. RtAudio prefers the use of one stream of interleaved
1028 // data or multiple consecutive single-channel streams. However, we
1029 // now support multiple consecutive multi-channel streams of
1030 // interleaved data as well.
1031 UInt32 iStream, offsetCounter = firstChannel;
1032 UInt32 nStreams = bufferList->mNumberBuffers;
1033 bool monoMode = false;
1034 bool foundStream = false;
1036 // First check that the device supports the requested number of
1038 UInt32 deviceChannels = 0;
1039 for ( iStream=0; iStream<nStreams; iStream++ )
1040 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1042 if ( deviceChannels < ( channels + firstChannel ) ) {
1044 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1045 errorText_ = errorStream_.str();
1049 // Look for a single stream meeting our needs.
1050 UInt32 firstStream = 0, streamCount = 1, streamChannels = 0, channelOffset = 0;
1051 for ( iStream=0; iStream<nStreams; iStream++ ) {
1052 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1053 if ( streamChannels >= channels + offsetCounter ) {
1054 firstStream = iStream;
1055 channelOffset = offsetCounter;
// Stop scanning once the running channel offset lands inside this
// stream; otherwise consume this stream's channels and keep looking.
1059 if ( streamChannels > offsetCounter ) break;
1060 offsetCounter -= streamChannels;
1063 // If we didn't find a single stream above, then we should be able
1064 // to meet the channel specification with multiple streams.
1065 if ( foundStream == false ) {
1067 offsetCounter = firstChannel;
1068 for ( iStream=0; iStream<nStreams; iStream++ ) {
1069 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1070 if ( streamChannels > offsetCounter ) break;
1071 offsetCounter -= streamChannels;
1074 firstStream = iStream;
1075 channelOffset = offsetCounter;
1076 Int32 channelCounter = channels + offsetCounter - streamChannels;
// Walk forward through consecutive streams until the requested channel
// count is covered; monoMode stays true only if every stream touched is
// single-channel.
1078 if ( streamChannels > 1 ) monoMode = false;
1079 while ( channelCounter > 0 ) {
1080 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1081 if ( streamChannels > 1 ) monoMode = false;
1082 channelCounter -= streamChannels;
1089 // Determine the buffer size.
1090 AudioValueRange bufferRange;
1091 dataSize = sizeof( AudioValueRange );
1092 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1093 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1095 if ( result != noErr ) {
1096 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1097 errorText_ = errorStream_.str();
// Clamp the requested size into the device's supported range; the
// MINIMIZE_LATENCY flag forces the smallest supported size.
1101 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned int) bufferRange.mMinimum;
1102 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned int) bufferRange.mMaximum;
1103 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned int) bufferRange.mMinimum;
1105 // Set the buffer size. For multiple streams, I'm assuming we only
1106 // need to make this setting for the master channel.
1107 UInt32 theSize = (UInt32) *bufferSize;
1108 dataSize = sizeof( UInt32 );
1109 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1110 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1112 if ( result != noErr ) {
1113 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1114 errorText_ = errorStream_.str();
1118 // If attempting to setup a duplex stream, the bufferSize parameter
1119 // MUST be the same in both directions!
1120 *bufferSize = theSize;
1121 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1122 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1123 errorText_ = errorStream_.str();
1127 stream_.bufferSize = *bufferSize;
1128 stream_.nBuffers = 1;
1130 // Try to set "hog" mode ... it's not clear to me this is working.
1131 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1133 dataSize = sizeof( hog_pid );
1134 property.mSelector = kAudioDevicePropertyHogMode;
1135 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1136 if ( result != noErr ) {
1137 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1138 errorText_ = errorStream_.str();
// Only claim exclusive access if some other process currently hogs
// (or nobody hogs) the device.
1142 if ( hog_pid != getpid() ) {
1144 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1145 if ( result != noErr ) {
1146 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1147 errorText_ = errorStream_.str();
1153 // Check and if necessary, change the sample rate for the device.
1154 Float64 nominalRate;
1155 dataSize = sizeof( Float64 );
1156 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1157 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1158 if ( result != noErr ) {
1159 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1160 errorText_ = errorStream_.str();
1164 // Only try to change the sample rate if off by more than 1 Hz.
1165 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1167 nominalRate = (Float64) sampleRate;
1168 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1169 if ( result != noErr ) {
1170 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1171 errorText_ = errorStream_.str();
1175 // Now wait until the reported nominal rate is what we just set.
// Poll in 5 ms steps for up to 2 seconds; the rate change is
// asynchronous on the CoreAudio side.
1176 UInt32 microCounter = 0;
1177 Float64 reportedRate = 0.0;
1178 while ( reportedRate != nominalRate ) {
1179 microCounter += 5000;
1180 if ( microCounter > 2000000 ) break;
1182 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &reportedRate );
1185 if ( microCounter > 2000000 ) {
1186 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1187 errorText_ = errorStream_.str();
1192 // Now set the stream format for all streams. Also, check the
1193 // physical format of the device and change that if necessary.
1194 AudioStreamBasicDescription description;
1195 dataSize = sizeof( AudioStreamBasicDescription );
1196 property.mSelector = kAudioStreamPropertyVirtualFormat;
1197 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1198 if ( result != noErr ) {
1199 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1200 errorText_ = errorStream_.str();
1204 // Set the sample rate and data format id. However, only make the
1205 // change if the sample rate is not within 1.0 of the desired
1206 // rate and the format is not linear pcm.
1207 bool updateFormat = false;
1208 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1209 description.mSampleRate = (Float64) sampleRate;
1210 updateFormat = true;
1213 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1214 description.mFormatID = kAudioFormatLinearPCM;
1215 updateFormat = true;
1218 if ( updateFormat ) {
1219 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1220 if ( result != noErr ) {
1221 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1222 errorText_ = errorStream_.str();
1227 // Now check the physical format.
1228 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1229 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1230 if ( result != noErr ) {
1231 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1232 errorText_ = errorStream_.str();
1236 //std::cout << "Current physical stream format:" << std::endl;
1237 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1238 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1239 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1240 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;
// If the device's physical format is not linear PCM with at least
// 16 bits per sample, try a list of candidate formats until one sticks.
1242 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1243 description.mFormatID = kAudioFormatLinearPCM;
1244 //description.mSampleRate = (Float64) sampleRate;
1245 AudioStreamBasicDescription testDescription = description;
1248 // We'll try higher bit rates first and then work our way down.
// NOTE(review): the vector's key type is UInt32 here, but the values
// pushed below are Float32 literals (24.2 / 24.4 distinguish the two
// unpacked 24-bit layouts for the reader).  Since only
// (UInt32)physicalFormats[i].first is ever used, storing 24 for all
// three 24-bit entries is behaviorally equivalent -- but Float32 would
// match the literals; verify against upstream.
1249 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1250 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1251 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1252 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1253 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1254 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
1255 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1256 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1257 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1259 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1260 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1261 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1263 bool setPhysicalFormat = false;
1264 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1265 testDescription = description;
1266 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1267 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): `~` is a bitwise NOT, so `~(x & kAudioFormatFlagIsPacked)`
// is non-zero (true) whether or not the packed flag is set -- every
// 24-bit candidate takes the 4-bytes-per-frame branch.  The intent
// appears to be `!( ... )`, which would give the packed 24-bit entry
// 3 bytes per sample; confirm against upstream before changing.
1268 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1269 testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
1271 testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1272 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1273 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1274 if ( result == noErr ) {
1275 setPhysicalFormat = true;
1276 //std::cout << "Updated physical stream format:" << std::endl;
1277 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1278 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1279 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1280 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
1285 if ( !setPhysicalFormat ) {
1286 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1287 errorText_ = errorStream_.str();
1290 } // done setting virtual/physical formats.
1292 // Get the stream / device latency.
// A missing latency property is only a warning, not a failure.
1294 dataSize = sizeof( UInt32 );
1295 property.mSelector = kAudioDevicePropertyLatency;
1296 if ( AudioObjectHasProperty( id, &property ) == true ) {
1297 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1298 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1300 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1301 errorText_ = errorStream_.str();
1302 error( RtAudioError::WARNING );
1306 // Byte-swapping: According to AudioHardware.h, the stream data will
1307 // always be presented in native-endian format, so we should never
1308 // need to byte swap.
1309 stream_.doByteSwap[mode] = false;
1311 // From the CoreAudio documentation, PCM data must be supplied as
1313 stream_.userFormat = format;
1314 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1316 if ( streamCount == 1 )
1317 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1318 else // multiple streams
1319 stream_.nDeviceChannels[mode] = channels;
1320 stream_.nUserChannels[mode] = channels;
1321 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
1322 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1323 else stream_.userInterleaved = true;
1324 stream_.deviceInterleaved[mode] = true;
1325 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1327 // Set flags for buffer conversion.
1328 stream_.doConvertBuffer[mode] = false;
1329 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1330 stream_.doConvertBuffer[mode] = true;
1331 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1332 stream_.doConvertBuffer[mode] = true;
1333 if ( streamCount == 1 ) {
1334 if ( stream_.nUserChannels[mode] > 1 &&
1335 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1336 stream_.doConvertBuffer[mode] = true;
1338 else if ( monoMode && stream_.userInterleaved )
1339 stream_.doConvertBuffer[mode] = true;
1341 // Allocate our CoreHandle structure for the stream.
1342 CoreHandle *handle = 0;
1343 if ( stream_.apiHandle == 0 ) {
1345 handle = new CoreHandle;
1347 catch ( std::bad_alloc& ) {
1348 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1352 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1353 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1356 stream_.apiHandle = (void *) handle;
1359 handle = (CoreHandle *) stream_.apiHandle;
1360 handle->iStream[mode] = firstStream;
1361 handle->nStreams[mode] = streamCount;
1362 handle->id[mode] = id;
1364 // Allocate necessary internal buffers.
1365 unsigned long bufferBytes;
1366 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1367 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1368 if ( stream_.userBuffer[mode] == NULL ) {
1369 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1373 // If possible, we will make use of the CoreAudio stream buffers as
1374 // "device buffers".  However, we can't do this if using multiple
1376 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
// Reuse an existing (output) device buffer for duplex input when it is
// already large enough; otherwise (re)allocate.
1378 bool makeBuffer = true;
1379 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1380 if ( mode == INPUT ) {
1381 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1382 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1383 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1388 bufferBytes *= *bufferSize;
1389 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1390 stream_.deviceBuffer = (char *) calloc( bufferBytes, sizeof(char) );
1391 if ( stream_.deviceBuffer == NULL ) {
1392 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1398 stream_.sampleRate = sampleRate;
1399 stream_.device[mode] = device;
1400 stream_.state = STREAM_STOPPED;
1401 stream_.callbackInfo.object = (void *) this;
1403 // Setup the buffer conversion information structure.
1404 if ( stream_.doConvertBuffer[mode] ) {
1405 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1406 else setConvertInfo( mode, channelOffset );
1409 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1410 // Only one callback procedure per device.
1411 stream_.mode = DUPLEX;
1413 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1414 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1416 // deprecated in favor of AudioDeviceCreateIOProcID()
1417 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1419 if ( result != noErr ) {
1420 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1421 errorText_ = errorStream_.str();
1424 if ( stream_.mode == OUTPUT && mode == INPUT )
1425 stream_.mode = DUPLEX;
1427 stream_.mode = mode;
1430 // Setup the device property listener for over/underload.
1431 property.mSelector = kAudioDeviceProcessorOverload;
1432 property.mScope = kAudioObjectPropertyScopeGlobal;
1433 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1434 if ( result != noErr ) {
1435 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting xrun listener for device (" << device << ").";
1436 errorText_ = errorStream_.str();
1440 // Setup a listener to detect a possible device disconnect.
1441 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1442 result = AudioObjectAddPropertyListener( id , &property, disconnectListener, (void *) &stream_.callbackInfo );
1443 if ( result != noErr ) {
// Roll back the xrun listener installed just above before failing.
1444 AudioObjectRemovePropertyListener( id, &property, xrunListener, (void *) handle );
1445 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting disconnect listener for device (" << device << ").";
1446 errorText_ = errorStream_.str();
// Error exit path: release the condition variable, the CoreHandle and
// any buffers allocated above, then report failure to the caller.
1454 pthread_cond_destroy( &handle->condition );
1456 stream_.apiHandle = 0;
1459 for ( int i=0; i<2; i++ ) {
1460 if ( stream_.userBuffer[i] ) {
1461 free( stream_.userBuffer[i] );
1462 stream_.userBuffer[i] = 0;
1466 if ( stream_.deviceBuffer ) {
1467 free( stream_.deviceBuffer );
1468 stream_.deviceBuffer = 0;
1472 //stream_.state = STREAM_CLOSED;
// Close the currently open stream: remove the xrun/disconnect property
// listeners from the output and (if distinct) input devices, stop the
// device(s) if the stream is running, destroy the IOProc(s), free the
// user/device buffers, destroy the CoreHandle's condition variable
// (signaling it first in case stopStream() is blocked on it), and
// finally raise DEVICE_DISCONNECT if the close was triggered by the
// disconnect listener.
1476 void RtApiCore :: closeStream( void )
1478   if ( stream_.state == STREAM_CLOSED ) {
1479     errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1480     error( RtAudioError::WARNING );
1484   CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1485   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1487     AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1488                                             kAudioObjectPropertyScopeGlobal,
1489                                             kAudioObjectPropertyElementMaster };
1491     property.mSelector = kAudioDeviceProcessorOverload;
1492     if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1493       errorText_ = "RtApiCore::closeStream(): error removing xrun property listener!";
1494       error( RtAudioError::WARNING );
1496     property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1497     if (AudioObjectRemovePropertyListener( handle->id[0], &property, disconnectListener, (void *) &stream_.callbackInfo ) != noErr) {
1498       errorText_ = "RtApiCore::closeStream(): error removing disconnect property listener!";
1499       error( RtAudioError::WARNING );
1502     if ( stream_.state == STREAM_RUNNING )
1503       AudioDeviceStop( handle->id[0], callbackHandler );
1504 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1505     AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1507     // deprecated in favor of AudioDeviceDestroyIOProcID()
1508     AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Repeat the teardown for the input device, but only when it is a
// different physical device (duplex on one device shares one IOProc).
1512   if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1514     AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1515                                             kAudioObjectPropertyScopeGlobal,
1516                                             kAudioObjectPropertyElementMaster };
1518     property.mSelector = kAudioDeviceProcessorOverload;
1519     if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1520       errorText_ = "RtApiCore::closeStream(): error removing xrun property listener!";
1521       error( RtAudioError::WARNING );
1523     property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1524     if (AudioObjectRemovePropertyListener( handle->id[1], &property, disconnectListener, (void *) &stream_.callbackInfo ) != noErr) {
1525       errorText_ = "RtApiCore::closeStream(): error removing disconnect property listener!";
1526       error( RtAudioError::WARNING );
1529     if ( stream_.state == STREAM_RUNNING )
1530       AudioDeviceStop( handle->id[1], callbackHandler );
1531 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1532     AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1534     // deprecated in favor of AudioDeviceDestroyIOProcID()
1535     AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
1539   for ( int i=0; i<2; i++ ) {
1540     if ( stream_.userBuffer[i] ) {
1541       free( stream_.userBuffer[i] );
1542       stream_.userBuffer[i] = 0;
1546   if ( stream_.deviceBuffer ) {
1547     free( stream_.deviceBuffer );
1548     stream_.deviceBuffer = 0;
1551   // Destroy pthread condition variable.
1552   pthread_cond_signal( &handle->condition ); // signal condition variable in case stopStream is blocked
1553   pthread_cond_destroy( &handle->condition );
1555   stream_.apiHandle = 0;
1557   CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1558   if ( info->deviceDisconnected ) {
1559     errorText_ = "RtApiCore: the stream device was disconnected (and closed)!";
1560     error( RtAudioError::DEVICE_DISCONNECT );
1564   //stream_.mode = UNINITIALIZED;
1565   //stream_.state = STREAM_CLOSED;
1568 //void RtApiCore :: startStream( void )
// Start the stream's CoreAudio device(s).  Returns NO_ERROR on
// success, WARNING if the stream is not in a startable state, or
// SYSTEM_ERROR if AudioDeviceStart() fails.  The input user buffer is
// zeroed before starting so the first callback does not see stale data,
// and the drain bookkeeping in the CoreHandle is reset.
1569 RtAudioError::Type RtApiCore :: startStream( void )
1572   if ( stream_.state != STREAM_STOPPED ) {
1573     if ( stream_.state == STREAM_RUNNING )
1574       errorText_ = "RtApiCore::startStream(): the stream is already running!";
1575     else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
1576       errorText_ = "RtApiCore::startStream(): the stream is stopping or closed!";
1577     return error( RtAudioError::WARNING );
// Record the start time so stream-time queries have a reference point.
1582 #if defined( HAVE_GETTIMEOFDAY )
1583   gettimeofday( &stream_.lastTickTimestamp, NULL );
1587   OSStatus result = noErr;
1588   CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1589   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1591     result = AudioDeviceStart( handle->id[0], callbackHandler );
1592     if ( result != noErr ) {
1593       errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1594       errorText_ = errorStream_.str();
// Start the input device only when it is distinct from the output
// device (same-device duplex shares a single IOProc).
1599   if ( stream_.mode == INPUT ||
1600        ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1602     // Clear user input buffer
1603     unsigned long bufferBytes;
1604     bufferBytes = stream_.nUserChannels[1] * stream_.bufferSize * formatBytes( stream_.userFormat );
1605     memset( stream_.userBuffer[1], 0, bufferBytes * sizeof(char) );
1607     result = AudioDeviceStart( handle->id[1], callbackHandler );
1608     if ( result != noErr ) {
1609       errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1610       errorText_ = errorStream_.str();
1615   // set stream time to zero?
1616   handle->drainCounter = 0;
1617   handle->internalDrain = false;
1618   stream_.state = STREAM_RUNNING;
1621   if ( result == noErr ) return RtAudioError::NO_ERROR;
1622   return error( RtAudioError::SYSTEM_ERROR );
1625 //void RtApiCore :: stopStream( void )
// Stop a running (or stopping) stream.  For output/duplex, if a drain
// was not already requested (drainCounter == 0), request one and block
// on the handle's condition variable until callbackEvent() signals
// that the output has been flushed; then stop the device(s).
1626 RtAudioError::Type RtApiCore :: stopStream( void )
1629   if ( stream_.state != STREAM_RUNNING && stream_.state != STREAM_STOPPING ) {
1630     if ( stream_.state == STREAM_STOPPED )
1631       errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1632     else if ( stream_.state == STREAM_CLOSED )
1633       errorText_ = "RtApiCore::stopStream(): the stream is closed!";
1634     return error( RtAudioError::WARNING );
1638   OSStatus result = noErr;
1639   CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1640   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1642     if ( handle->drainCounter == 0 ) {
1643       handle->drainCounter = 2;
1644       pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1647     result = AudioDeviceStop( handle->id[0], callbackHandler );
1648     if ( result != noErr ) {
1649       errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1650       errorText_ = errorStream_.str();
// Stop the input device only when it is distinct from the output one.
1655   if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1657     result = AudioDeviceStop( handle->id[1], callbackHandler );
1658     if ( result != noErr ) {
1659       errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1660       errorText_ = errorStream_.str();
1665   stream_.state = STREAM_STOPPED;
1668   if ( result == noErr ) return RtAudioError::NO_ERROR;
1669   return error( RtAudioError::SYSTEM_ERROR );
1672 //void RtApiCore :: abortStream( void )
// Abort a running stream: unlike stopStream() called directly, this
// pre-sets drainCounter to 2 so stopStream() skips the drain wait and
// the device is halted without flushing pending output.
1673 RtAudioError::Type RtApiCore :: abortStream( void )
1676   if ( stream_.state != STREAM_RUNNING ) {
1677     if ( stream_.state == STREAM_STOPPED )
1678       errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1679     else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
1680       errorText_ = "RtApiCore::abortStream(): the stream is stopping or closed!";
1681     return error( RtAudioError::WARNING );
1685   CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1686   handle->drainCounter = 2;
1688   stream_.state = STREAM_STOPPING;
1689   return stopStream();
1692 // This function will be called by a spawned thread when the user
1693 // callback function signals that the stream should be stopped or
1694 // aborted.  It is better to handle it this way because the
1695 // callbackEvent() function probably should return before the AudioDeviceStop()
1696 // function is called.
// Thread entry point: receives the stream's CallbackInfo, recovers the
// RtApiCore instance from it, stops the stream and exits the thread.
1697 static void *coreStopStream( void *ptr )
1699   CallbackInfo *info = (CallbackInfo *) ptr;
1700   RtApiCore *object = (RtApiCore *) info->object;
1702   object->stopStream();
1703   pthread_exit( NULL );
1706 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1707 const AudioBufferList *inBufferList,
1708 const AudioBufferList *outBufferList )
1710 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1711 if ( stream_.state == STREAM_CLOSED ) {
1712 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1713 error( RtAudioError::WARNING );
1717 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1718 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1720 // Check if we were draining the stream and signal is finished.
1721 if ( handle->drainCounter > 3 ) {
1722 ThreadHandle threadId;
1724 stream_.state = STREAM_STOPPING;
1725 if ( handle->internalDrain == true )
1726 pthread_create( &threadId, NULL, coreStopStream, info );
1727 else // external call to stopStream()
1728 pthread_cond_signal( &handle->condition );
1732 AudioDeviceID outputDevice = handle->id[0];
1734 // Invoke user callback to get fresh output data UNLESS we are
1735 // draining stream or duplex mode AND the input/output devices are
1736 // different AND this function is called for the input device.
1737 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1738 RtAudioCallback callback = (RtAudioCallback) info->callback;
1739 double streamTime = getStreamTime();
1740 RtAudioStreamStatus status = 0;
1741 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1742 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1743 handle->xrun[0] = false;
1745 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1746 status |= RTAUDIO_INPUT_OVERFLOW;
1747 handle->xrun[1] = false;
1750 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1751 stream_.bufferSize, streamTime, status, info->userData );
1752 if ( cbReturnValue == 2 ) {
1756 else if ( cbReturnValue == 1 ) {
1757 handle->drainCounter = 1;
1758 handle->internalDrain = true;
1762 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1764 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1766 if ( handle->nStreams[0] == 1 ) {
1767 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1769 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1771 else { // fill multiple streams with zeros
1772 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1773 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1775 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1779 else if ( handle->nStreams[0] == 1 ) {
1780 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1781 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1782 stream_.userBuffer[0], stream_.convertInfo[0] );
1784 else { // copy from user buffer
1785 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1786 stream_.userBuffer[0],
1787 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1790 else { // fill multiple streams
1791 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1792 if ( stream_.doConvertBuffer[0] ) {
1793 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1794 inBuffer = (Float32 *) stream_.deviceBuffer;
1797 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1798 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1799 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1800 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1801 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1804 else { // fill multiple multi-channel streams with interleaved data
1805 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1808 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1809 UInt32 inChannels = stream_.nUserChannels[0];
1810 if ( stream_.doConvertBuffer[0] ) {
1811 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1812 inChannels = stream_.nDeviceChannels[0];
1815 if ( inInterleaved ) inOffset = 1;
1816 else inOffset = stream_.bufferSize;
1818 channelsLeft = inChannels;
1819 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1821 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1822 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1825 // Account for possible channel offset in first stream
1826 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1827 streamChannels -= stream_.channelOffset[0];
1828 outJump = stream_.channelOffset[0];
1832 // Account for possible unfilled channels at end of the last stream
1833 if ( streamChannels > channelsLeft ) {
1834 outJump = streamChannels - channelsLeft;
1835 streamChannels = channelsLeft;
1838 // Determine input buffer offsets and skips
1839 if ( inInterleaved ) {
1840 inJump = inChannels;
1841 in += inChannels - channelsLeft;
1845 in += (inChannels - channelsLeft) * inOffset;
1848 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1849 for ( unsigned int j=0; j<streamChannels; j++ ) {
1850 *out++ = in[j*inOffset];
1855 channelsLeft -= streamChannels;
1861 // Don't bother draining input
1862 if ( handle->drainCounter ) {
1863 handle->drainCounter++;
1867 AudioDeviceID inputDevice;
1868 inputDevice = handle->id[1];
1869 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1871 if ( handle->nStreams[1] == 1 ) {
1872 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1873 convertBuffer( stream_.userBuffer[1],
1874 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1875 stream_.convertInfo[1] );
1877 else { // copy to user buffer
1878 memcpy( stream_.userBuffer[1],
1879 inBufferList->mBuffers[handle->iStream[1]].mData,
1880 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1883 else { // read from multiple streams
1884 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1885 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1887 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1888 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1889 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1890 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1891 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1894 else { // read from multiple multi-channel streams
1895 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1898 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1899 UInt32 outChannels = stream_.nUserChannels[1];
1900 if ( stream_.doConvertBuffer[1] ) {
1901 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1902 outChannels = stream_.nDeviceChannels[1];
1905 if ( outInterleaved ) outOffset = 1;
1906 else outOffset = stream_.bufferSize;
1908 channelsLeft = outChannels;
1909 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1911 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1912 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1915 // Account for possible channel offset in first stream
1916 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1917 streamChannels -= stream_.channelOffset[1];
1918 inJump = stream_.channelOffset[1];
1922 // Account for possible unread channels at end of the last stream
1923 if ( streamChannels > channelsLeft ) {
1924 inJump = streamChannels - channelsLeft;
1925 streamChannels = channelsLeft;
1928 // Determine output buffer offsets and skips
1929 if ( outInterleaved ) {
1930 outJump = outChannels;
1931 out += outChannels - channelsLeft;
1935 out += (outChannels - channelsLeft) * outOffset;
1938 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1939 for ( unsigned int j=0; j<streamChannels; j++ ) {
1940 out[j*outOffset] = *in++;
1945 channelsLeft -= streamChannels;
1949 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1950 convertBuffer( stream_.userBuffer[1],
1951 stream_.deviceBuffer,
1952 stream_.convertInfo[1] );
1959 // Make sure to only tick duplex stream time once if using two devices
1960 if ( stream_.mode != DUPLEX || (stream_.mode == DUPLEX && handle->id[0] != handle->id[1] && deviceId == handle->id[0] ) )
1961 RtApi::tickStreamTime();
1966 const char* RtApiCore :: getErrorCode( OSStatus code )
1970 case kAudioHardwareNotRunningError:
1971 return "kAudioHardwareNotRunningError";
1973 case kAudioHardwareUnspecifiedError:
1974 return "kAudioHardwareUnspecifiedError";
1976 case kAudioHardwareUnknownPropertyError:
1977 return "kAudioHardwareUnknownPropertyError";
1979 case kAudioHardwareBadPropertySizeError:
1980 return "kAudioHardwareBadPropertySizeError";
1982 case kAudioHardwareIllegalOperationError:
1983 return "kAudioHardwareIllegalOperationError";
1985 case kAudioHardwareBadObjectError:
1986 return "kAudioHardwareBadObjectError";
1988 case kAudioHardwareBadDeviceError:
1989 return "kAudioHardwareBadDeviceError";
1991 case kAudioHardwareBadStreamError:
1992 return "kAudioHardwareBadStreamError";
1994 case kAudioHardwareUnsupportedOperationError:
1995 return "kAudioHardwareUnsupportedOperationError";
1997 case kAudioDeviceUnsupportedFormatError:
1998 return "kAudioDeviceUnsupportedFormatError";
2000 case kAudioDevicePermissionsError:
2001 return "kAudioDevicePermissionsError";
2004 return "CoreAudio unknown error";
2008 //******************** End of __MACOSX_CORE__ *********************//
2011 #if defined(__UNIX_JACK__)
2013 // JACK is a low-latency audio server, originally written for the
2014 // GNU/Linux operating system and now also ported to OS-X. It can
2015 // connect a number of different applications to an audio device, as
2016 // well as allowing them to share audio between themselves.
2018 // When using JACK with RtAudio, "devices" refer to JACK clients that
2019 // have ports connected to the server. The JACK server is typically
2020 // started in a terminal as follows:
2022 // .jackd -d alsa -d hw:0
2024 // or through an interface program such as qjackctl. Many of the
2025 // parameters normally set for a stream are fixed by the JACK server
2026 // and can be specified when the JACK server is started. In
2029 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2031 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2032 // frames, and number of buffers = 4. Once the server is running, it
2033 // is not possible to override these values. If the values are not
2034 // specified in the command-line, the JACK server uses default values.
2036 // The JACK server does not have to be running when an instance of
2037 // RtApiJack is created, though the function getDeviceCount() will
2038 // report 0 devices found until JACK has been started. When no
2039 // devices are available (i.e., the JACK server is not running), a
2040 // stream cannot be opened.
2042 #include <jack/jack.h>
2046 // A structure to hold various information related to the Jack API
2049 jack_client_t *client;
2050 jack_port_t **ports[2];
2051 std::string deviceName[2];
2053 pthread_cond_t condition;
2054 int drainCounter; // Tracks callback counts when draining
2055 bool internalDrain; // Indicates if stop is initiated from callback or not.
2058 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
2061 #if !defined(__RTAUDIO_DEBUG__)
// No-op Jack error callback: installed (in non-debug builds) via
// jack_set_error_function() to suppress Jack's internal error printing.
// The argument (the error message) is intentionally ignored.
// Fix: removed the stray ';' after the function body, which is an
// empty declaration that pedantic compilers warn about.
static void jackSilentError( const char * ) {}
2065 RtApiJack :: RtApiJack()
2066 :shouldAutoconnect_(true) {
2067 // Nothing to do here.
2068 #if !defined(__RTAUDIO_DEBUG__)
2069 // Turn off Jack's internal error reporting.
2070 jack_set_error_function( &jackSilentError );
2074 RtApiJack :: ~RtApiJack()
2076 if ( stream_.state != STREAM_CLOSED ) closeStream();
2079 unsigned int RtApiJack :: getDeviceCount( void )
2081 // See if we can become a jack client.
2082 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2083 jack_status_t *status = NULL;
2084 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2085 if ( client == 0 ) return 0;
2088 std::string port, previousPort;
2089 unsigned int nChannels = 0, nDevices = 0;
2090 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2092 // Parse the port names up to the first colon (:).
2095 port = (char *) ports[ nChannels ];
2096 iColon = port.find(":");
2097 if ( iColon != std::string::npos ) {
2098 port = port.substr( 0, iColon + 1 );
2099 if ( port != previousPort ) {
2101 previousPort = port;
2104 } while ( ports[++nChannels] );
2108 jack_client_close( client );
// Probe Jack "device" number 'device' (a client-name prefix among the
// server's ports) and fill in a DeviceInfo structure: name, input/output
// channel counts, the server's (fixed) sample rate and native format.
// A temporary client connection is opened and closed for the probe.
// NOTE(review): gaps in the embedded original line numbers show that
// some lines (braces, early returns, the do/while header, free(ports)
// calls, the final return) are missing from this excerpt; the visible
// code is kept byte-identical.
2112 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2114 RtAudio::DeviceInfo info;
2115 info.probed = false;
// Try to connect to the Jack server without starting one ourselves.
2117 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2118 jack_status_t *status = NULL;
2119 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2120 if ( client == 0 ) {
2121 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2122 error( RtAudioError::WARNING );
// Enumerate all ports and group them by client-name prefix to find the
// name of the requested device.
2127 std::string port, previousPort;
2128 unsigned int nPorts = 0, nDevices = 0;
2129 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2131 // Parse the port names up to the first colon (:).
2134 port = (char *) ports[ nPorts ];
2135 iColon = port.find(":");
2136 if ( iColon != std::string::npos ) {
2137 port = port.substr( 0, iColon );
2138 if ( port != previousPort ) {
2139 if ( nDevices == device ) info.name = port;
2141 previousPort = port;
2144 } while ( ports[++nPorts] );
// Requested device index beyond the number of prefixes found.
2148 if ( device >= nDevices ) {
2149 jack_client_close( client );
2150 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2151 error( RtAudioError::INVALID_USE );
2155 // Get the current jack server sample rate.
2156 info.sampleRates.clear();
// Jack fixes the rate server-wide, so only one rate is reported.
2158 info.preferredSampleRate = jack_get_sample_rate( client );
2159 info.sampleRates.push_back( info.preferredSampleRate );
2161 // Count the available ports containing the client name as device
2162 // channels. Jack "input ports" equal RtAudio output channels.
2163 unsigned int nChannels = 0;
2164 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2166 while ( ports[ nChannels ] ) nChannels++;
2168 info.outputChannels = nChannels;
2171 // Jack "output ports" equal RtAudio input channels.
2173 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2175 while ( ports[ nChannels ] ) nChannels++;
2177 info.inputChannels = nChannels;
// A device with no ports in either direction is unusable.
2180 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2181 jack_client_close(client);
2182 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2183 error( RtAudioError::WARNING );
2187 // If device opens for both playback and capture, we determine the channels.
2188 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2189 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2191 // Jack always uses 32-bit floats.
2192 info.nativeFormats = RTAUDIO_FLOAT32;
2194 // Jack doesn't provide default devices so we'll use the first available one.
2195 if ( device == 0 && info.outputChannels > 0 )
2196 info.isDefaultOutput = true;
2197 if ( device == 0 && info.inputChannels > 0 )
2198 info.isDefaultInput = true;
// Release the temporary probe client before returning.
2200 jack_client_close(client);
2205 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2207 CallbackInfo *info = (CallbackInfo *) infoPointer;
2209 RtApiJack *object = (RtApiJack *) info->object;
2210 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2215 // This function will be called by a spawned thread when the Jack
2216 // server signals that it is shutting down. It is necessary to handle
2217 // it this way because the jackShutdown() function must return before
2218 // the jack_deactivate() function (in closeStream()) will return.
2219 static void *jackCloseStream( void *ptr )
2221 CallbackInfo *info = (CallbackInfo *) ptr;
2222 RtApiJack *object = (RtApiJack *) info->object;
2224 object->closeStream();
2226 pthread_exit( NULL );
2228 static void jackShutdown( void *infoPointer )
2230 CallbackInfo *info = (CallbackInfo *) infoPointer;
2231 RtApiJack *object = (RtApiJack *) info->object;
2233 // Check current stream state. If stopped, then we'll assume this
2234 // was called as a result of a call to RtApiJack::stopStream (the
2235 // deactivation of a client handle causes this function to be called).
2236 // If not, we'll assume the Jack server is shutting down or some
2237 // other problem occurred and we should close the stream.
2238 if ( object->isStreamRunning() == false ) return;
2240 ThreadHandle threadId;
2241 pthread_create( &threadId, NULL, jackCloseStream, info );
2242 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2245 static int jackXrun( void *infoPointer )
2247 JackHandle *handle = *((JackHandle **) infoPointer);
2249 if ( handle->ports[0] ) handle->xrun[0] = true;
2250 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open (or extend to duplex) a stream on Jack device 'device':
// connect as a Jack client, validate channel counts and sample rate,
// allocate the JackHandle and user/device buffers, register the Jack
// process/xrun/shutdown callbacks and the stream's ports.  Returns
// SUCCESS/FAILURE; on failure all partially-allocated resources are
// released by the cleanup code at the end of the function.
// NOTE(review): gaps in the embedded original line numbers show that
// several lines (opening braces, the 'error:' label, goto/return
// statements, free(ports) calls, try/else headers) are missing from
// this excerpt; the visible code is kept byte-identical.
2255 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2256 unsigned int firstChannel, unsigned int sampleRate,
2257 RtAudioFormat format, unsigned int *bufferSize,
2258 RtAudio::StreamOptions *options )
2260 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2262 // Look for jack server and try to become a client (only do once per stream).
2263 jack_client_t *client = 0;
2264 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2265 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2266 jack_status_t *status = NULL;
// Use the caller-supplied stream name for the client if one was given.
2267 if ( options && !options->streamName.empty() )
2268 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2270 client = jack_client_open( "RtApiJack", jackoptions, status );
2271 if ( client == 0 ) {
2272 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2273 error( RtAudioError::WARNING );
2278 // The handle must have been created on an earlier pass.
2279 client = handle->client;
// Resolve the device index to a client-name prefix, as in getDeviceInfo().
2283 std::string port, previousPort, deviceName;
2284 unsigned int nPorts = 0, nDevices = 0;
2285 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2287 // Parse the port names up to the first colon (:).
2290 port = (char *) ports[ nPorts ];
2291 iColon = port.find(":");
2292 if ( iColon != std::string::npos ) {
2293 port = port.substr( 0, iColon );
2294 if ( port != previousPort ) {
2295 if ( nDevices == device ) deviceName = port;
2297 previousPort = port;
2300 } while ( ports[++nPorts] );
2304 if ( device >= nDevices ) {
2305 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
// RtAudio output channels connect to Jack *input* ports and vice versa.
2309 unsigned long flag = JackPortIsInput;
2310 if ( mode == INPUT ) flag = JackPortIsOutput;
2312 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2313 // Count the available ports containing the client name as device
2314 // channels. Jack "input ports" equal RtAudio output channels.
2315 unsigned int nChannels = 0;
2316 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2318 while ( ports[ nChannels ] ) nChannels++;
2321 // Compare the jack ports for specified client to the requested number of channels.
2322 if ( nChannels < (channels + firstChannel) ) {
2323 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2324 errorText_ = errorStream_.str();
2329 // Check the jack server sample rate.
2330 unsigned int jackRate = jack_get_sample_rate( client );
// The Jack server rate is fixed; the request must match it exactly.
2331 if ( sampleRate != jackRate ) {
2332 jack_client_close( client );
2333 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2334 errorText_ = errorStream_.str();
2337 stream_.sampleRate = jackRate;
2339 // Get the latency of the JACK port.
2340 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2341 if ( ports[ firstChannel ] ) {
2343 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2344 // the range (usually the min and max are equal)
2345 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2346 // get the latency range
2347 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2348 // be optimistic, use the min!
2349 stream_.latency[mode] = latrange.min;
2350 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2354 // The jack server always uses 32-bit floating-point data.
2355 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2356 stream_.userFormat = format;
2358 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2359 else stream_.userInterleaved = true;
2361 // Jack always uses non-interleaved buffers.
2362 stream_.deviceInterleaved[mode] = false;
2364 // Jack always provides host byte-ordered data.
2365 stream_.doByteSwap[mode] = false;
2367 // Get the buffer size. The buffer size and number of buffers
2368 // (periods) is set when the jack server is started.
2369 stream_.bufferSize = (int) jack_get_buffer_size( client );
2370 *bufferSize = stream_.bufferSize;
2372 stream_.nDeviceChannels[mode] = channels;
2373 stream_.nUserChannels[mode] = channels;
2375 // Set flags for buffer conversion.
2376 stream_.doConvertBuffer[mode] = false;
// Conversion is needed on a format mismatch, or when a multi-channel
// user stream is interleaved (Jack buffers never are).
2377 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2378 stream_.doConvertBuffer[mode] = true;
2379 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2380 stream_.nUserChannels[mode] > 1 )
2381 stream_.doConvertBuffer[mode] = true;
2383 // Allocate our JackHandle structure for the stream.
2384 if ( handle == 0 ) {
2386 handle = new JackHandle;
2388 catch ( std::bad_alloc& ) {
2389 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2393 if ( pthread_cond_init(&handle->condition, NULL) ) {
2394 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2397 stream_.apiHandle = (void *) handle;
2398 handle->client = client;
2400 handle->deviceName[mode] = deviceName;
2402 // Allocate necessary internal buffers.
2403 unsigned long bufferBytes;
2404 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2405 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2406 if ( stream_.userBuffer[mode] == NULL ) {
2407 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2411 if ( stream_.doConvertBuffer[mode] ) {
// For duplex streams the device buffer is shared between directions;
// only (re)allocate when the existing one is too small.
2413 bool makeBuffer = true;
2414 if ( mode == OUTPUT )
2415 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2416 else { // mode == INPUT
2417 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2418 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2419 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2420 if ( bufferBytes < bytesOut ) makeBuffer = false;
2425 bufferBytes *= *bufferSize;
2426 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2427 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2428 if ( stream_.deviceBuffer == NULL ) {
2429 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2435 // Allocate memory for the Jack ports (channels) identifiers.
2436 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2437 if ( handle->ports[mode] == NULL ) {
2438 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2442 stream_.device[mode] = device;
2443 stream_.channelOffset[mode] = firstChannel;
2444 stream_.state = STREAM_STOPPED;
2445 stream_.callbackInfo.object = (void *) this;
2447 if ( stream_.mode == OUTPUT && mode == INPUT )
2448 // We had already set up the stream for output.
2449 stream_.mode = DUPLEX;
2451 stream_.mode = mode;
// Install the Jack callbacks for this client.
2452 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2453 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2454 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2457 // Register our ports.
2459 if ( mode == OUTPUT ) {
2460 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2461 snprintf( label, 64, "outport %d", i );
2462 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2463 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2467 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2468 snprintf( label, 64, "inport %d", i );
2469 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2470 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2474 // Setup the buffer conversion information structure. We don't use
2475 // buffers to do channel offsets, so we override that parameter
2477 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2479 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// Error cleanup: tear down the handle, client and any buffers that
// were allocated before the failure.
2485 pthread_cond_destroy( &handle->condition );
2486 jack_client_close( handle->client );
2488 if ( handle->ports[0] ) free( handle->ports[0] );
2489 if ( handle->ports[1] ) free( handle->ports[1] );
2492 stream_.apiHandle = 0;
2495 for ( int i=0; i<2; i++ ) {
2496 if ( stream_.userBuffer[i] ) {
2497 free( stream_.userBuffer[i] );
2498 stream_.userBuffer[i] = 0;
2502 if ( stream_.deviceBuffer ) {
2503 free( stream_.deviceBuffer );
2504 stream_.deviceBuffer = 0;
// Close the open stream: deactivate and close the Jack client, destroy
// the JackHandle and free all stream buffers, then mark the stream
// uninitialized/closed.  Warns (without throwing) if no stream is open.
// NOTE(review): gaps in the embedded original line numbers show that
// some lines (braces, an early return, delete of the handle) are
// missing from this excerpt; the visible code is kept byte-identical.
2510 void RtApiJack :: closeStream( void )
2512 if ( stream_.state == STREAM_CLOSED ) {
2513 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2514 error( RtAudioError::WARNING );
2518 JackHandle *handle = (JackHandle *) stream_.apiHandle;
// A running stream must be deactivated before the client is closed.
2521 if ( stream_.state == STREAM_RUNNING )
2522 jack_deactivate( handle->client );
2524 jack_client_close( handle->client );
// Release per-stream handle resources.
2528 if ( handle->ports[0] ) free( handle->ports[0] );
2529 if ( handle->ports[1] ) free( handle->ports[1] );
2530 pthread_cond_destroy( &handle->condition );
2532 stream_.apiHandle = 0;
// Free the user buffers for both directions and the shared device buffer.
2535 for ( int i=0; i<2; i++ ) {
2536 if ( stream_.userBuffer[i] ) {
2537 free( stream_.userBuffer[i] );
2538 stream_.userBuffer[i] = 0;
2542 if ( stream_.deviceBuffer ) {
2543 free( stream_.deviceBuffer );
2544 stream_.deviceBuffer = 0;
2547 stream_.mode = UNINITIALIZED;
2548 stream_.state = STREAM_CLOSED;
// Start the open stream: activate the Jack client and, unless
// autoconnect was disabled (RTAUDIO_JACK_DONT_CONNECT), connect our
// registered ports to the device's ports, honoring the channel offset
// chosen at open time.  Reports SYSTEM_ERROR if activation or any
// connection fails.
// NOTE(review): gaps in the embedded original line numbers show that
// some lines (braces, verifyStream(), '#endif', goto/unlock handling,
// free(ports) calls) are missing from this excerpt; the visible code
// is kept byte-identical.
2551 void RtApiJack :: startStream( void )
2554 if ( stream_.state == STREAM_RUNNING ) {
2555 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2556 error( RtAudioError::WARNING );
// Record the stream start time when gettimeofday() is available.
2560 #if defined( HAVE_GETTIMEOFDAY )
2561 gettimeofday( &stream_.lastTickTimestamp, NULL );
2564 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2565 int result = jack_activate( handle->client );
2567 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2573 // Get the list of available ports.
// Playback direction: connect our output ports to the device's
// physical/input ports.
2574 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2576 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2577 if ( ports == NULL) {
2578 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2582 // Now make the port connections. Since RtAudio wasn't designed to
2583 // allow the user to select particular channels of a device, we'll
2584 // just open the first "nChannels" ports with offset.
2585 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2587 if ( ports[ stream_.channelOffset[0] + i ] )
2588 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2591 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
// Capture direction: connect the device's output ports to our input ports.
2598 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2600 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2601 if ( ports == NULL) {
2602 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2606 // Now make the port connections. See note above.
2607 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2609 if ( ports[ stream_.channelOffset[1] + i ] )
2610 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2613 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Reset drain bookkeeping and mark the stream running.
2620 handle->drainCounter = 0;
2621 handle->internalDrain = false;
2622 stream_.state = STREAM_RUNNING;
2625 if ( result == 0 ) return;
2626 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully.  For output/duplex streams, request a
// drain (drainCounter = 2) and block on the handle's condition variable
// until callbackEvent() signals that the remaining output has been
// played; then deactivate the Jack client.
// NOTE(review): gaps in the embedded original line numbers show that
// some lines (braces, verifyStream(), an early return) are missing
// from this excerpt; the visible code is kept byte-identical.
2629 void RtApiJack :: stopStream( void )
2632 if ( stream_.state == STREAM_STOPPED ) {
2633 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2634 error( RtAudioError::WARNING );
2638 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2639 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means stop was initiated here (not by the callback),
// so we must wait for the callback thread to finish draining.
2641 if ( handle->drainCounter == 0 ) {
2642 handle->drainCounter = 2;
2643 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2647 jack_deactivate( handle->client );
2648 stream_.state = STREAM_STOPPED;
// Stop the stream immediately: set drainCounter past the "fill with
// zeros" threshold so pending output is discarded rather than drained.
// NOTE(review): gaps in the embedded original line numbers show that
// some lines (braces, verifyStream(), the final stopStream() call) are
// missing from this excerpt; the visible code is kept byte-identical.
2651 void RtApiJack :: abortStream( void )
2654 if ( stream_.state == STREAM_STOPPED ) {
2655 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2656 error( RtAudioError::WARNING );
2660 JackHandle *handle = (JackHandle *) stream_.apiHandle;
// Skip draining: the callback will write zeros and signal completion.
2661 handle->drainCounter = 2;
2666 // This function will be called by a spawned thread when the user
2667 // callback function signals that the stream should be stopped or
2668 // aborted. It is necessary to handle it this way because the
2669 // callbackEvent() function must return before the jack_deactivate()
2670 // function will return.
2671 static void *jackStopStream( void *ptr )
2673 CallbackInfo *info = (CallbackInfo *) ptr;
2674 RtApiJack *object = (RtApiJack *) info->object;
2676 object->stopStream();
2677 pthread_exit( NULL );
2680 bool RtApiJack :: callbackEvent( unsigned long nframes )
2682 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2683 if ( stream_.state == STREAM_CLOSED ) {
2684 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2685 error( RtAudioError::WARNING );
2688 if ( stream_.bufferSize != nframes ) {
2689 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2690 error( RtAudioError::WARNING );
2694 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2695 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2697 // Check if we were draining the stream and signal is finished.
2698 if ( handle->drainCounter > 3 ) {
2699 ThreadHandle threadId;
2701 stream_.state = STREAM_STOPPING;
2702 if ( handle->internalDrain == true )
2703 pthread_create( &threadId, NULL, jackStopStream, info );
2705 pthread_cond_signal( &handle->condition );
2709 // Invoke user callback first, to get fresh output data.
2710 if ( handle->drainCounter == 0 ) {
2711 RtAudioCallback callback = (RtAudioCallback) info->callback;
2712 double streamTime = getStreamTime();
2713 RtAudioStreamStatus status = 0;
2714 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2715 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2716 handle->xrun[0] = false;
2718 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2719 status |= RTAUDIO_INPUT_OVERFLOW;
2720 handle->xrun[1] = false;
2722 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2723 stream_.bufferSize, streamTime, status, info->userData );
2724 if ( cbReturnValue == 2 ) {
2725 stream_.state = STREAM_STOPPING;
2726 handle->drainCounter = 2;
2728 pthread_create( &id, NULL, jackStopStream, info );
2731 else if ( cbReturnValue == 1 ) {
2732 handle->drainCounter = 1;
2733 handle->internalDrain = true;
2737 jack_default_audio_sample_t *jackbuffer;
2738 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2739 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2741 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2743 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2744 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2745 memset( jackbuffer, 0, bufferBytes );
2749 else if ( stream_.doConvertBuffer[0] ) {
2751 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2753 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2754 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2755 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2758 else { // no buffer conversion
2759 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2760 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2761 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2766 // Don't bother draining input
2767 if ( handle->drainCounter ) {
2768 handle->drainCounter++;
2772 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2774 if ( stream_.doConvertBuffer[1] ) {
2775 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2776 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2777 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2779 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2781 else { // no buffer conversion
2782 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2783 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2784 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2790 RtApi::tickStreamTime();
2793 //******************** End of __UNIX_JACK__ *********************//
2796 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2798 // The ASIO API is designed around a callback scheme, so this
2799 // implementation is similar to that used for OS-X CoreAudio and Linux
2800 // Jack. The primary constraint with ASIO is that it only allows
2801 // access to a single driver at a time. Thus, it is not possible to
2802 // have more than one simultaneous RtAudio stream.
2804 // This implementation also requires a number of external ASIO files
2805 // and a few global variables. The ASIO callback scheme does not
2806 // allow for the passing of user data, so we must create a global
2807 // pointer to our callbackInfo structure.
2809 // On unix systems, we make use of a pthread condition variable.
2810 // Since there is no equivalent in Windows, I hacked something based
2811 // on information found in
2812 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2814 #include "asiosys.h"
2816 #include "iasiothiscallresolver.h"
2817 #include "asiodrivers.h"
// Globals required by the ASIO callback scheme, which cannot pass user
// data (see the section comment above): the driver list, the callback
// table handed to ASIOInit, driver info, a pointer to the stream's
// CallbackInfo, and an xrun flag.
2820 static AsioDrivers drivers;
2821 static ASIOCallbacks asioCallbacks;
2822 static ASIODriverInfo driverInfo;
2823 static CallbackInfo *asioCallbackInfo;
2824 static bool asioXRun;
// Per-stream state for the ASIO implementation.
// NOTE(review): the struct's opening declaration line (and, per the
// numbering gap before the constructor, at least one more member line)
// is missing from this excerpt; visible lines kept byte-identical.
2827 int drainCounter; // Tracks callback counts when draining
2828 bool internalDrain; // Indicates if stop is initiated from callback or not.
2829 ASIOBufferInfo *bufferInfos;
2833 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2836 // Function declarations (definitions at end of section)
2837 static const char* getAsioErrorString( ASIOError result );
2838 static void sampleRateChanged( ASIOSampleRate sRate );
2839 static long asioMessages( long selector, long value, void* message, double* opt );
2841 RtApiAsio :: RtApiAsio()
2843 // ASIO cannot run on a multi-threaded appartment. You can call
2844 // CoInitialize beforehand, but it must be for appartment threading
2845 // (in which case, CoInitilialize will return S_FALSE here).
2846 coInitialized_ = false;
2847 HRESULT hr = CoInitialize( NULL );
2849 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2850 error( RtAudioError::WARNING );
2852 coInitialized_ = true;
2854 drivers.removeCurrentDriver();
2855 driverInfo.asioVersion = 2;
2857 // See note in DirectSound implementation about GetDesktopWindow().
2858 driverInfo.sysRef = GetForegroundWindow();
2861 RtApiAsio :: ~RtApiAsio()
2863 if ( stream_.state != STREAM_CLOSED ) closeStream();
2864 if ( coInitialized_ ) CoUninitialize();
2867 unsigned int RtApiAsio :: getDeviceCount( void )
2869 return (unsigned int) drivers.asioGetNumDev();
2872 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2874 RtAudio::DeviceInfo info;
2875 info.probed = false;
2878 unsigned int nDevices = getDeviceCount();
2879 if ( nDevices == 0 ) {
2880 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2881 error( RtAudioError::INVALID_USE );
2885 if ( device >= nDevices ) {
2886 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2887 error( RtAudioError::INVALID_USE );
2891 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2892 if ( stream_.state != STREAM_CLOSED ) {
2893 if ( device >= devices_.size() ) {
2894 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2895 error( RtAudioError::WARNING );
2898 return devices_[ device ];
2901 char driverName[32];
2902 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2903 if ( result != ASE_OK ) {
2904 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2905 errorText_ = errorStream_.str();
2906 error( RtAudioError::WARNING );
2910 info.name = driverName;
2912 if ( !drivers.loadDriver( driverName ) ) {
2913 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2914 errorText_ = errorStream_.str();
2915 error( RtAudioError::WARNING );
2919 result = ASIOInit( &driverInfo );
2920 if ( result != ASE_OK ) {
2921 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2922 errorText_ = errorStream_.str();
2923 error( RtAudioError::WARNING );
2927 // Determine the device channel information.
2928 long inputChannels, outputChannels;
2929 result = ASIOGetChannels( &inputChannels, &outputChannels );
2930 if ( result != ASE_OK ) {
2931 drivers.removeCurrentDriver();
2932 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2933 errorText_ = errorStream_.str();
2934 error( RtAudioError::WARNING );
2938 info.outputChannels = outputChannels;
2939 info.inputChannels = inputChannels;
2940 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2941 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2943 // Determine the supported sample rates.
2944 info.sampleRates.clear();
2945 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2946 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2947 if ( result == ASE_OK ) {
2948 info.sampleRates.push_back( SAMPLE_RATES[i] );
2950 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2951 info.preferredSampleRate = SAMPLE_RATES[i];
2955 // Determine supported data types ... just check first channel and assume rest are the same.
2956 ASIOChannelInfo channelInfo;
2957 channelInfo.channel = 0;
2958 channelInfo.isInput = true;
2959 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2960 result = ASIOGetChannelInfo( &channelInfo );
2961 if ( result != ASE_OK ) {
2962 drivers.removeCurrentDriver();
2963 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2964 errorText_ = errorStream_.str();
2965 error( RtAudioError::WARNING );
2969 info.nativeFormats = 0;
2970 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2971 info.nativeFormats |= RTAUDIO_SINT16;
2972 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2973 info.nativeFormats |= RTAUDIO_SINT32;
2974 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2975 info.nativeFormats |= RTAUDIO_FLOAT32;
2976 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2977 info.nativeFormats |= RTAUDIO_FLOAT64;
2978 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2979 info.nativeFormats |= RTAUDIO_SINT24;
2981 if ( info.outputChannels > 0 )
2982 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2983 if ( info.inputChannels > 0 )
2984 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2987 drivers.removeCurrentDriver();
2991 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2993 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2994 object->callbackEvent( index );
2997 void RtApiAsio :: saveDeviceInfo( void )
3001 unsigned int nDevices = getDeviceCount();
3002 devices_.resize( nDevices );
3003 for ( unsigned int i=0; i<nDevices; i++ )
3004 devices_[i] = getDeviceInfo( i );
3007 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
3008 unsigned int firstChannel, unsigned int sampleRate,
3009 RtAudioFormat format, unsigned int *bufferSize,
3010 RtAudio::StreamOptions *options )
3011 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3013 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
3015 // For ASIO, a duplex stream MUST use the same driver.
3016 if ( isDuplexInput && stream_.device[0] != device ) {
3017 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
3021 char driverName[32];
3022 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
3023 if ( result != ASE_OK ) {
3024 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
3025 errorText_ = errorStream_.str();
3029 // Only load the driver once for duplex stream.
3030 if ( !isDuplexInput ) {
3031 // The getDeviceInfo() function will not work when a stream is open
3032 // because ASIO does not allow multiple devices to run at the same
3033 // time. Thus, we'll probe the system before opening a stream and
3034 // save the results for use by getDeviceInfo().
3035 this->saveDeviceInfo();
3037 if ( !drivers.loadDriver( driverName ) ) {
3038 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
3039 errorText_ = errorStream_.str();
3043 result = ASIOInit( &driverInfo );
3044 if ( result != ASE_OK ) {
3045 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3046 errorText_ = errorStream_.str();
3051 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3052 bool buffersAllocated = false;
3053 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3054 unsigned int nChannels;
3057 // Check the device channel count.
3058 long inputChannels, outputChannels;
3059 result = ASIOGetChannels( &inputChannels, &outputChannels );
3060 if ( result != ASE_OK ) {
3061 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3062 errorText_ = errorStream_.str();
3066 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3067 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3068 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3069 errorText_ = errorStream_.str();
3072 stream_.nDeviceChannels[mode] = channels;
3073 stream_.nUserChannels[mode] = channels;
3074 stream_.channelOffset[mode] = firstChannel;
3076 // Verify the sample rate is supported.
3077 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3078 if ( result != ASE_OK ) {
3079 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3080 errorText_ = errorStream_.str();
3084 // Get the current sample rate
3085 ASIOSampleRate currentRate;
3086 result = ASIOGetSampleRate( ¤tRate );
3087 if ( result != ASE_OK ) {
3088 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3089 errorText_ = errorStream_.str();
3093 // Set the sample rate only if necessary
3094 if ( currentRate != sampleRate ) {
3095 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3096 if ( result != ASE_OK ) {
3097 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3098 errorText_ = errorStream_.str();
3103 // Determine the driver data type.
3104 ASIOChannelInfo channelInfo;
3105 channelInfo.channel = 0;
3106 if ( mode == OUTPUT ) channelInfo.isInput = false;
3107 else channelInfo.isInput = true;
3108 result = ASIOGetChannelInfo( &channelInfo );
3109 if ( result != ASE_OK ) {
3110 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3111 errorText_ = errorStream_.str();
3115 // Assuming WINDOWS host is always little-endian.
3116 stream_.doByteSwap[mode] = false;
3117 stream_.userFormat = format;
3118 stream_.deviceFormat[mode] = 0;
3119 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3120 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3121 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3123 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3124 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3125 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3127 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3128 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3129 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3131 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3132 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3133 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3135 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3136 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3137 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3140 if ( stream_.deviceFormat[mode] == 0 ) {
3141 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3142 errorText_ = errorStream_.str();
3146 // Set the buffer size. For a duplex stream, this will end up
3147 // setting the buffer size based on the input constraints, which
3149 long minSize, maxSize, preferSize, granularity;
3150 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3151 if ( result != ASE_OK ) {
3152 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3153 errorText_ = errorStream_.str();
3157 if ( isDuplexInput ) {
3158 // When this is the duplex input (output was opened before), then we have to use the same
3159 // buffersize as the output, because it might use the preferred buffer size, which most
3160 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3161 // So instead of throwing an error, make them equal. The caller uses the reference
3162 // to the "bufferSize" param as usual to set up processing buffers.
3164 *bufferSize = stream_.bufferSize;
3167 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3168 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3169 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3170 else if ( granularity == -1 ) {
3171 // Make sure bufferSize is a power of two.
3172 int log2_of_min_size = 0;
3173 int log2_of_max_size = 0;
3175 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3176 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3177 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3180 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3181 int min_delta_num = log2_of_min_size;
3183 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3184 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3185 if (current_delta < min_delta) {
3186 min_delta = current_delta;
3191 *bufferSize = ( (unsigned int)1 << min_delta_num );
3192 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3193 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3195 else if ( granularity != 0 ) {
3196 // Set to an even multiple of granularity, rounding up.
3197 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3202 // we don't use it anymore, see above!
3203 // Just left it here for the case...
3204 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3205 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3210 stream_.bufferSize = *bufferSize;
3211 stream_.nBuffers = 2;
3213 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3214 else stream_.userInterleaved = true;
3216 // ASIO always uses non-interleaved buffers.
3217 stream_.deviceInterleaved[mode] = false;
3219 // Allocate, if necessary, our AsioHandle structure for the stream.
3220 if ( handle == 0 ) {
3222 handle = new AsioHandle;
3224 catch ( std::bad_alloc& ) {
3225 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3228 handle->bufferInfos = 0;
3230 // Create a manual-reset event.
3231 handle->condition = CreateEvent( NULL, // no security
3232 TRUE, // manual-reset
3233 FALSE, // non-signaled initially
3235 stream_.apiHandle = (void *) handle;
3238 // Create the ASIO internal buffers. Since RtAudio sets up input
3239 // and output separately, we'll have to dispose of previously
3240 // created output buffers for a duplex stream.
3241 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3242 ASIODisposeBuffers();
3243 if ( handle->bufferInfos ) free( handle->bufferInfos );
3246 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3248 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3249 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3250 if ( handle->bufferInfos == NULL ) {
3251 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3252 errorText_ = errorStream_.str();
3256 ASIOBufferInfo *infos;
3257 infos = handle->bufferInfos;
3258 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3259 infos->isInput = ASIOFalse;
3260 infos->channelNum = i + stream_.channelOffset[0];
3261 infos->buffers[0] = infos->buffers[1] = 0;
3263 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3264 infos->isInput = ASIOTrue;
3265 infos->channelNum = i + stream_.channelOffset[1];
3266 infos->buffers[0] = infos->buffers[1] = 0;
3269 // prepare for callbacks
3270 stream_.sampleRate = sampleRate;
3271 stream_.device[mode] = device;
3272 stream_.mode = isDuplexInput ? DUPLEX : mode;
3274 // store this class instance before registering callbacks, that are going to use it
3275 asioCallbackInfo = &stream_.callbackInfo;
3276 stream_.callbackInfo.object = (void *) this;
3278 // Set up the ASIO callback structure and create the ASIO data buffers.
3279 asioCallbacks.bufferSwitch = &bufferSwitch;
3280 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3281 asioCallbacks.asioMessage = &asioMessages;
3282 asioCallbacks.bufferSwitchTimeInfo = NULL;
3283 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3284 if ( result != ASE_OK ) {
3285 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3286 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3287 // In that case, let's be naïve and try that instead.
3288 *bufferSize = preferSize;
3289 stream_.bufferSize = *bufferSize;
3290 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3293 if ( result != ASE_OK ) {
3294 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3295 errorText_ = errorStream_.str();
3298 buffersAllocated = true;
3299 stream_.state = STREAM_STOPPED;
3301 // Set flags for buffer conversion.
3302 stream_.doConvertBuffer[mode] = false;
3303 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3304 stream_.doConvertBuffer[mode] = true;
3305 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3306 stream_.nUserChannels[mode] > 1 )
3307 stream_.doConvertBuffer[mode] = true;
3309 // Allocate necessary internal buffers
3310 unsigned long bufferBytes;
3311 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3312 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3313 if ( stream_.userBuffer[mode] == NULL ) {
3314 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3318 if ( stream_.doConvertBuffer[mode] ) {
3320 bool makeBuffer = true;
3321 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3322 if ( isDuplexInput && stream_.deviceBuffer ) {
3323 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3324 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3328 bufferBytes *= *bufferSize;
3329 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3330 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3331 if ( stream_.deviceBuffer == NULL ) {
3332 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3338 // Determine device latencies
3339 long inputLatency, outputLatency;
3340 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3341 if ( result != ASE_OK ) {
3342 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3343 errorText_ = errorStream_.str();
3344 error( RtAudioError::WARNING); // warn but don't fail
3347 stream_.latency[0] = outputLatency;
3348 stream_.latency[1] = inputLatency;
3351 // Setup the buffer conversion information structure. We don't use
3352 // buffers to do channel offsets, so we override that parameter
3354 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3359 if ( !isDuplexInput ) {
3360 // the cleanup for error in the duplex input, is done by RtApi::openStream
3361 // So we clean up for single channel only
3363 if ( buffersAllocated )
3364 ASIODisposeBuffers();
3366 drivers.removeCurrentDriver();
3369 CloseHandle( handle->condition );
3370 if ( handle->bufferInfos )
3371 free( handle->bufferInfos );
3374 stream_.apiHandle = 0;
3378 if ( stream_.userBuffer[mode] ) {
3379 free( stream_.userBuffer[mode] );
3380 stream_.userBuffer[mode] = 0;
3383 if ( stream_.deviceBuffer ) {
3384 free( stream_.deviceBuffer );
3385 stream_.deviceBuffer = 0;
3390 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3392 void RtApiAsio :: closeStream()
3394 if ( stream_.state == STREAM_CLOSED ) {
3395 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3396 error( RtAudioError::WARNING );
3400 if ( stream_.state == STREAM_RUNNING ) {
3401 stream_.state = STREAM_STOPPED;
3404 ASIODisposeBuffers();
3405 drivers.removeCurrentDriver();
3407 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3409 CloseHandle( handle->condition );
3410 if ( handle->bufferInfos )
3411 free( handle->bufferInfos );
3413 stream_.apiHandle = 0;
3416 for ( int i=0; i<2; i++ ) {
3417 if ( stream_.userBuffer[i] ) {
3418 free( stream_.userBuffer[i] );
3419 stream_.userBuffer[i] = 0;
3423 if ( stream_.deviceBuffer ) {
3424 free( stream_.deviceBuffer );
3425 stream_.deviceBuffer = 0;
3428 stream_.mode = UNINITIALIZED;
3429 stream_.state = STREAM_CLOSED;
3432 bool stopThreadCalled = false;
3434 void RtApiAsio :: startStream()
3437 if ( stream_.state == STREAM_RUNNING ) {
3438 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3439 error( RtAudioError::WARNING );
3443 #if defined( HAVE_GETTIMEOFDAY )
3444 gettimeofday( &stream_.lastTickTimestamp, NULL );
3447 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3448 ASIOError result = ASIOStart();
3449 if ( result != ASE_OK ) {
3450 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3451 errorText_ = errorStream_.str();
3455 handle->drainCounter = 0;
3456 handle->internalDrain = false;
3457 ResetEvent( handle->condition );
3458 stream_.state = STREAM_RUNNING;
3462 stopThreadCalled = false;
3464 if ( result == ASE_OK ) return;
3465 error( RtAudioError::SYSTEM_ERROR );
3468 void RtApiAsio :: stopStream()
3471 if ( stream_.state == STREAM_STOPPED ) {
3472 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3473 error( RtAudioError::WARNING );
3477 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3478 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3479 if ( handle->drainCounter == 0 ) {
3480 handle->drainCounter = 2;
3481 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3485 stream_.state = STREAM_STOPPED;
3487 ASIOError result = ASIOStop();
3488 if ( result != ASE_OK ) {
3489 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3490 errorText_ = errorStream_.str();
3493 if ( result == ASE_OK ) return;
3494 error( RtAudioError::SYSTEM_ERROR );
3497 void RtApiAsio :: abortStream()
3500 if ( stream_.state == STREAM_STOPPED ) {
3501 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3502 error( RtAudioError::WARNING );
3506 // The following lines were commented-out because some behavior was
3507 // noted where the device buffers need to be zeroed to avoid
3508 // continuing sound, even when the device buffers are completely
3509 // disposed. So now, calling abort is the same as calling stop.
3510 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3511 // handle->drainCounter = 2;
3515 // This function will be called by a spawned thread when the user
3516 // callback function signals that the stream should be stopped or
3517 // aborted. It is necessary to handle it this way because the
3518 // callbackEvent() function must return before the ASIOStop()
3519 // function will return.
3520 static unsigned __stdcall asioStopStream( void *ptr )
3522 CallbackInfo *info = (CallbackInfo *) ptr;
3523 RtApiAsio *object = (RtApiAsio *) info->object;
3525 object->stopStream();
3530 bool RtApiAsio :: callbackEvent( long bufferIndex )
3532 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3533 if ( stream_.state == STREAM_CLOSED ) {
3534 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3535 error( RtAudioError::WARNING );
3539 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3540 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3542 // Check if we were draining the stream and signal if finished.
3543 if ( handle->drainCounter > 3 ) {
3545 stream_.state = STREAM_STOPPING;
3546 if ( handle->internalDrain == false )
3547 SetEvent( handle->condition );
3548 else { // spawn a thread to stop the stream
3550 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3551 &stream_.callbackInfo, 0, &threadId );
3556 // Invoke user callback to get fresh output data UNLESS we are
3558 if ( handle->drainCounter == 0 ) {
3559 RtAudioCallback callback = (RtAudioCallback) info->callback;
3560 double streamTime = getStreamTime();
3561 RtAudioStreamStatus status = 0;
3562 if ( stream_.mode != INPUT && asioXRun == true ) {
3563 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3566 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3567 status |= RTAUDIO_INPUT_OVERFLOW;
3570 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3571 stream_.bufferSize, streamTime, status, info->userData );
3572 if ( cbReturnValue == 2 ) {
3573 stream_.state = STREAM_STOPPING;
3574 handle->drainCounter = 2;
3576 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3577 &stream_.callbackInfo, 0, &threadId );
3580 else if ( cbReturnValue == 1 ) {
3581 handle->drainCounter = 1;
3582 handle->internalDrain = true;
3586 unsigned int nChannels, bufferBytes, i, j;
3587 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3588 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3590 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3592 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3594 for ( i=0, j=0; i<nChannels; i++ ) {
3595 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3596 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3600 else if ( stream_.doConvertBuffer[0] ) {
3602 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3603 if ( stream_.doByteSwap[0] )
3604 byteSwapBuffer( stream_.deviceBuffer,
3605 stream_.bufferSize * stream_.nDeviceChannels[0],
3606 stream_.deviceFormat[0] );
3608 for ( i=0, j=0; i<nChannels; i++ ) {
3609 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3610 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3611 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
3617 if ( stream_.doByteSwap[0] )
3618 byteSwapBuffer( stream_.userBuffer[0],
3619 stream_.bufferSize * stream_.nUserChannels[0],
3620 stream_.userFormat );
3622 for ( i=0, j=0; i<nChannels; i++ ) {
3623 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3624 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3625 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3631 // Don't bother draining input
3632 if ( handle->drainCounter ) {
3633 handle->drainCounter++;
3637 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3639 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3641 if (stream_.doConvertBuffer[1]) {
3643 // Always interleave ASIO input data.
3644 for ( i=0, j=0; i<nChannels; i++ ) {
3645 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3646 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3647 handle->bufferInfos[i].buffers[bufferIndex],
3651 if ( stream_.doByteSwap[1] )
3652 byteSwapBuffer( stream_.deviceBuffer,
3653 stream_.bufferSize * stream_.nDeviceChannels[1],
3654 stream_.deviceFormat[1] );
3655 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3659 for ( i=0, j=0; i<nChannels; i++ ) {
3660 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3661 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3662 handle->bufferInfos[i].buffers[bufferIndex],
3667 if ( stream_.doByteSwap[1] )
3668 byteSwapBuffer( stream_.userBuffer[1],
3669 stream_.bufferSize * stream_.nUserChannels[1],
3670 stream_.userFormat );
3675 // The following call was suggested by Malte Clasen. While the API
3676 // documentation indicates it should not be required, some device
3677 // drivers apparently do not function correctly without it.
3680 RtApi::tickStreamTime();
3684 static void sampleRateChanged( ASIOSampleRate sRate )
3686 // The ASIO documentation says that this usually only happens during
3687 // external sync. Audio processing is not stopped by the driver,
3688 // actual sample rate might not have even changed, maybe only the
3689 // sample rate status of an AES/EBU or S/PDIF digital input at the
3692 RtApi *object = (RtApi *) asioCallbackInfo->object;
3694 object->stopStream();
3696 catch ( RtAudioError &exception ) {
3697 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3701 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3704 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3708 switch( selector ) {
3709 case kAsioSelectorSupported:
3710 if ( value == kAsioResetRequest
3711 || value == kAsioEngineVersion
3712 || value == kAsioResyncRequest
3713 || value == kAsioLatenciesChanged
3714 // The following three were added for ASIO 2.0, you don't
3715 // necessarily have to support them.
3716 || value == kAsioSupportsTimeInfo
3717 || value == kAsioSupportsTimeCode
3718 || value == kAsioSupportsInputMonitor)
3721 case kAsioResetRequest:
3722 // Defer the task and perform the reset of the driver during the
3723 // next "safe" situation. You cannot reset the driver right now,
3724 // as this code is called from the driver. Reset the driver is
3725 // done by completely destruct is. I.e. ASIOStop(),
3726 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3728 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3731 case kAsioResyncRequest:
3732 // This informs the application that the driver encountered some
3733 // non-fatal data loss. It is used for synchronization purposes
3734 // of different media. Added mainly to work around the Win16Mutex
3735 // problems in Windows 95/98 with the Windows Multimedia system,
3736 // which could lose data because the Mutex was held too long by
3737 // another thread. However a driver can issue it in other
3739 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3743 case kAsioLatenciesChanged:
3744 // This will inform the host application that the drivers were
3745 // latencies changed. Beware, it this does not mean that the
3746 // buffer sizes have changed! You might need to update internal
3748 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3751 case kAsioEngineVersion:
3752 // Return the supported ASIO version of the host application. If
3753 // a host application does not implement this selector, ASIO 1.0
3754 // is assumed by the driver.
3757 case kAsioSupportsTimeInfo:
3758 // Informs the driver whether the
3759 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3760 // For compatibility with ASIO 1.0 drivers the host application
3761 // should always support the "old" bufferSwitch method, too.
3764 case kAsioSupportsTimeCode:
3765 // Informs the driver whether application is interested in time
3766 // code info. If an application does not need to know about time
3767 // code, the driver has less work to do.
3774 static const char* getAsioErrorString( ASIOError result )
3782 static const Messages m[] =
3784 { ASE_NotPresent, "Hardware input or output is not present or available." },
3785 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3786 { ASE_InvalidParameter, "Invalid input parameter." },
3787 { ASE_InvalidMode, "Invalid mode." },
3788 { ASE_SPNotAdvancing, "Sample position not advancing." },
3789 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3790 { ASE_NoMemory, "Not enough memory to complete the request." }
3793 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3794 if ( m[i].value == result ) return m[i].message;
3796 return "Unknown error.";
3799 //******************** End of __WINDOWS_ASIO__ *********************//
3803 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3805 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3806 // - Introduces support for the Windows WASAPI API
3807 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3808 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3809 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3816 #include <mferror.h>
3818 #include <mftransform.h>
3819 #include <wmcodecdsp.h>
3821 #include <audioclient.h>
3823 #include <mmdeviceapi.h>
3824 #include <functiondiscoverykeys_devpkey.h>
3826 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3827 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3830 #ifndef MFSTARTUP_NOSOCKET
3831 #define MFSTARTUP_NOSOCKET 0x1
3835 #pragma comment( lib, "ksuser" )
3836 #pragma comment( lib, "mfplat.lib" )
3837 #pragma comment( lib, "mfuuid.lib" )
3838 #pragma comment( lib, "wmcodecdspuuid" )
3841 //=============================================================================
3843 #define SAFE_RELEASE( objectPtr )\
3846 objectPtr->Release();\
3850 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3852 //-----------------------------------------------------------------------------
3854 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3855 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3856 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3857 // provide intermediate storage for read / write synchronization.
3871 // sets the length of the internal ring buffer
3872 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3875 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3877 bufferSize_ = bufferSize;
3882 // attempt to push a buffer into the ring buffer at the current "in" index
3883 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3885 if ( !buffer || // incoming buffer is NULL
3886 bufferSize == 0 || // incoming buffer has no data
3887 bufferSize > bufferSize_ ) // incoming buffer too large
3892 unsigned int relOutIndex = outIndex_;
3893 unsigned int inIndexEnd = inIndex_ + bufferSize;
3894 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3895 relOutIndex += bufferSize_;
3898 // the "IN" index CAN BEGIN at the "OUT" index
3899 // the "IN" index CANNOT END at the "OUT" index
3900 if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
3901 return false; // not enough space between "in" index and "out" index
3904 // copy buffer from external to internal
3905 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3906 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3907 int fromInSize = bufferSize - fromZeroSize;
3912 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3913 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3915 case RTAUDIO_SINT16:
3916 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3917 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3919 case RTAUDIO_SINT24:
3920 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3921 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3923 case RTAUDIO_SINT32:
3924 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3925 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3927 case RTAUDIO_FLOAT32:
3928 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3929 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3931 case RTAUDIO_FLOAT64:
3932 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3933 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3937 // update "in" index
3938 inIndex_ += bufferSize;
3939 inIndex_ %= bufferSize_;
3944 // attempt to pull a buffer from the ring buffer from the current "out" index
3945 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3947 if ( !buffer || // incoming buffer is NULL
3948 bufferSize == 0 || // incoming buffer has no data
3949 bufferSize > bufferSize_ ) // incoming buffer too large
3954 unsigned int relInIndex = inIndex_;
3955 unsigned int outIndexEnd = outIndex_ + bufferSize;
3956 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3957 relInIndex += bufferSize_;
3960 // the "OUT" index CANNOT BEGIN at the "IN" index
3961 // the "OUT" index CAN END at the "IN" index
3962 if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3963 return false; // not enough space between "out" index and "in" index
3966 // copy buffer from internal to external
3967 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3968 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3969 int fromOutSize = bufferSize - fromZeroSize;
3974 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3975 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3977 case RTAUDIO_SINT16:
3978 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3979 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3981 case RTAUDIO_SINT24:
3982 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3983 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3985 case RTAUDIO_SINT32:
3986 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3987 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3989 case RTAUDIO_FLOAT32:
3990 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3991 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3993 case RTAUDIO_FLOAT64:
3994 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3995 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3999 // update "out" index
4000 outIndex_ += bufferSize;
4001 outIndex_ %= bufferSize_;
4008 unsigned int bufferSize_;
4009 unsigned int inIndex_;
4010 unsigned int outIndex_;
4013 //-----------------------------------------------------------------------------
4015 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
4016 // between HW and the user. The WasapiResampler class is used to perform this conversion between
4017 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
4018 class WasapiResampler
4021 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
4022 unsigned int inSampleRate, unsigned int outSampleRate )
4023 : _bytesPerSample( bitsPerSample / 8 )
4024 , _channelCount( channelCount )
4025 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
4026 , _transformUnk( NULL )
4027 , _transform( NULL )
4028 , _mediaType( NULL )
4029 , _inputMediaType( NULL )
4030 , _outputMediaType( NULL )
4032 #ifdef __IWMResamplerProps_FWD_DEFINED__
4033 , _resamplerProps( NULL )
4036 // 1. Initialization
4038 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
4040 // 2. Create Resampler Transform Object
4042 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
4043 IID_IUnknown, ( void** ) &_transformUnk );
4045 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
4047 #ifdef __IWMResamplerProps_FWD_DEFINED__
4048 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
4049 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
4052 // 3. Specify input / output format
4054 MFCreateMediaType( &_mediaType );
4055 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4056 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4057 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4058 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4059 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4060 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4061 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4062 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
4064 MFCreateMediaType( &_inputMediaType );
4065 _mediaType->CopyAllItems( _inputMediaType );
4067 _transform->SetInputType( 0, _inputMediaType, 0 );
4069 MFCreateMediaType( &_outputMediaType );
4070 _mediaType->CopyAllItems( _outputMediaType );
4072 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4073 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4075 _transform->SetOutputType( 0, _outputMediaType, 0 );
4077 // 4. Send stream start messages to Resampler
4079 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4080 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4081 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
4086 // 8. Send stream stop messages to Resampler
4088 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4089 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
4095 SAFE_RELEASE( _transformUnk );
4096 SAFE_RELEASE( _transform );
4097 SAFE_RELEASE( _mediaType );
4098 SAFE_RELEASE( _inputMediaType );
4099 SAFE_RELEASE( _outputMediaType );
4101 #ifdef __IWMResamplerProps_FWD_DEFINED__
4102 SAFE_RELEASE( _resamplerProps );
4106 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
4108 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4109 if ( _sampleRatio == 1 )
4111 // no sample rate conversion required
4112 memcpy( outBuffer, inBuffer, inputBufferSize );
4113 outSampleCount = inSampleCount;
4117 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4119 IMFMediaBuffer* rInBuffer;
4120 IMFSample* rInSample;
4121 BYTE* rInByteBuffer = NULL;
4123 // 5. Create Sample object from input data
4125 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4127 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4128 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4129 rInBuffer->Unlock();
4130 rInByteBuffer = NULL;
4132 rInBuffer->SetCurrentLength( inputBufferSize );
4134 MFCreateSample( &rInSample );
4135 rInSample->AddBuffer( rInBuffer );
4137 // 6. Pass input data to Resampler
4139 _transform->ProcessInput( 0, rInSample, 0 );
4141 SAFE_RELEASE( rInBuffer );
4142 SAFE_RELEASE( rInSample );
4144 // 7. Perform sample rate conversion
4146 IMFMediaBuffer* rOutBuffer = NULL;
4147 BYTE* rOutByteBuffer = NULL;
4149 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4151 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4153 // 7.1 Create Sample object for output data
4155 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4156 MFCreateSample( &( rOutDataBuffer.pSample ) );
4157 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4158 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4159 rOutDataBuffer.dwStreamID = 0;
4160 rOutDataBuffer.dwStatus = 0;
4161 rOutDataBuffer.pEvents = NULL;
4163 // 7.2 Get output data from Resampler
4165 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4168 SAFE_RELEASE( rOutBuffer );
4169 SAFE_RELEASE( rOutDataBuffer.pSample );
4173 // 7.3 Write output data to outBuffer
4175 SAFE_RELEASE( rOutBuffer );
4176 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4177 rOutBuffer->GetCurrentLength( &rBytes );
4179 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4180 memcpy( outBuffer, rOutByteBuffer, rBytes );
4181 rOutBuffer->Unlock();
4182 rOutByteBuffer = NULL;
4184 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4185 SAFE_RELEASE( rOutBuffer );
4186 SAFE_RELEASE( rOutDataBuffer.pSample );
4190 unsigned int _bytesPerSample;
4191 unsigned int _channelCount;
4194 IUnknown* _transformUnk;
4195 IMFTransform* _transform;
4196 IMFMediaType* _mediaType;
4197 IMFMediaType* _inputMediaType;
4198 IMFMediaType* _outputMediaType;
4200 #ifdef __IWMResamplerProps_FWD_DEFINED__
4201 IWMResamplerProps* _resamplerProps;
4205 //-----------------------------------------------------------------------------
4207 // A structure to hold various information related to the WASAPI implementation.
4210 IAudioClient* captureAudioClient;
4211 IAudioClient* renderAudioClient;
4212 IAudioCaptureClient* captureClient;
4213 IAudioRenderClient* renderClient;
4214 HANDLE captureEvent;
4218 : captureAudioClient( NULL ),
4219 renderAudioClient( NULL ),
4220 captureClient( NULL ),
4221 renderClient( NULL ),
4222 captureEvent( NULL ),
4223 renderEvent( NULL ) {}
4226 //=============================================================================
4228 RtApiWasapi::RtApiWasapi()
4229 : coInitialized_( false ), deviceEnumerator_( NULL )
4231 // WASAPI can run either apartment or multi-threaded
4232 HRESULT hr = CoInitialize( NULL );
4233 if ( !FAILED( hr ) )
4234 coInitialized_ = true;
4236 // Instantiate device enumerator
4237 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4238 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4239 ( void** ) &deviceEnumerator_ );
4241 // If this runs on an old Windows, it will fail. Ignore and proceed.
4243 deviceEnumerator_ = NULL;
4246 //-----------------------------------------------------------------------------
4248 RtApiWasapi::~RtApiWasapi()
4250 if ( stream_.state != STREAM_CLOSED )
4253 SAFE_RELEASE( deviceEnumerator_ );
4255 // If this object previously called CoInitialize()
4256 if ( coInitialized_ )
4260 //=============================================================================
4262 unsigned int RtApiWasapi::getDeviceCount( void )
4264 unsigned int captureDeviceCount = 0;
4265 unsigned int renderDeviceCount = 0;
4267 IMMDeviceCollection* captureDevices = NULL;
4268 IMMDeviceCollection* renderDevices = NULL;
4270 if ( !deviceEnumerator_ )
4273 // Count capture devices
4275 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4276 if ( FAILED( hr ) ) {
4277 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4281 hr = captureDevices->GetCount( &captureDeviceCount );
4282 if ( FAILED( hr ) ) {
4283 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4287 // Count render devices
4288 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4289 if ( FAILED( hr ) ) {
4290 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4294 hr = renderDevices->GetCount( &renderDeviceCount );
4295 if ( FAILED( hr ) ) {
4296 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4301 // release all references
4302 SAFE_RELEASE( captureDevices );
4303 SAFE_RELEASE( renderDevices );
4305 if ( errorText_.empty() )
4306 return captureDeviceCount + renderDeviceCount;
4308 error( RtAudioError::DRIVER_ERROR );
4312 //-----------------------------------------------------------------------------
4314 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4316 RtAudio::DeviceInfo info;
4317 unsigned int captureDeviceCount = 0;
4318 unsigned int renderDeviceCount = 0;
4319 std::string defaultDeviceName;
4320 bool isCaptureDevice = false;
4322 PROPVARIANT deviceNameProp;
4323 PROPVARIANT defaultDeviceNameProp;
4325 IMMDeviceCollection* captureDevices = NULL;
4326 IMMDeviceCollection* renderDevices = NULL;
4327 IMMDevice* devicePtr = NULL;
4328 IMMDevice* defaultDevicePtr = NULL;
4329 IAudioClient* audioClient = NULL;
4330 IPropertyStore* devicePropStore = NULL;
4331 IPropertyStore* defaultDevicePropStore = NULL;
4333 WAVEFORMATEX* deviceFormat = NULL;
4334 WAVEFORMATEX* closestMatchFormat = NULL;
4337 info.probed = false;
4339 // Count capture devices
4341 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4342 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4343 if ( FAILED( hr ) ) {
4344 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4348 hr = captureDevices->GetCount( &captureDeviceCount );
4349 if ( FAILED( hr ) ) {
4350 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4354 // Count render devices
4355 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4356 if ( FAILED( hr ) ) {
4357 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4361 hr = renderDevices->GetCount( &renderDeviceCount );
4362 if ( FAILED( hr ) ) {
4363 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4367 // validate device index
4368 if ( device >= captureDeviceCount + renderDeviceCount ) {
4369 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4370 errorType = RtAudioError::INVALID_USE;
4374 // determine whether index falls within capture or render devices
4375 if ( device >= renderDeviceCount ) {
4376 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4377 if ( FAILED( hr ) ) {
4378 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4381 isCaptureDevice = true;
4384 hr = renderDevices->Item( device, &devicePtr );
4385 if ( FAILED( hr ) ) {
4386 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4389 isCaptureDevice = false;
4392 // get default device name
4393 if ( isCaptureDevice ) {
4394 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4395 if ( FAILED( hr ) ) {
4396 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4401 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4402 if ( FAILED( hr ) ) {
4403 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4408 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4409 if ( FAILED( hr ) ) {
4410 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4413 PropVariantInit( &defaultDeviceNameProp );
4415 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4416 if ( FAILED( hr ) ) {
4417 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4421 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4424 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4425 if ( FAILED( hr ) ) {
4426 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4430 PropVariantInit( &deviceNameProp );
4432 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4433 if ( FAILED( hr ) ) {
4434 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4438 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4441 if ( isCaptureDevice ) {
4442 info.isDefaultInput = info.name == defaultDeviceName;
4443 info.isDefaultOutput = false;
4446 info.isDefaultInput = false;
4447 info.isDefaultOutput = info.name == defaultDeviceName;
4451 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4452 if ( FAILED( hr ) ) {
4453 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4457 hr = audioClient->GetMixFormat( &deviceFormat );
4458 if ( FAILED( hr ) ) {
4459 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4463 if ( isCaptureDevice ) {
4464 info.inputChannels = deviceFormat->nChannels;
4465 info.outputChannels = 0;
4466 info.duplexChannels = 0;
4469 info.inputChannels = 0;
4470 info.outputChannels = deviceFormat->nChannels;
4471 info.duplexChannels = 0;
4475 info.sampleRates.clear();
4477 // allow support for all sample rates as we have a built-in sample rate converter
4478 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4479 info.sampleRates.push_back( SAMPLE_RATES[i] );
4481 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4484 info.nativeFormats = 0;
4486 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4487 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4488 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4490 if ( deviceFormat->wBitsPerSample == 32 ) {
4491 info.nativeFormats |= RTAUDIO_FLOAT32;
4493 else if ( deviceFormat->wBitsPerSample == 64 ) {
4494 info.nativeFormats |= RTAUDIO_FLOAT64;
4497 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4498 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4499 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4501 if ( deviceFormat->wBitsPerSample == 8 ) {
4502 info.nativeFormats |= RTAUDIO_SINT8;
4504 else if ( deviceFormat->wBitsPerSample == 16 ) {
4505 info.nativeFormats |= RTAUDIO_SINT16;
4507 else if ( deviceFormat->wBitsPerSample == 24 ) {
4508 info.nativeFormats |= RTAUDIO_SINT24;
4510 else if ( deviceFormat->wBitsPerSample == 32 ) {
4511 info.nativeFormats |= RTAUDIO_SINT32;
4519 // release all references
4520 PropVariantClear( &deviceNameProp );
4521 PropVariantClear( &defaultDeviceNameProp );
4523 SAFE_RELEASE( captureDevices );
4524 SAFE_RELEASE( renderDevices );
4525 SAFE_RELEASE( devicePtr );
4526 SAFE_RELEASE( defaultDevicePtr );
4527 SAFE_RELEASE( audioClient );
4528 SAFE_RELEASE( devicePropStore );
4529 SAFE_RELEASE( defaultDevicePropStore );
4531 CoTaskMemFree( deviceFormat );
4532 CoTaskMemFree( closestMatchFormat );
4534 if ( !errorText_.empty() )
4539 //-----------------------------------------------------------------------------
4541 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4543 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4544 if ( getDeviceInfo( i ).isDefaultOutput ) {
4552 //-----------------------------------------------------------------------------
4554 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4556 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4557 if ( getDeviceInfo( i ).isDefaultInput ) {
4565 //-----------------------------------------------------------------------------
4567 void RtApiWasapi::closeStream( void )
4569 if ( stream_.state == STREAM_CLOSED ) {
4570 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4571 error( RtAudioError::WARNING );
4575 if ( stream_.state != STREAM_STOPPED )
4578 // clean up stream memory
4579 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4580 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4582 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4583 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4585 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4586 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4588 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4589 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4591 delete ( WasapiHandle* ) stream_.apiHandle;
4592 stream_.apiHandle = NULL;
4594 for ( int i = 0; i < 2; i++ ) {
4595 if ( stream_.userBuffer[i] ) {
4596 free( stream_.userBuffer[i] );
4597 stream_.userBuffer[i] = 0;
4601 if ( stream_.deviceBuffer ) {
4602 free( stream_.deviceBuffer );
4603 stream_.deviceBuffer = 0;
4606 // update stream state
4607 stream_.state = STREAM_CLOSED;
4610 //-----------------------------------------------------------------------------
4612 void RtApiWasapi::startStream( void )
4616 if ( stream_.state == STREAM_RUNNING ) {
4617 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4618 error( RtAudioError::WARNING );
4622 #if defined( HAVE_GETTIMEOFDAY )
4623 gettimeofday( &stream_.lastTickTimestamp, NULL );
4626 // update stream state
4627 stream_.state = STREAM_RUNNING;
4629 // create WASAPI stream thread
4630 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4632 if ( !stream_.callbackInfo.thread ) {
4633 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4634 error( RtAudioError::THREAD_ERROR );
4637 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4638 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4642 //-----------------------------------------------------------------------------
4644 void RtApiWasapi::stopStream( void )
4648 if ( stream_.state == STREAM_STOPPED ) {
4649 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4650 error( RtAudioError::WARNING );
4654 // inform stream thread by setting stream state to STREAM_STOPPING
4655 stream_.state = STREAM_STOPPING;
4657 // wait until stream thread is stopped
4658 while( stream_.state != STREAM_STOPPED ) {
4662 // Wait for the last buffer to play before stopping.
4663 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4665 // close thread handle
4666 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4667 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4668 error( RtAudioError::THREAD_ERROR );
4672 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4675 //-----------------------------------------------------------------------------
4677 void RtApiWasapi::abortStream( void )
4681 if ( stream_.state == STREAM_STOPPED ) {
4682 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4683 error( RtAudioError::WARNING );
4687 // inform stream thread by setting stream state to STREAM_STOPPING
4688 stream_.state = STREAM_STOPPING;
4690 // wait until stream thread is stopped
4691 while ( stream_.state != STREAM_STOPPED ) {
4695 // close thread handle
4696 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4697 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4698 error( RtAudioError::THREAD_ERROR );
4702 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4705 //-----------------------------------------------------------------------------
4707 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4708 unsigned int firstChannel, unsigned int sampleRate,
4709 RtAudioFormat format, unsigned int* bufferSize,
4710 RtAudio::StreamOptions* options )
4712 bool methodResult = FAILURE;
4713 unsigned int captureDeviceCount = 0;
4714 unsigned int renderDeviceCount = 0;
4716 IMMDeviceCollection* captureDevices = NULL;
4717 IMMDeviceCollection* renderDevices = NULL;
4718 IMMDevice* devicePtr = NULL;
4719 WAVEFORMATEX* deviceFormat = NULL;
4720 unsigned int bufferBytes;
4721 stream_.state = STREAM_STOPPED;
4723 // create API Handle if not already created
4724 if ( !stream_.apiHandle )
4725 stream_.apiHandle = ( void* ) new WasapiHandle();
4727 // Count capture devices
4729 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4730 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4731 if ( FAILED( hr ) ) {
4732 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4736 hr = captureDevices->GetCount( &captureDeviceCount );
4737 if ( FAILED( hr ) ) {
4738 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4742 // Count render devices
4743 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4744 if ( FAILED( hr ) ) {
4745 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4749 hr = renderDevices->GetCount( &renderDeviceCount );
4750 if ( FAILED( hr ) ) {
4751 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4755 // validate device index
4756 if ( device >= captureDeviceCount + renderDeviceCount ) {
4757 errorType = RtAudioError::INVALID_USE;
4758 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4762 // if device index falls within capture devices
4763 if ( device >= renderDeviceCount ) {
4764 if ( mode != INPUT ) {
4765 errorType = RtAudioError::INVALID_USE;
4766 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4770 // retrieve captureAudioClient from devicePtr
4771 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4773 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4774 if ( FAILED( hr ) ) {
4775 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4779 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4780 NULL, ( void** ) &captureAudioClient );
4781 if ( FAILED( hr ) ) {
4782 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4786 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4787 if ( FAILED( hr ) ) {
4788 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4792 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4793 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4796 // if device index falls within render devices and is configured for loopback
4797 if ( device < renderDeviceCount && mode == INPUT )
4799 // if renderAudioClient is not initialised, initialise it now
4800 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4801 if ( !renderAudioClient )
4803 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4806 // retrieve captureAudioClient from devicePtr
4807 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4809 hr = renderDevices->Item( device, &devicePtr );
4810 if ( FAILED( hr ) ) {
4811 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4815 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4816 NULL, ( void** ) &captureAudioClient );
4817 if ( FAILED( hr ) ) {
4818 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4822 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4823 if ( FAILED( hr ) ) {
4824 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4828 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4829 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4832 // if device index falls within render devices and is configured for output
4833 if ( device < renderDeviceCount && mode == OUTPUT )
4835 // if renderAudioClient is already initialised, don't initialise it again
4836 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4837 if ( renderAudioClient )
4839 methodResult = SUCCESS;
4843 hr = renderDevices->Item( device, &devicePtr );
4844 if ( FAILED( hr ) ) {
4845 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4849 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4850 NULL, ( void** ) &renderAudioClient );
4851 if ( FAILED( hr ) ) {
4852 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4856 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4857 if ( FAILED( hr ) ) {
4858 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4862 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4863 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4867 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4868 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4869 stream_.mode = DUPLEX;
4872 stream_.mode = mode;
4875 stream_.device[mode] = device;
4876 stream_.doByteSwap[mode] = false;
4877 stream_.sampleRate = sampleRate;
4878 stream_.bufferSize = *bufferSize;
4879 stream_.nBuffers = 1;
4880 stream_.nUserChannels[mode] = channels;
4881 stream_.channelOffset[mode] = firstChannel;
4882 stream_.userFormat = format;
4883 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4885 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4886 stream_.userInterleaved = false;
4888 stream_.userInterleaved = true;
4889 stream_.deviceInterleaved[mode] = true;
4891 // Set flags for buffer conversion.
4892 stream_.doConvertBuffer[mode] = false;
4893 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4894 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4895 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4896 stream_.doConvertBuffer[mode] = true;
4897 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4898 stream_.nUserChannels[mode] > 1 )
4899 stream_.doConvertBuffer[mode] = true;
4901 if ( stream_.doConvertBuffer[mode] )
4902 setConvertInfo( mode, 0 );
4904 // Allocate necessary internal buffers
4905 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4907 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4908 if ( !stream_.userBuffer[mode] ) {
4909 errorType = RtAudioError::MEMORY_ERROR;
4910 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4914 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4915 stream_.callbackInfo.priority = 15;
4917 stream_.callbackInfo.priority = 0;
4919 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4920 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4922 methodResult = SUCCESS;
4926 SAFE_RELEASE( captureDevices );
4927 SAFE_RELEASE( renderDevices );
4928 SAFE_RELEASE( devicePtr );
4929 CoTaskMemFree( deviceFormat );
4931 // if method failed, close the stream
4932 if ( methodResult == FAILURE )
4935 if ( !errorText_.empty() )
4937 return methodResult;
4940 //=============================================================================
// Static thread entry point for the WASAPI streaming thread.  The void*
// argument is the owning RtApiWasapi instance; we simply trampoline into
// its wasapiThread() member, which runs the blocking stream loop.
// NOTE(review): the return statement/braces are elided in this extraction —
// presumably returns 0; confirm against the canonical source.
4942 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4945 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
// Static helper-thread entry point used when the user callback returns 1:
// calls stopStream() on the instance from a separate thread, so the audio
// thread never tries to join on itself while stopping.
4950 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4953 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
// Static helper-thread entry point used when the user callback returns 2:
// calls abortStream() (immediate stop, pending buffers discarded) from a
// separate thread for the same self-join reason as stopWasapiThread().
4958 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4961 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4966 //-----------------------------------------------------------------------------
// Core WASAPI streaming routine, run on its own thread.  It finishes the
// lazy device setup (audio clients, events, resamplers, intermediate ring
// buffers), then loops until stream_.state becomes STREAM_STOPPING:
//   capture device -> captureBuffer -> resample/convert -> user callback
//   -> convert/resample -> renderBuffer -> render device.
// On exit it frees all per-thread resources and marks the stream stopped.
// NOTE(review): several lines (error-exit gotos, some Initialize() argument
// lists, closing braces) are elided in this extraction — confirm the exact
// error-handling control flow against the canonical RtAudio 5.1.0 source.
4968 void RtApiWasapi::wasapiThread()
4970 // as this is a new thread, we must CoInitialize it
4971 CoInitialize( NULL );
// Snapshot the per-stream WASAPI handles set up by probeDeviceOpen().
4975 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4976 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4977 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4978 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4979 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4980 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4982 WAVEFORMATEX* captureFormat = NULL;
4983 WAVEFORMATEX* renderFormat = NULL;
4984 float captureSrRatio = 0.0f;
4985 float renderSrRatio = 0.0f;
4986 WasapiBuffer captureBuffer;
4987 WasapiBuffer renderBuffer;
4988 WasapiResampler* captureResampler = NULL;
4989 WasapiResampler* renderResampler = NULL;
4991 // declare local stream variables
4992 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4993 BYTE* streamBuffer = NULL;
4994 unsigned long captureFlags = 0;
4995 unsigned int bufferFrameCount = 0;
4996 unsigned int numFramesPadding = 0;
4997 unsigned int convBufferSize = 0;
// Loopback: same device index for input and output means we record what
// the render device is playing (AUDCLNT_STREAMFLAGS_LOOPBACK below).
4998 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4999 bool callbackPushed = true;
5000 bool callbackPulled = false;
5001 bool callbackStopped = false;
5002 int callbackResult = 0;
5004 // convBuffer is used to store converted buffers between WASAPI and the user
5005 char* convBuffer = NULL;
5006 unsigned int convBuffSize = 0;
5007 unsigned int deviceBuffSize = 0;
5009 std::string errorText;
5010 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
5012 // Attempt to assign "Pro Audio" characteristic to thread
// Avrt.dll is loaded dynamically so the code still runs where MMCSS is
// unavailable; the raised scheduling class reduces glitching.
5013 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
5015 DWORD taskIndex = 0;
5016 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
5017 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
5018 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
5019 FreeLibrary( AvrtDll );
5022 // start capture stream if applicable
5023 if ( captureAudioClient ) {
5024 hr = captureAudioClient->GetMixFormat( &captureFormat );
5025 if ( FAILED( hr ) ) {
5026 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5030 // init captureResampler
// Resampler bridges the device mix rate to the user-requested rate.
5031 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
5032 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
5033 captureFormat->nSamplesPerSec, stream_.sampleRate );
5035 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
5037 if ( !captureClient ) {
5038 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5039 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5044 if ( FAILED( hr ) ) {
5045 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5049 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5050 ( void** ) &captureClient );
5051 if ( FAILED( hr ) ) {
5052 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5056 // don't configure captureEvent if in loopback mode
5057 if ( !loopbackEnabled )
5059 // configure captureEvent to trigger on every available capture buffer
5060 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5061 if ( !captureEvent ) {
5062 errorType = RtAudioError::SYSTEM_ERROR;
5063 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5067 hr = captureAudioClient->SetEventHandle( captureEvent );
5068 if ( FAILED( hr ) ) {
5069 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Publish the freshly-created handles back into the shared WasapiHandle so
// closeStream()/cleanup can release them.
5073 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5076 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5078 // reset the capture stream
5079 hr = captureAudioClient->Reset();
5080 if ( FAILED( hr ) ) {
5081 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5085 // start the capture stream
5086 hr = captureAudioClient->Start();
5087 if ( FAILED( hr ) ) {
5088 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5093 unsigned int inBufferSize = 0;
5094 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5095 if ( FAILED( hr ) ) {
5096 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5100 // scale outBufferSize according to stream->user sample rate ratio
5101 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5102 inBufferSize *= stream_.nDeviceChannels[INPUT];
5104 // set captureBuffer size
// Ring buffer sized to hold one device period plus one user period.
5105 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5108 // start render stream if applicable
5109 if ( renderAudioClient ) {
5110 hr = renderAudioClient->GetMixFormat( &renderFormat );
5111 if ( FAILED( hr ) ) {
5112 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5116 // init renderResampler
5117 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5118 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5119 stream_.sampleRate, renderFormat->nSamplesPerSec );
5121 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5123 if ( !renderClient ) {
5124 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5125 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5130 if ( FAILED( hr ) ) {
5131 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5135 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5136 ( void** ) &renderClient );
5137 if ( FAILED( hr ) ) {
5138 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5142 // configure renderEvent to trigger on every available render buffer
5143 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5144 if ( !renderEvent ) {
5145 errorType = RtAudioError::SYSTEM_ERROR;
5146 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5150 hr = renderAudioClient->SetEventHandle( renderEvent );
5151 if ( FAILED( hr ) ) {
5152 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5156 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5157 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5159 // reset the render stream
5160 hr = renderAudioClient->Reset();
5161 if ( FAILED( hr ) ) {
5162 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5166 // start the render stream
5167 hr = renderAudioClient->Start();
5168 if ( FAILED( hr ) ) {
5169 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5174 unsigned int outBufferSize = 0;
5175 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5176 if ( FAILED( hr ) ) {
5177 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5181 // scale inBufferSize according to user->stream sample rate ratio
5182 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5183 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5185 // set renderBuffer size
5186 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5189 // malloc buffer memory
// Size the shared conversion/device buffers for whichever directions are
// active; DUPLEX takes the max of the two so one buffer serves both.
5190 if ( stream_.mode == INPUT )
5192 using namespace std; // for ceilf
5193 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5194 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5196 else if ( stream_.mode == OUTPUT )
5198 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5199 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5201 else if ( stream_.mode == DUPLEX )
5203 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5204 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5205 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5206 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5209 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5210 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5211 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5212 if ( !convBuffer || !stream_.deviceBuffer ) {
5213 errorType = RtAudioError::MEMORY_ERROR;
5214 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5218 // stream process loop
5219 while ( stream_.state != STREAM_STOPPING ) {
5220 if ( !callbackPulled ) {
5223 // 1. Pull callback buffer from inputBuffer
5224 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5225 // Convert callback buffer to user format
5227 if ( captureAudioClient )
5229 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5230 if ( captureSrRatio != 1 )
5232 // account for remainders
// Accumulate resampled frames until a full user buffer is available.
5237 while ( convBufferSize < stream_.bufferSize )
5239 // Pull callback buffer from inputBuffer
5240 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5241 samplesToPull * stream_.nDeviceChannels[INPUT],
5242 stream_.deviceFormat[INPUT] );
5244 if ( !callbackPulled )
5249 // Convert callback buffer to user sample rate
5250 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5251 unsigned int convSamples = 0;
5253 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5258 convBufferSize += convSamples;
5259 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5262 if ( callbackPulled )
5264 if ( stream_.doConvertBuffer[INPUT] ) {
5265 // Convert callback buffer to user format
5266 convertBuffer( stream_.userBuffer[INPUT],
5267 stream_.deviceBuffer,
5268 stream_.convertInfo[INPUT] );
5271 // no further conversion, simple copy deviceBuffer to userBuffer
5272 memcpy( stream_.userBuffer[INPUT],
5273 stream_.deviceBuffer,
5274 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5279 // if there is no capture stream, set callbackPulled flag
5280 callbackPulled = true;
5285 // 1. Execute user callback method
5286 // 2. Handle return value from callback
5288 // if callback has not requested the stream to stop
5289 if ( callbackPulled && !callbackStopped ) {
5290 // Execute user callback method
5291 callbackResult = callback( stream_.userBuffer[OUTPUT],
5292 stream_.userBuffer[INPUT],
// DATA_DISCONTINUITY from GetBuffer() is surfaced to the user as an
// input-overflow status flag.
5295 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5296 stream_.callbackInfo.userData );
5299 RtApi::tickStreamTime();
5301 // Handle return value from callback
// Return 1 = drain and stop; return 2 = abort immediately.  Either way
// the actual stop runs on a helper thread (see stopWasapiThread /
// abortWasapiThread) to avoid this thread joining on itself.
5302 if ( callbackResult == 1 ) {
5303 // instantiate a thread to stop this thread
5304 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5305 if ( !threadHandle ) {
5306 errorType = RtAudioError::THREAD_ERROR;
5307 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5310 else if ( !CloseHandle( threadHandle ) ) {
5311 errorType = RtAudioError::THREAD_ERROR;
5312 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5316 callbackStopped = true;
5318 else if ( callbackResult == 2 ) {
5319 // instantiate a thread to stop this thread
5320 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5321 if ( !threadHandle ) {
5322 errorType = RtAudioError::THREAD_ERROR;
5323 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5326 else if ( !CloseHandle( threadHandle ) ) {
5327 errorType = RtAudioError::THREAD_ERROR;
5328 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5332 callbackStopped = true;
5339 // 1. Convert callback buffer to stream format
5340 // 2. Convert callback buffer to stream sample rate and channel count
5341 // 3. Push callback buffer into outputBuffer
5343 if ( renderAudioClient && callbackPulled )
5345 // if the last call to renderBuffer.PushBuffer() was successful
5346 if ( callbackPushed || convBufferSize == 0 )
5348 if ( stream_.doConvertBuffer[OUTPUT] )
5350 // Convert callback buffer to stream format
5351 convertBuffer( stream_.deviceBuffer,
5352 stream_.userBuffer[OUTPUT],
5353 stream_.convertInfo[OUTPUT] );
5357 // no further conversion, simple copy userBuffer to deviceBuffer
5358 memcpy( stream_.deviceBuffer,
5359 stream_.userBuffer[OUTPUT],
5360 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5363 // Convert callback buffer to stream sample rate
5364 renderResampler->Convert( convBuffer,
5365 stream_.deviceBuffer,
5370 // Push callback buffer into outputBuffer
5371 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5372 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5373 stream_.deviceFormat[OUTPUT] );
5376 // if there is no render stream, set callbackPushed flag
5377 callbackPushed = true;
5382 // 1. Get capture buffer from stream
5383 // 2. Push capture buffer into inputBuffer
5384 // 3. If 2. was successful: Release capture buffer
5386 if ( captureAudioClient ) {
5387 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5388 if ( !callbackPulled ) {
// In loopback mode the capture client has no event of its own, so we
// pace capture off the render event instead.
5389 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5392 // Get capture buffer from stream
5393 hr = captureClient->GetBuffer( &streamBuffer,
5395 &captureFlags, NULL, NULL );
5396 if ( FAILED( hr ) ) {
5397 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5401 if ( bufferFrameCount != 0 ) {
5402 // Push capture buffer into inputBuffer
5403 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5404 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5405 stream_.deviceFormat[INPUT] ) )
5407 // Release capture buffer
5408 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5409 if ( FAILED( hr ) ) {
5410 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5416 // Inform WASAPI that capture was unsuccessful
// ReleaseBuffer( 0 ) leaves the device data unconsumed for next time.
5417 hr = captureClient->ReleaseBuffer( 0 );
5418 if ( FAILED( hr ) ) {
5419 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5426 // Inform WASAPI that capture was unsuccessful
5427 hr = captureClient->ReleaseBuffer( 0 );
5428 if ( FAILED( hr ) ) {
5429 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5437 // 1. Get render buffer from stream
5438 // 2. Pull next buffer from outputBuffer
5439 // 3. If 2. was successful: Fill render buffer with next buffer
5440 // Release render buffer
5442 if ( renderAudioClient ) {
5443 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5444 if ( callbackPulled && !callbackPushed ) {
5445 WaitForSingleObject( renderEvent, INFINITE );
5448 // Get render buffer from stream
5449 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5450 if ( FAILED( hr ) ) {
5451 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5455 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5456 if ( FAILED( hr ) ) {
5457 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable space = total endpoint buffer minus frames still queued.
5461 bufferFrameCount -= numFramesPadding;
5463 if ( bufferFrameCount != 0 ) {
5464 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5465 if ( FAILED( hr ) ) {
5466 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5470 // Pull next buffer from outputBuffer
5471 // Fill render buffer with next buffer
5472 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5473 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5474 stream_.deviceFormat[OUTPUT] ) )
5476 // Release render buffer
5477 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5478 if ( FAILED( hr ) ) {
5479 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5485 // Inform WASAPI that render was unsuccessful
5486 hr = renderClient->ReleaseBuffer( 0, 0 );
5487 if ( FAILED( hr ) ) {
5488 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5495 // Inform WASAPI that render was unsuccessful
5496 hr = renderClient->ReleaseBuffer( 0, 0 );
5497 if ( FAILED( hr ) ) {
5498 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5504 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5505 if ( callbackPushed ) {
5506 // unsetting the callbackPulled flag lets the stream know that
5507 // the audio device is ready for another callback output buffer.
5508 callbackPulled = false;
// ---- cleanup (also the error-exit landing area in the full source) ----
5515 CoTaskMemFree( captureFormat );
5516 CoTaskMemFree( renderFormat );
5518 free ( convBuffer );
5519 delete renderResampler;
5520 delete captureResampler;
5524 // update stream state
5525 stream_.state = STREAM_STOPPED;
5527 if ( !errorText.empty() )
5529 errorText_ = errorText;
5534 //******************** End of __WINDOWS_WASAPI__ *********************//
5538 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5540 // Modified by Robin Davies, October 2005
5541 // - Improvements to DirectX pointer chasing.
5542 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5543 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5544 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5545 // Changed device query structure for RtAudio 4.0.7, January 2010
5547 #include <windows.h>
5548 #include <process.h>
5549 #include <mmsystem.h>
5553 #include <algorithm>
5555 #if defined(__MINGW32__)
5556 // missing from latest mingw winapi
5557 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5558 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5559 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5560 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5563 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5565 #ifdef _MSC_VER // if Microsoft Visual C++
5566 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
// Returns nonzero when 'pointer' lies in the circular interval
// [earlierPointer, laterPointer) of a DirectSound ring buffer of
// 'bufferSize' bytes.  Each pointer is first unwrapped so the comparison
// is valid even when the interval crosses the buffer's wrap point.
5569 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5571 if ( pointer > bufferSize ) pointer -= bufferSize;
5572 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5573 if ( pointer < earlierPointer ) pointer += bufferSize;
5574 return pointer >= earlierPointer && pointer < laterPointer;
5577 // A structure to hold various information related to the DirectSound
5578 // API implementation.
// NOTE(review): the struct header and several members (id, buffer, xrun —
// referenced by the constructor below) are elided in this extraction;
// confirm the full member list against the canonical source.  The [2]
// arrays are presumably indexed by stream direction (playback/capture) —
// TODO confirm against the OUTPUT/INPUT usage elsewhere in the file.
5580 unsigned int drainCounter; // Tracks callback counts when draining
5581 bool internalDrain; // Indicates if stop is initiated from callback or not.
5585 UINT bufferPointer[2];
5586 DWORD dsBufferSize[2];
5587 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero all counters, handles and flags.
5591 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5594 // Declarations for utility functions, callbacks, and structures
5595 // specific to the DirectSound implementation.
// Enumeration callback passed to DirectSound(Capture)Enumerate; collects
// device GUIDs/descriptions into a DsProbeData (signature partially
// elided in this extraction).
5596 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5597 LPCTSTR description,
// Maps a DirectSound HRESULT to a human-readable string.
5601 static const char* getErrorString( int code );
// Audio-thread entry point for the DirectSound callback loop.
5603 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice default-constructor fragment: a fresh entry starts unfound
// with neither its playback [0] nor capture [1] GUID validated.
5612 : found(false) { validId[0] = false; validId[1] = false; }
// Carries enumeration state into deviceQueryCallback: which direction is
// being probed and the device list to append to.
5615 struct DsProbeData {
5617 std::vector<struct DsDevice>* dsDevices;
// Constructor: initialize COM for this thread.  coInitialized_ records
// whether our CoInitialize() succeeded, so the destructor only calls
// CoUninitialize() when we actually own the initialization.
5620 RtApiDs :: RtApiDs()
5622 // Dsound will run both-threaded. If CoInitialize fails, then just
5623 // accept whatever the mainline chose for a threading model.
5624 coInitialized_ = false;
5625 HRESULT hr = CoInitialize( NULL );
5626 if ( !FAILED( hr ) ) coInitialized_ = true;
// Destructor: close any open stream, then balance the constructor's
// CoInitialize() if it succeeded.
5629 RtApiDs :: ~RtApiDs()
5631 if ( stream_.state != STREAM_CLOSED ) closeStream();
5632 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5635 // The DirectSound default output is always the first device.
// (Body elided in this extraction — per the comment above it presumably
// returns 0; confirm against the canonical source.)
5636 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5641 // The DirectSound default input is always the first input device,
5642 // which is the first capture device enumerated.
// (Body elided in this extraction — per the comment above it presumably
// returns 0; confirm against the canonical source.)
5643 unsigned int RtApiDs :: getDefaultInputDevice( void )
// Re-enumerates DirectSound playback and capture devices, merging results
// into the persistent dsDevices list, pruning entries whose hardware has
// disappeared, and returning the resulting device count.  Enumeration
// failures are reported as warnings, not fatal errors.
5648 unsigned int RtApiDs :: getDeviceCount( void )
5650 // Set query flag for previously found devices to false, so that we
5651 // can check for any devices that have disappeared.
5652 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5653 dsDevices[i].found = false;
5655 // Query DirectSound devices.
5656 struct DsProbeData probeInfo;
5657 probeInfo.isInput = false;
5658 probeInfo.dsDevices = &dsDevices;
5659 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5660 if ( FAILED( result ) ) {
5661 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5662 errorText_ = errorStream_.str();
5663 error( RtAudioError::WARNING );
5666 // Query DirectSoundCapture devices.
5667 probeInfo.isInput = true;
5668 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5669 if ( FAILED( result ) ) {
5670 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5671 errorText_ = errorStream_.str();
5672 error( RtAudioError::WARNING );
5675 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// Note the loop header has no increment: erase() shifts the next element
// into slot i (the increment on the keep-path is elided in this view).
5676 for ( unsigned int i=0; i<dsDevices.size(); ) {
5677 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5681 return static_cast<unsigned int>(dsDevices.size());
// Probes a single DirectSound device and fills an RtAudio::DeviceInfo:
// output channels and sample-rate range from the playback DSCAPS, input
// channels / rates / native formats decoded from the capture DSCCAPS
// dwFormats bitmask, plus default-device flags and the device name.
// Failures are reported as warnings and return the (info.probed == false)
// structure.  NOTE(review): some lines (goto labels, caps declarations,
// closing braces) are elided in this extraction — confirm exact control
// flow against the canonical source.
5684 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5686 RtAudio::DeviceInfo info;
5687 info.probed = false;
5689 if ( dsDevices.size() == 0 ) {
5690 // Force a query of all devices
5692 if ( dsDevices.size() == 0 ) {
5693 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5694 error( RtAudioError::INVALID_USE );
5699 if ( device >= dsDevices.size() ) {
5700 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5701 error( RtAudioError::INVALID_USE );
// Skip the playback probe entirely when this entry has no output GUID.
5706 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5708 LPDIRECTSOUND output;
5710 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5711 if ( FAILED( result ) ) {
5712 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5713 errorText_ = errorStream_.str();
5714 error( RtAudioError::WARNING );
5718 outCaps.dwSize = sizeof( outCaps );
5719 result = output->GetCaps( &outCaps );
5720 if ( FAILED( result ) ) {
5722 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5723 errorText_ = errorStream_.str();
5724 error( RtAudioError::WARNING );
5728 // Get output channel information.
5729 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5731 // Get sample rate information.
// Keep every standard rate inside the device's secondary-buffer range;
// prefer the highest such rate that does not exceed 48 kHz.
5732 info.sampleRates.clear();
5733 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5734 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5735 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5736 info.sampleRates.push_back( SAMPLE_RATES[k] );
5738 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5739 info.preferredSampleRate = SAMPLE_RATES[k];
5743 // Get format information.
5744 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5745 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5749 if ( getDefaultOutputDevice() == device )
5750 info.isDefaultOutput = true;
// No capture GUID: finish with the playback-only information.
5752 if ( dsDevices[ device ].validId[1] == false ) {
5753 info.name = dsDevices[ device ].name;
5760 LPDIRECTSOUNDCAPTURE input;
5761 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5762 if ( FAILED( result ) ) {
5763 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5764 errorText_ = errorStream_.str();
5765 error( RtAudioError::WARNING );
5770 inCaps.dwSize = sizeof( inCaps );
5771 result = input->GetCaps( &inCaps );
5772 if ( FAILED( result ) ) {
5774 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5775 errorText_ = errorStream_.str();
5776 error( RtAudioError::WARNING );
5780 // Get input channel information.
5781 info.inputChannels = inCaps.dwChannels;
5783 // Get sample rate and format information.
// dwFormats is a WAVE_FORMAT_* bitmask encoding rate x channels x width;
// decode the stereo family first, then the mono family, preferring
// 16-bit support over 8-bit when both are advertised.
5784 std::vector<unsigned int> rates;
5785 if ( inCaps.dwChannels >= 2 ) {
5786 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5787 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5788 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5789 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5790 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5791 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5792 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5793 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5795 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5796 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5797 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5798 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5799 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5801 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5802 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5803 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5804 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5805 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5808 else if ( inCaps.dwChannels == 1 ) {
5809 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5810 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5811 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5812 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5813 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5814 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5815 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5816 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5818 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5819 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5820 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5821 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5822 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5824 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5825 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5826 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5827 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5828 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5831 else info.inputChannels = 0; // technically, this would be an error
5835 if ( info.inputChannels == 0 ) return info;
5837 // Copy the supported rates to the info structure but avoid duplication.
5839 for ( unsigned int i=0; i<rates.size(); i++ ) {
5841 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5842 if ( rates[i] == info.sampleRates[j] ) {
5847 if ( found == false ) info.sampleRates.push_back( rates[i] );
5849 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5851 // If device opens for both playback and capture, we determine the channels.
5852 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5853 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Device 0 is the DirectSound default (see getDefaultInputDevice above).
5855 if ( device == 0 ) info.isDefaultInput = true;
5857 // Copy name and return.
5858 info.name = dsDevices[ device ].name;
// Open the given DirectSound device for one direction (OUTPUT or INPUT).
// Validates the request, creates the DirectSound playback/capture object
// and its secondary buffer, sizes and zeroes the device buffer, fills in
// the stream_ structure, and spawns the callback thread on first open.
//
// Params: device       index into dsDevices (validated against size)
//         mode         OUTPUT or INPUT
//         channels     user channel count (DS supports at most 2 total)
//         firstChannel channel offset within the device
//         sampleRate   requested rate in Hz
//         format       requested RtAudio sample format
//         bufferSize   in/out: user buffer size in frames (clamped >= 32)
//         options      optional stream options (numberOfBuffers, flags)
//
// NOTE(review): this chunk is a mangled extract — each line carries a
// residual source line number, and code lines are missing wherever that
// numbering skips (closing braces, "return FAILURE;" statements, the
// "error:" cleanup label, the nBuffers declaration, etc.).  Comments
// below are additions only; the surviving code text is untouched.
5863 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5864 unsigned int firstChannel, unsigned int sampleRate,
5865 RtAudioFormat format, unsigned int *bufferSize,
5866 RtAudio::StreamOptions *options )
// --- Parameter / device validation --------------------------------------
// DirectSound buffers are at most stereo, so channels + offset <= 2.
5868 if ( channels + firstChannel > 2 ) {
5869 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5873 size_t nDevices = dsDevices.size();
5874 if ( nDevices == 0 ) {
5875 // This should not happen because a check is made before this function is called.
5876 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5880 if ( device >= nDevices ) {
5881 // This should not happen because a check is made before this function is called.
5882 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
// validId[0] marks playback capability, validId[1] capture capability.
5886 if ( mode == OUTPUT ) {
5887 if ( dsDevices[ device ].validId[0] == false ) {
5888 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5889 errorText_ = errorStream_.str();
5893 else { // mode == INPUT
5894 if ( dsDevices[ device ].validId[1] == false ) {
5895 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5896 errorText_ = errorStream_.str();
5901 // According to a note in PortAudio, using GetDesktopWindow()
5902 // instead of GetForegroundWindow() is supposed to avoid problems
5903 // that occur when the application's window is not the foreground
5904 // window. Also, if the application window closes before the
5905 // DirectSound buffer, DirectSound can crash. In the past, I had
5906 // problems when using GetDesktopWindow() but it seems fine now
5907 // (January 2010). I'll leave it commented here.
5908 // HWND hWnd = GetForegroundWindow();
5909 HWND hWnd = GetDesktopWindow();
5911 // Check the numberOfBuffers parameter and limit the lowest value to
5912 // two. This is a judgement call and a value of two is probably too
5913 // low for capture, but it should work for playback.
// NOTE(review): the nBuffers declaration (original line 5914) is missing
// from this extract.
5915 if ( options ) nBuffers = options->numberOfBuffers;
5916 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5917 if ( nBuffers < 2 ) nBuffers = 3;
5919 // Check the lower range of the user-specified buffer size and set
5920 // (arbitrarily) to a lower bound of 32.
5921 if ( *bufferSize < 32 ) *bufferSize = 32;
5923 // Create the wave format structure. The data format setting will
5924 // be determined later.
5925 WAVEFORMATEX waveFormat;
5926 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5927 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5928 waveFormat.nChannels = channels + firstChannel;
5929 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5931 // Determine the device buffer size. By default, we'll use the value
5932 // defined above (32K), but we will grow it to make allowances for
5933 // very large software buffer sizes.
5934 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5935 DWORD dsPointerLeadTime = 0;
// ohandle receives the DS object, bhandle the DS buffer; both stored in
// the DsHandle at the end.
5937 void *ohandle = 0, *bhandle = 0;
// --- Playback (OUTPUT) setup --------------------------------------------
5939 if ( mode == OUTPUT ) {
5941 LPDIRECTSOUND output;
5942 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5943 if ( FAILED( result ) ) {
5944 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5945 errorText_ = errorStream_.str();
5950 outCaps.dwSize = sizeof( outCaps );
5951 result = output->GetCaps( &outCaps );
5952 if ( FAILED( result ) ) {
5954 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5955 errorText_ = errorStream_.str();
5959 // Check channel information.
5960 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
// NOTE(review): error text below says "getDeviceInfo" — a copy/paste slip;
// the message originates here in probeDeviceOpen.
5961 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5962 errorText_ = errorStream_.str();
5966 // Check format information. Use 16-bit format unless not
5967 // supported or user requests 8-bit.
5968 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5969 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5970 waveFormat.wBitsPerSample = 16;
5971 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5974 waveFormat.wBitsPerSample = 8;
5975 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5977 stream_.userFormat = format;
5979 // Update wave format structure and buffer information.
5980 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5981 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
// Lead time = total bytes of user buffering; the device buffer must hold
// at least twice this much so the write cursor can stay ahead safely.
5982 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5984 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
// NOTE(review): the loop body (presumably doubling dsBufferSize) is on a
// missing line in this extract.
5985 while ( dsPointerLeadTime * 2U > dsBufferSize )
5988 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5989 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5990 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5991 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5992 if ( FAILED( result ) ) {
5994 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5995 errorText_ = errorStream_.str();
5999 // Even though we will write to the secondary buffer, we need to
6000 // access the primary buffer to set the correct output format
6001 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
6002 // buffer description.
6003 DSBUFFERDESC bufferDescription;
6004 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6005 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6006 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
6008 // Obtain the primary buffer
6009 LPDIRECTSOUNDBUFFER buffer;
6010 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6011 if ( FAILED( result ) ) {
6013 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
6014 errorText_ = errorStream_.str();
6018 // Set the primary DS buffer sound format.
6019 result = buffer->SetFormat( &waveFormat );
6020 if ( FAILED( result ) ) {
6022 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
6023 errorText_ = errorStream_.str();
6027 // Setup the secondary DS buffer description.
6028 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6029 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6030 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6031 DSBCAPS_GLOBALFOCUS |
6032 DSBCAPS_GETCURRENTPOSITION2 |
6033 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
6034 bufferDescription.dwBufferBytes = dsBufferSize;
6035 bufferDescription.lpwfxFormat = &waveFormat;
6037 // Try to create the secondary DS buffer. If that doesn't work,
6038 // try to use software mixing. Otherwise, there's a problem.
6039 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6040 if ( FAILED( result ) ) {
6041 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6042 DSBCAPS_GLOBALFOCUS |
6043 DSBCAPS_GETCURRENTPOSITION2 |
6044 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6045 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6046 if ( FAILED( result ) ) {
6048 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6049 errorText_ = errorStream_.str();
6054 // Get the buffer size ... might be different from what we specified.
6056 dsbcaps.dwSize = sizeof( DSBCAPS );
6057 result = buffer->GetCaps( &dsbcaps );
6058 if ( FAILED( result ) ) {
6061 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6062 errorText_ = errorStream_.str();
6066 dsBufferSize = dsbcaps.dwBufferBytes;
6068 // Lock the DS buffer
6071 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6072 if ( FAILED( result ) ) {
6075 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6076 errorText_ = errorStream_.str();
6080 // Zero the DS buffer
6081 ZeroMemory( audioPtr, dataLen );
6083 // Unlock the DS buffer
6084 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6085 if ( FAILED( result ) ) {
6088 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6089 errorText_ = errorStream_.str();
// Stash the DS object and buffer as opaque handles for the DsHandle.
6093 ohandle = (void *) output;
6094 bhandle = (void *) buffer;
// --- Capture (INPUT) setup ----------------------------------------------
6097 if ( mode == INPUT ) {
6099 LPDIRECTSOUNDCAPTURE input;
6100 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6101 if ( FAILED( result ) ) {
6102 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6103 errorText_ = errorStream_.str();
6108 inCaps.dwSize = sizeof( inCaps );
6109 result = input->GetCaps( &inCaps );
6110 if ( FAILED( result ) ) {
6112 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6113 errorText_ = errorStream_.str();
6117 // Check channel information.
6118 if ( inCaps.dwChannels < channels + firstChannel ) {
// NOTE(review): error text says "getDeviceInfo" — same copy/paste slip as
// the stereo-playback message above.
6119 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6123 // Check format information. Use 16-bit format unless user
// requests 8-bit AND the device reports an 8-bit capture format.
6125 DWORD deviceFormats;
6126 if ( channels + firstChannel == 2 ) {
// Stereo 8-bit capture formats at 11.025/22.05/44.1/96 kHz.
6127 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6128 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6129 waveFormat.wBitsPerSample = 8;
6130 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6132 else { // assume 16-bit is supported
6133 waveFormat.wBitsPerSample = 16;
6134 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6137 else { // channel == 1
// Mono 8-bit capture formats.
6138 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6139 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6140 waveFormat.wBitsPerSample = 8;
6141 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6143 else { // assume 16-bit is supported
6144 waveFormat.wBitsPerSample = 16;
6145 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6148 stream_.userFormat = format;
6150 // Update wave format structure and buffer information.
6151 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6152 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6153 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6155 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
// NOTE(review): loop body missing in this extract (see OUTPUT branch).
6156 while ( dsPointerLeadTime * 2U > dsBufferSize )
6159 // Setup the secondary DS buffer description.
6160 DSCBUFFERDESC bufferDescription;
6161 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6162 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6163 bufferDescription.dwFlags = 0;
6164 bufferDescription.dwReserved = 0;
6165 bufferDescription.dwBufferBytes = dsBufferSize;
6166 bufferDescription.lpwfxFormat = &waveFormat;
6168 // Create the capture buffer.
6169 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6170 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6171 if ( FAILED( result ) ) {
6173 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6174 errorText_ = errorStream_.str();
6178 // Get the buffer size ... might be different from what we specified.
6180 dscbcaps.dwSize = sizeof( DSCBCAPS );
6181 result = buffer->GetCaps( &dscbcaps );
6182 if ( FAILED( result ) ) {
6185 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6186 errorText_ = errorStream_.str();
6190 dsBufferSize = dscbcaps.dwBufferBytes;
6192 // NOTE: We could have a problem here if this is a duplex stream
6193 // and the play and capture hardware buffer sizes are different
6194 // (I'm actually not sure if that is a problem or not).
6195 // Currently, we are not verifying that.
6197 // Lock the capture buffer
6200 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6201 if ( FAILED( result ) ) {
6204 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6205 errorText_ = errorStream_.str();
// Zero the capture buffer so no stale data is delivered on startup.
6210 ZeroMemory( audioPtr, dataLen );
6212 // Unlock the buffer
6213 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6214 if ( FAILED( result ) ) {
6217 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6218 errorText_ = errorStream_.str();
6222 ohandle = (void *) input;
6223 bhandle = (void *) buffer;
// --- Common stream bookkeeping ------------------------------------------
6226 // Set various stream parameters
6227 DsHandle *handle = 0;
6228 stream_.nDeviceChannels[mode] = channels + firstChannel;
6229 stream_.nUserChannels[mode] = channels;
6230 stream_.bufferSize = *bufferSize;
6231 stream_.channelOffset[mode] = firstChannel;
6232 stream_.deviceInterleaved[mode] = true;
6233 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6234 else stream_.userInterleaved = true;
6236 // Set flag for buffer conversion
// Conversion is needed when channel count, sample format, or interleaving
// differs between the user side and the device side.
6237 stream_.doConvertBuffer[mode] = false;
6238 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6239 stream_.doConvertBuffer[mode] = true;
6240 if (stream_.userFormat != stream_.deviceFormat[mode])
6241 stream_.doConvertBuffer[mode] = true;
6242 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6243 stream_.nUserChannels[mode] > 1 )
6244 stream_.doConvertBuffer[mode] = true;
6246 // Allocate necessary internal buffers
6247 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6248 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6249 if ( stream_.userBuffer[mode] == NULL ) {
6250 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6254 if ( stream_.doConvertBuffer[mode] ) {
6256 bool makeBuffer = true;
6257 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6258 if ( mode == INPUT ) {
// In duplex mode, reuse the output-side device buffer if it is already
// large enough for the input side.
6259 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6260 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6261 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6266 bufferBytes *= *bufferSize;
6267 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6268 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6269 if ( stream_.deviceBuffer == NULL ) {
6270 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6276 // Allocate our DsHandle structures for the stream.
// First open (no apiHandle yet): create the DsHandle and its event.
6277 if ( stream_.apiHandle == 0 ) {
6279 handle = new DsHandle;
// NOTE(review): error text below says "AsioHandle" — copy/paste slip from
// the ASIO backend; a DsHandle is being allocated here.
6281 catch ( std::bad_alloc& ) {
6282 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6286 // Create a manual-reset event.
6287 handle->condition = CreateEvent( NULL, // no security
6288 TRUE, // manual-reset
6289 FALSE, // non-signaled initially
6291 stream_.apiHandle = (void *) handle;
6294 handle = (DsHandle *) stream_.apiHandle;
6295 handle->id[mode] = ohandle;
6296 handle->buffer[mode] = bhandle;
6297 handle->dsBufferSize[mode] = dsBufferSize;
6298 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6300 stream_.device[mode] = device;
6301 stream_.state = STREAM_STOPPED;
6302 if ( stream_.mode == OUTPUT && mode == INPUT )
6303 // We had already set up an output stream.
6304 stream_.mode = DUPLEX;
6306 stream_.mode = mode;
6307 stream_.nBuffers = nBuffers;
6308 stream_.sampleRate = sampleRate;
6310 // Setup the buffer conversion information structure.
6311 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6313 // Setup the callback thread.
// Only started once; a duplex open reuses the thread created for the
// first direction.
6314 if ( stream_.callbackInfo.isRunning == false ) {
6316 stream_.callbackInfo.isRunning = true;
6317 stream_.callbackInfo.object = (void *) this;
6318 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6319 &stream_.callbackInfo, 0, &threadId );
6320 if ( stream_.callbackInfo.thread == 0 ) {
6321 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6325 // Boost DS thread priority
6326 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// --- Error cleanup path -------------------------------------------------
// Reached via goto on failure (label missing from this extract): release
// any DirectSound objects/buffers, close the event, free heap buffers,
// and mark the stream closed before returning FAILURE.
6332 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6333 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6334 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6335 if ( buffer ) buffer->Release();
6338 if ( handle->buffer[1] ) {
6339 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6340 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6341 if ( buffer ) buffer->Release();
6344 CloseHandle( handle->condition );
6346 stream_.apiHandle = 0;
6349 for ( int i=0; i<2; i++ ) {
6350 if ( stream_.userBuffer[i] ) {
6351 free( stream_.userBuffer[i] );
6352 stream_.userBuffer[i] = 0;
6356 if ( stream_.deviceBuffer ) {
6357 free( stream_.deviceBuffer );
6358 stream_.deviceBuffer = 0;
6361 stream_.state = STREAM_CLOSED;
// Close the open stream: stop and join the callback thread, release the
// DirectSound playback/capture objects and buffers, close the event
// handle, free the user/device buffers, and reset stream state.
// Issues only a WARNING (no throw) if there is no open stream.
//
// NOTE(review): mangled extract — residual line numbers on each line, and
// the buffer Stop()/Release() calls inside the two cleanup branches are
// on lines missing from this view (numbering gaps 6383-6388, 6392-6397).
6365 void RtApiDs :: closeStream()
6367 if ( stream_.state == STREAM_CLOSED ) {
6368 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6369 error( RtAudioError::WARNING );
6373 // Stop the callback thread.
// Clearing isRunning makes the callback loop exit; then join and close it.
6374 stream_.callbackInfo.isRunning = false;
6375 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6376 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6378 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release the playback object/buffer if one was created.
6380 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6381 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6382 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Release the capture object/buffer if one was created.
6389 if ( handle->buffer[1] ) {
6390 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6391 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6398 CloseHandle( handle->condition );
6400 stream_.apiHandle = 0;
// Free per-direction user buffers and the shared device buffer.
6403 for ( int i=0; i<2; i++ ) {
6404 if ( stream_.userBuffer[i] ) {
6405 free( stream_.userBuffer[i] );
6406 stream_.userBuffer[i] = 0;
6410 if ( stream_.deviceBuffer ) {
6411 free( stream_.deviceBuffer );
6412 stream_.deviceBuffer = 0;
6415 stream_.mode = UNINITIALIZED;
6416 stream_.state = STREAM_CLOSED;
// Start the stream: begin looping playback on the output buffer and/or
// looping capture on the input buffer, reset drain state, and mark the
// stream RUNNING.  Issues a WARNING if the stream is already running and
// a SYSTEM_ERROR if any DirectSound call fails.
//
// NOTE(review): mangled extract — residual line numbers on each line; the
// HRESULT result declaration and the "unlock:" label/goto lines are
// missing from this view (numbering gaps).
6419 void RtApiDs :: startStream()
6422 if ( stream_.state == STREAM_RUNNING ) {
6423 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6424 error( RtAudioError::WARNING );
// Record the starting timestamp used for stream-time bookkeeping.
6428 #if defined( HAVE_GETTIMEOFDAY )
6429 gettimeofday( &stream_.lastTickTimestamp, NULL );
6432 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6434 // Increase scheduler frequency on lesser windows (a side-effect of
6435 // increasing timer accuracy). On greater windows (Win2K or later),
6436 // this is already in effect.
6437 timeBeginPeriod( 1 );
// Reset the duplex-synchronization state (used by callbackEvent).
6439 buffersRolling = false;
6440 duplexPrerollBytes = 0;
6442 if ( stream_.mode == DUPLEX ) {
6443 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6444 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Kick off looping playback on the output buffer.
6448 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6450 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6451 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6452 if ( FAILED( result ) ) {
6453 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6454 errorText_ = errorStream_.str();
// Kick off looping capture on the input buffer.
6459 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6461 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6462 result = buffer->Start( DSCBSTART_LOOPING );
6463 if ( FAILED( result ) ) {
6464 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6465 errorText_ = errorStream_.str();
// Clear drain state and the manual-reset event before running.
6470 handle->drainCounter = 0;
6471 handle->internalDrain = false;
6472 ResetEvent( handle->condition );
6473 stream_.state = STREAM_RUNNING;
6476 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop the stream: for output, optionally wait for a drain to finish,
// then Stop() each DirectSound buffer, lock/zero/unlock it so stale audio
// is not replayed on restart, and reset buffer pointers.  Restores normal
// timer resolution and releases the stream mutex at the end.  Issues a
// WARNING if already stopped and a SYSTEM_ERROR on DirectSound failures.
//
// NOTE(review): mangled extract — residual line numbers on each line; the
// HRESULT/audioPtr/dataLen declarations and several closing braces /
// error-path lines are missing from this view (numbering gaps).
6479 void RtApiDs :: stopStream()
6482 if ( stream_.state == STREAM_STOPPED ) {
6483 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6484 error( RtAudioError::WARNING );
6491 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6492 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// If no drain is in progress, request one (drainCounter = 2) and block on
// the manual-reset event until the callback signals drain completion.
6493 if ( handle->drainCounter == 0 ) {
6494 handle->drainCounter = 2;
6495 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6498 stream_.state = STREAM_STOPPED;
6500 MUTEX_LOCK( &stream_.mutex );
6502 // Stop the buffer and clear memory
6503 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6504 result = buffer->Stop();
6505 if ( FAILED( result ) ) {
6506 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6507 errorText_ = errorStream_.str();
6511 // Lock the buffer and clear it so that if we start to play again,
6512 // we won't have old data playing.
6513 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6514 if ( FAILED( result ) ) {
6515 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6516 errorText_ = errorStream_.str();
6520 // Zero the DS buffer
6521 ZeroMemory( audioPtr, dataLen );
6523 // Unlock the DS buffer
6524 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6525 if ( FAILED( result ) ) {
6526 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6527 errorText_ = errorStream_.str();
6531 // If we start playing again, we must begin at beginning of buffer.
6532 handle->bufferPointer[0] = 0;
6535 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6536 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6540 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above;
// only lock here for a pure INPUT stream.
6542 if ( stream_.mode != DUPLEX )
6543 MUTEX_LOCK( &stream_.mutex );
6545 result = buffer->Stop();
6546 if ( FAILED( result ) ) {
6547 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6548 errorText_ = errorStream_.str();
6552 // Lock the buffer and clear it so that if we start to play again,
6553 // we won't have old data playing.
6554 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6555 if ( FAILED( result ) ) {
6556 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6557 errorText_ = errorStream_.str();
6561 // Zero the DS buffer
6562 ZeroMemory( audioPtr, dataLen );
6564 // Unlock the DS buffer
6565 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6566 if ( FAILED( result ) ) {
6567 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6568 errorText_ = errorStream_.str();
6572 // If we start recording again, we must begin at beginning of buffer.
6573 handle->bufferPointer[1] = 0;
6577 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6578 MUTEX_UNLOCK( &stream_.mutex );
6580 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort the stream immediately.  Setting drainCounter to 2 tells the
// callback/stop machinery to skip the output-drain wait (see stopStream,
// which waits on the condition only when drainCounter == 0).
// Issues a WARNING if the stream is already stopped.
//
// NOTE(review): mangled extract — the trailing statement(s) after
// drainCounter = 2 (presumably the call that performs the actual stop,
// likely stopStream()) are on lines missing from this view.
6583 void RtApiDs :: abortStream()
6586 if ( stream_.state == STREAM_STOPPED ) {
6587 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6588 error( RtAudioError::WARNING );
6592 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6593 handle->drainCounter = 2;
6598 void RtApiDs :: callbackEvent()
6600 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6601 Sleep( 50 ); // sleep 50 milliseconds
6605 if ( stream_.state == STREAM_CLOSED ) {
6606 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6607 error( RtAudioError::WARNING );
6611 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6612 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6614 // Check if we were draining the stream and signal is finished.
6615 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6617 stream_.state = STREAM_STOPPING;
6618 if ( handle->internalDrain == false )
6619 SetEvent( handle->condition );
6625 // Invoke user callback to get fresh output data UNLESS we are
6627 if ( handle->drainCounter == 0 ) {
6628 RtAudioCallback callback = (RtAudioCallback) info->callback;
6629 double streamTime = getStreamTime();
6630 RtAudioStreamStatus status = 0;
6631 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6632 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6633 handle->xrun[0] = false;
6635 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6636 status |= RTAUDIO_INPUT_OVERFLOW;
6637 handle->xrun[1] = false;
6639 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6640 stream_.bufferSize, streamTime, status, info->userData );
6641 if ( cbReturnValue == 2 ) {
6642 stream_.state = STREAM_STOPPING;
6643 handle->drainCounter = 2;
6647 else if ( cbReturnValue == 1 ) {
6648 handle->drainCounter = 1;
6649 handle->internalDrain = true;
6654 DWORD currentWritePointer, safeWritePointer;
6655 DWORD currentReadPointer, safeReadPointer;
6656 UINT nextWritePointer;
6658 LPVOID buffer1 = NULL;
6659 LPVOID buffer2 = NULL;
6660 DWORD bufferSize1 = 0;
6661 DWORD bufferSize2 = 0;
6666 MUTEX_LOCK( &stream_.mutex );
6667 if ( stream_.state == STREAM_STOPPED ) {
6668 MUTEX_UNLOCK( &stream_.mutex );
6672 if ( buffersRolling == false ) {
6673 if ( stream_.mode == DUPLEX ) {
6674 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6676 // It takes a while for the devices to get rolling. As a result,
6677 // there's no guarantee that the capture and write device pointers
6678 // will move in lockstep. Wait here for both devices to start
6679 // rolling, and then set our buffer pointers accordingly.
6680 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6681 // bytes later than the write buffer.
6683 // Stub: a serious risk of having a pre-emptive scheduling round
6684 // take place between the two GetCurrentPosition calls... but I'm
6685 // really not sure how to solve the problem. Temporarily boost to
6686 // Realtime priority, maybe; but I'm not sure what priority the
6687 // DirectSound service threads run at. We *should* be roughly
6688 // within a ms or so of correct.
6690 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6691 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6693 DWORD startSafeWritePointer, startSafeReadPointer;
6695 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6696 if ( FAILED( result ) ) {
6697 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6698 errorText_ = errorStream_.str();
6699 MUTEX_UNLOCK( &stream_.mutex );
6700 error( RtAudioError::SYSTEM_ERROR );
6703 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6704 if ( FAILED( result ) ) {
6705 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6706 errorText_ = errorStream_.str();
6707 MUTEX_UNLOCK( &stream_.mutex );
6708 error( RtAudioError::SYSTEM_ERROR );
6712 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6713 if ( FAILED( result ) ) {
6714 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6715 errorText_ = errorStream_.str();
6716 MUTEX_UNLOCK( &stream_.mutex );
6717 error( RtAudioError::SYSTEM_ERROR );
6720 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6721 if ( FAILED( result ) ) {
6722 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6723 errorText_ = errorStream_.str();
6724 MUTEX_UNLOCK( &stream_.mutex );
6725 error( RtAudioError::SYSTEM_ERROR );
6728 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6732 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6734 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6735 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6736 handle->bufferPointer[1] = safeReadPointer;
6738 else if ( stream_.mode == OUTPUT ) {
6740 // Set the proper nextWritePosition after initial startup.
6741 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6742 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6743 if ( FAILED( result ) ) {
6744 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6745 errorText_ = errorStream_.str();
6746 MUTEX_UNLOCK( &stream_.mutex );
6747 error( RtAudioError::SYSTEM_ERROR );
6750 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6751 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6754 buffersRolling = true;
6757 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6759 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6761 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6762 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6763 bufferBytes *= formatBytes( stream_.userFormat );
6764 memset( stream_.userBuffer[0], 0, bufferBytes );
6767 // Setup parameters and do buffer conversion if necessary.
6768 if ( stream_.doConvertBuffer[0] ) {
6769 buffer = stream_.deviceBuffer;
6770 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6771 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6772 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6775 buffer = stream_.userBuffer[0];
6776 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6777 bufferBytes *= formatBytes( stream_.userFormat );
6780 // No byte swapping necessary in DirectSound implementation.
6782 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6783 // unsigned. So, we need to convert our signed 8-bit data here to
6785 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6786 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6788 DWORD dsBufferSize = handle->dsBufferSize[0];
6789 nextWritePointer = handle->bufferPointer[0];
6791 DWORD endWrite, leadPointer;
6793 // Find out where the read and "safe write" pointers are.
6794 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6795 if ( FAILED( result ) ) {
6796 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6797 errorText_ = errorStream_.str();
6798 MUTEX_UNLOCK( &stream_.mutex );
6799 error( RtAudioError::SYSTEM_ERROR );
6803 // We will copy our output buffer into the region between
6804 // safeWritePointer and leadPointer. If leadPointer is not
6805 // beyond the next endWrite position, wait until it is.
6806 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6807 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6808 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6809 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6810 endWrite = nextWritePointer + bufferBytes;
6812 // Check whether the entire write region is behind the play pointer.
6813 if ( leadPointer >= endWrite ) break;
6815 // If we are here, then we must wait until the leadPointer advances
6816 // beyond the end of our next write region. We use the
6817 // Sleep() function to suspend operation until that happens.
6818 double millis = ( endWrite - leadPointer ) * 1000.0;
6819 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6820 if ( millis < 1.0 ) millis = 1.0;
6821 Sleep( (DWORD) millis );
6824 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6825 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6826 // We've strayed into the forbidden zone ... resync the read pointer.
6827 handle->xrun[0] = true;
6828 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6829 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6830 handle->bufferPointer[0] = nextWritePointer;
6831 endWrite = nextWritePointer + bufferBytes;
6834 // Lock free space in the buffer
6835 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6836 &bufferSize1, &buffer2, &bufferSize2, 0 );
6837 if ( FAILED( result ) ) {
6838 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6839 errorText_ = errorStream_.str();
6840 MUTEX_UNLOCK( &stream_.mutex );
6841 error( RtAudioError::SYSTEM_ERROR );
6845 // Copy our buffer into the DS buffer
6846 CopyMemory( buffer1, buffer, bufferSize1 );
6847 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6849 // Update our buffer offset and unlock sound buffer
6850 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6851 if ( FAILED( result ) ) {
6852 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6853 errorText_ = errorStream_.str();
6854 MUTEX_UNLOCK( &stream_.mutex );
6855 error( RtAudioError::SYSTEM_ERROR );
6858 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6859 handle->bufferPointer[0] = nextWritePointer;
6862 // Don't bother draining input
6863 if ( handle->drainCounter ) {
6864 handle->drainCounter++;
6868 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6870 // Setup parameters.
6871 if ( stream_.doConvertBuffer[1] ) {
6872 buffer = stream_.deviceBuffer;
6873 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6874 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6877 buffer = stream_.userBuffer[1];
6878 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6879 bufferBytes *= formatBytes( stream_.userFormat );
6882 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6883 long nextReadPointer = handle->bufferPointer[1];
6884 DWORD dsBufferSize = handle->dsBufferSize[1];
6886 // Find out where the write and "safe read" pointers are.
6887 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6888 if ( FAILED( result ) ) {
6889 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6890 errorText_ = errorStream_.str();
6891 MUTEX_UNLOCK( &stream_.mutex );
6892 error( RtAudioError::SYSTEM_ERROR );
6896 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6897 DWORD endRead = nextReadPointer + bufferBytes;
6899 // Handling depends on whether we are INPUT or DUPLEX.
6900 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6901 // then a wait here will drag the write pointers into the forbidden zone.
6903 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6904 // it's in a safe position. This causes dropouts, but it seems to be the only
6905 // practical way to sync up the read and write pointers reliably, given the
6906 // the very complex relationship between phase and increment of the read and write
6909 // In order to minimize audible dropouts in DUPLEX mode, we will
6910 // provide a pre-roll period of 0.5 seconds in which we return
6911 // zeros from the read buffer while the pointers sync up.
6913 if ( stream_.mode == DUPLEX ) {
6914 if ( safeReadPointer < endRead ) {
6915 if ( duplexPrerollBytes <= 0 ) {
6916 // Pre-roll time over. Be more agressive.
6917 int adjustment = endRead-safeReadPointer;
6919 handle->xrun[1] = true;
6921 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6922 // and perform fine adjustments later.
6923 // - small adjustments: back off by twice as much.
6924 if ( adjustment >= 2*bufferBytes )
6925 nextReadPointer = safeReadPointer-2*bufferBytes;
6927 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6929 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6933 // In pre=roll time. Just do it.
6934 nextReadPointer = safeReadPointer - bufferBytes;
6935 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6937 endRead = nextReadPointer + bufferBytes;
6940 else { // mode == INPUT
6941 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6942 // See comments for playback.
6943 double millis = (endRead - safeReadPointer) * 1000.0;
6944 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6945 if ( millis < 1.0 ) millis = 1.0;
6946 Sleep( (DWORD) millis );
6948 // Wake up and find out where we are now.
6949 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6950 if ( FAILED( result ) ) {
6951 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6952 errorText_ = errorStream_.str();
6953 MUTEX_UNLOCK( &stream_.mutex );
6954 error( RtAudioError::SYSTEM_ERROR );
6958 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6962 // Lock free space in the buffer
6963 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6964 &bufferSize1, &buffer2, &bufferSize2, 0 );
6965 if ( FAILED( result ) ) {
6966 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6967 errorText_ = errorStream_.str();
6968 MUTEX_UNLOCK( &stream_.mutex );
6969 error( RtAudioError::SYSTEM_ERROR );
6973 if ( duplexPrerollBytes <= 0 ) {
6974 // Copy our buffer into the DS buffer
6975 CopyMemory( buffer, buffer1, bufferSize1 );
6976 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6979 memset( buffer, 0, bufferSize1 );
6980 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6981 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6984 // Update our buffer offset and unlock sound buffer
6985 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6986 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6987 if ( FAILED( result ) ) {
6988 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6989 errorText_ = errorStream_.str();
6990 MUTEX_UNLOCK( &stream_.mutex );
6991 error( RtAudioError::SYSTEM_ERROR );
6994 handle->bufferPointer[1] = nextReadPointer;
6996 // No byte swapping necessary in DirectSound implementation.
6998 // If necessary, convert 8-bit data from unsigned to signed.
6999 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
7000 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
7002 // Do buffer conversion if necessary.
7003 if ( stream_.doConvertBuffer[1] )
7004 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
7008 MUTEX_UNLOCK( &stream_.mutex );
7009 RtApi::tickStreamTime();
7012 // Definitions for utility functions and callbacks
7013 // specific to the DirectSound implementation.
// Thread entry point for the DirectSound callback thread: repeatedly invokes
// RtApiDs::callbackEvent() for as long as the CallbackInfo "isRunning" flag
// stays true. The flag is polled through a pointer so that another thread
// clearing it (e.g. on stopStream/closeStream) terminates this loop.
// NOTE(review): this listing elides lines (embedded numbering jumps 7022->7029),
// so the loop's closing brace and the thread's return statement are not visible here.
7015 static unsigned __stdcall callbackHandler( void *ptr )
7017 CallbackInfo *info = (CallbackInfo *) ptr;
7018 RtApiDs *object = (RtApiDs *) info->object;
// Pointer to the shared run flag; dereferenced each iteration.
7019 bool* isRunning = &info->isRunning;
7021 while ( *isRunning == true ) {
7022 object->callbackEvent();
// DirectSound device-enumeration callback (passed to DirectSoundEnumerate /
// DirectSoundCaptureEnumerate). For each enumerated device it:
//   1. opens the device (capture or playback, per probeInfo.isInput) to
//      validate it really works,
//   2. checks its capabilities (channels/formats for capture, primary
//      mono/stereo flags for playback),
//   3. records the device name and GUID in the shared dsDevices vector,
//      merging input/output GUIDs for devices already seen by name
//      (id[0]/validId[0] = output, id[1]/validId[1] = input).
// Returning TRUE tells DirectSound to continue enumeration.
// NOTE(review): the listing elides lines (numbering jumps, e.g. 7030->7034,
// 7049->7056, 7084->7092); missing are at least the remaining parameters,
// capability declarations, "validDevice = true" assignments, else branches,
// and closing braces — confirm against the upstream RtAudio source.
7029 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
7030 LPCTSTR description,
7034 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
7035 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
7038 bool validDevice = false;
7039 if ( probeInfo.isInput == true ) {
// Capture path: try to open the device for input.
7041 LPDIRECTSOUNDCAPTURE object;
7043 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
// A device that won't open is silently skipped (enumeration continues).
7044 if ( hr != DS_OK ) return TRUE;
7046 caps.dwSize = sizeof(caps);
7047 hr = object->GetCaps( &caps );
7048 if ( hr == DS_OK ) {
// Valid capture device only if it reports channels and formats.
7049 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
// Playback path: try to open the device for output.
7056 LPDIRECTSOUND object;
7057 hr = DirectSoundCreate( lpguid, &object, NULL );
7058 if ( hr != DS_OK ) return TRUE;
7060 caps.dwSize = sizeof(caps);
7061 hr = object->GetCaps( &caps );
7062 if ( hr == DS_OK ) {
// Valid playback device only if a primary mono or stereo buffer exists.
7063 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7069 // If good device, then save its name and guid.
7070 std::string name = convertCharPointerToStdString( description );
7071 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
// A NULL GUID denotes the system default device.
7072 if ( lpguid == NULL )
7073 name = "Default Device";
7074 if ( validDevice ) {
// If a device with this name was already recorded (e.g. for the other
// direction), update it in place rather than adding a duplicate entry.
7075 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7076 if ( dsDevices[i].name == name ) {
7077 dsDevices[i].found = true;
7078 if ( probeInfo.isInput ) {
7079 dsDevices[i].id[1] = lpguid;
7080 dsDevices[i].validId[1] = true;
7083 dsDevices[i].id[0] = lpguid;
7084 dsDevices[i].validId[0] = true;
// Not seen before: build a new DsDevice record and append it.
7092 device.found = true;
7093 if ( probeInfo.isInput ) {
7094 device.id[1] = lpguid;
7095 device.validId[1] = true;
7098 device.id[0] = lpguid;
7099 device.validId[0] = true;
7101 dsDevices.push_back( device );
// Maps a DirectSound HRESULT error code to a short human-readable string
// used when composing RtApiDs error messages. Unrecognized codes fall
// through to a generic "DirectSound unknown error" string.
// NOTE(review): the listing elides lines (numbering jumps 7107->7111 and
// others) — the opening "switch ( code ) {" line, some case labels
// (e.g. the one preceding "Generic error" / "No sound driver"), and the
// closing brace are not visible here.
7107 static const char* getErrorString( int code )
7111 case DSERR_ALLOCATED:
7112 return "Already allocated";
7114 case DSERR_CONTROLUNAVAIL:
7115 return "Control unavailable";
7117 case DSERR_INVALIDPARAM:
7118 return "Invalid parameter";
7120 case DSERR_INVALIDCALL:
7121 return "Invalid call";
7124 return "Generic error";
7126 case DSERR_PRIOLEVELNEEDED:
7127 return "Priority level needed";
7129 case DSERR_OUTOFMEMORY:
7130 return "Out of memory";
7132 case DSERR_BADFORMAT:
7133 return "The sample rate or the channel format is not supported";
7135 case DSERR_UNSUPPORTED:
7136 return "Not supported";
7138 case DSERR_NODRIVER:
7141 case DSERR_ALREADYINITIALIZED:
7142 return "Already initialized";
7144 case DSERR_NOAGGREGATION:
7145 return "No aggregation";
7147 case DSERR_BUFFERLOST:
7148 return "Buffer lost";
7150 case DSERR_OTHERAPPHASPRIO:
7151 return "Another application already has priority";
7153 case DSERR_UNINITIALIZED:
7154 return "Uninitialized";
// Default branch for any code not handled above.
7157 return "DirectSound unknown error";
7160 //******************** End of __WINDOWS_DS__ *********************//
7164 #if defined(__LINUX_ALSA__)
7166 #include <alsa/asoundlib.h>
7169 // A structure to hold various information related to the ALSA API
// implementation: the playback/capture pcm handles, a condition variable
// used to signal the callback thread, and xrun flags per direction.
// NOTE(review): the struct's opening line and several members (e.g. the
// synchronized/runnable flags and xrun array implied by the constructor
// initializer below) are elided from this listing — confirm upstream.
7172 snd_pcm_t *handles[2];
7175 pthread_cond_t runnable_cv;
// Constructor initializer: stream starts unsynchronized, not runnable,
// with both xrun flags cleared.
7179 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
// Forward declaration of the ALSA callback thread entry point.
7182 static void *alsaCallbackHandler( void * ptr );
// Default constructor — no state beyond what the RtApi base class sets up.
7184 RtApiAlsa :: RtApiAlsa()
7186 // Nothing to do here.
// Destructor: ensure any open stream is closed so ALSA handles and the
// callback thread are released before the object goes away.
7189 RtApiAlsa :: ~RtApiAlsa()
7191 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Counts available ALSA PCM devices by walking every sound card
// ("hw:N") and each PCM subdevice on it, then additionally counting the
// "default" device if it can be opened. Failures to open a card control
// interface are reported as warnings and the card is skipped.
// NOTE(review): the listing elides lines (numbering jumps, e.g.
// 7197->7199, 7206->7209, 7223->7229) — the "name" buffer declaration,
// error-check "if" lines around each reported error, the nDevices
// increments, and the final return are not visible here.
7194 unsigned int RtApiAlsa :: getDeviceCount( void )
7196 unsigned nDevices = 0;
7197 int result, subdevice, card;
7199 snd_ctl_t *handle = 0;
7201 // Count cards and devices
// Start at the first card; snd_card_next sets card to -1 when exhausted.
7203 snd_card_next( &card );
7204 while ( card >= 0 ) {
7205 sprintf( name, "hw:%d", card );
7206 result = snd_ctl_open( &handle, name, 0 );
// Card control open failed: warn and move on (non-fatal).
7209 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7210 errorText_ = errorStream_.str();
7211 error( RtAudioError::WARNING );
// Enumerate PCM subdevices on this card; subdevice becomes -1 at the end.
7216 result = snd_ctl_pcm_next_device( handle, &subdevice );
7218 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7219 errorText_ = errorStream_.str();
7220 error( RtAudioError::WARNING );
7223 if ( subdevice < 0 )
7229 snd_ctl_close( handle );
7230 snd_card_next( &card );
// Also count the "default" virtual device when it is openable.
7233 result = snd_ctl_open( &handle, "default", 0 );
7236 snd_ctl_close( handle );
// Probes ALSA device number `device` and fills an RtAudio::DeviceInfo:
//   1. re-walks cards/subdevices (same order as getDeviceCount) to map the
//      index to an "hw:card,subdevice" name, or "default";
//   2. if a stream is already open on this device, returns the cached
//      info saved by saveDeviceInfo() instead of re-probing;
//   3. opens the device for playback then capture (non-blocking) to get
//      max output/input channel counts; duplexChannels is the minimum of
//      the two when both directions work;
//   4. re-opens in the direction with more channels and tests the discrete
//      SAMPLE_RATES table and each RTAUDIO_* sample format against the
//      hardware parameters;
//   5. resolves a friendlier "hw:cardname,subdevice" name via
//      snd_card_get_name.
// All probe failures are reported as RtAudioError::WARNING and, where
// possible, the function still returns whatever info was gathered.
// NOTE(review): this listing elides many lines (numbering jumps such as
// 7277->7284, 7341->7346, 7444->7446) — missing are the "name"/"value"/
// "phandle"/"cardname" declarations, most "if ( result < 0 ) {" guard
// lines, else branches, the probeParameters label, "info.probed = true",
// the final "return info;", and closing braces. Confirm against upstream
// RtAudio before editing logic here.
7242 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7244 RtAudio::DeviceInfo info;
// Pessimistically mark unprobed until the full probe succeeds.
7245 info.probed = false;
7247 unsigned nDevices = 0;
7248 int result, subdevice, card;
7250 snd_ctl_t *chandle = 0;
7252 // Count cards and devices
7255 snd_card_next( &card );
7256 while ( card >= 0 ) {
7257 sprintf( name, "hw:%d", card );
7258 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7261 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7262 errorText_ = errorStream_.str();
7263 error( RtAudioError::WARNING );
7268 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7270 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7271 errorText_ = errorStream_.str();
7272 error( RtAudioError::WARNING );
7275 if ( subdevice < 0 ) break;
// Found the requested index: remember its hw name.
7276 if ( nDevices == device ) {
7277 sprintf( name, "hw:%d,%d", card, subdevice );
7284 snd_ctl_close( chandle );
7285 snd_card_next( &card );
// The "default" device occupies the last index.
7288 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7289 if ( result == 0 ) {
7290 if ( nDevices == device ) {
7291 strcpy( name, "default" );
7297 if ( nDevices == 0 ) {
7298 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7299 error( RtAudioError::INVALID_USE );
7303 if ( device >= nDevices ) {
7304 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7305 error( RtAudioError::INVALID_USE );
7311 // If a stream is already open, we cannot probe the stream devices.
7312 // Thus, use the saved results.
7313 if ( stream_.state != STREAM_CLOSED &&
7314 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7315 snd_ctl_close( chandle );
7316 if ( device >= devices_.size() ) {
7317 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7318 error( RtAudioError::WARNING );
7321 return devices_[ device ];
7324 int openMode = SND_PCM_ASYNC;
7325 snd_pcm_stream_t stream;
7326 snd_pcm_info_t *pcminfo;
// Stack-allocated ALSA info/params structures (alloca-based macros).
7327 snd_pcm_info_alloca( &pcminfo );
7329 snd_pcm_hw_params_t *params;
// NOTE(review): "¶ms" looks like HTML-entity mangling of "&params"
// ("&para;" = ¶) — restore to snd_pcm_hw_params_alloca( &params );
// confirm against upstream. Same corruption pattern appears elsewhere
// in this listing as "¤t..." for "&current...".
7330 snd_pcm_hw_params_alloca( ¶ms );
7332 // First try for playback unless default device (which has subdev -1)
7333 stream = SND_PCM_STREAM_PLAYBACK;
7334 snd_pcm_info_set_stream( pcminfo, stream );
7335 if ( subdevice != -1 ) {
7336 snd_pcm_info_set_device( pcminfo, subdevice );
7337 snd_pcm_info_set_subdevice( pcminfo, 0 );
7339 result = snd_ctl_pcm_info( chandle, pcminfo );
7341 // Device probably doesn't support playback.
7346 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7348 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7349 errorText_ = errorStream_.str();
7350 error( RtAudioError::WARNING );
7354 // The device is open ... fill the parameter structure.
7355 result = snd_pcm_hw_params_any( phandle, params );
7357 snd_pcm_close( phandle );
7358 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7359 errorText_ = errorStream_.str();
7360 error( RtAudioError::WARNING );
7364 // Get output channel information.
7366 result = snd_pcm_hw_params_get_channels_max( params, &value );
7368 snd_pcm_close( phandle );
7369 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7370 errorText_ = errorStream_.str();
7371 error( RtAudioError::WARNING );
7374 info.outputChannels = value;
7375 snd_pcm_close( phandle );
// Now repeat the probe for the capture direction.
7378 stream = SND_PCM_STREAM_CAPTURE;
7379 snd_pcm_info_set_stream( pcminfo, stream );
7381 // Now try for capture unless default device (with subdev = -1)
7382 if ( subdevice != -1 ) {
7383 result = snd_ctl_pcm_info( chandle, pcminfo );
7384 snd_ctl_close( chandle );
7386 // Device probably doesn't support capture.
// With no output channels either, there is nothing more to learn.
7387 if ( info.outputChannels == 0 ) return info;
7388 goto probeParameters;
7392 snd_ctl_close( chandle );
7394 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7396 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7397 errorText_ = errorStream_.str();
7398 error( RtAudioError::WARNING );
7399 if ( info.outputChannels == 0 ) return info;
7400 goto probeParameters;
7403 // The device is open ... fill the parameter structure.
7404 result = snd_pcm_hw_params_any( phandle, params );
7406 snd_pcm_close( phandle );
7407 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7408 errorText_ = errorStream_.str();
7409 error( RtAudioError::WARNING );
7410 if ( info.outputChannels == 0 ) return info;
7411 goto probeParameters;
7414 result = snd_pcm_hw_params_get_channels_max( params, &value );
7416 snd_pcm_close( phandle );
7417 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7418 errorText_ = errorStream_.str();
7419 error( RtAudioError::WARNING );
7420 if ( info.outputChannels == 0 ) return info;
7421 goto probeParameters;
7423 info.inputChannels = value;
7424 snd_pcm_close( phandle );
7426 // If device opens for both playback and capture, we determine the channels.
7427 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7428 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7430 // ALSA doesn't provide default devices so we'll use the first available one.
7431 if ( device == 0 && info.outputChannels > 0 )
7432 info.isDefaultOutput = true;
7433 if ( device == 0 && info.inputChannels > 0 )
7434 info.isDefaultInput = true;
7437 // At this point, we just need to figure out the supported data
7438 // formats and sample rates. We'll proceed by opening the device in
7439 // the direction with the maximum number of channels, or playback if
7440 // they are equal. This might limit our sample rate options, but so
7443 if ( info.outputChannels >= info.inputChannels )
7444 stream = SND_PCM_STREAM_PLAYBACK;
7446 stream = SND_PCM_STREAM_CAPTURE;
7447 snd_pcm_info_set_stream( pcminfo, stream );
7449 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7451 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7452 errorText_ = errorStream_.str();
7453 error( RtAudioError::WARNING );
7457 // The device is open ... fill the parameter structure.
7458 result = snd_pcm_hw_params_any( phandle, params );
7460 snd_pcm_close( phandle );
7461 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7462 errorText_ = errorStream_.str();
7463 error( RtAudioError::WARNING );
7467 // Test our discrete set of sample rate values.
7468 info.sampleRates.clear();
7469 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7470 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7471 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Preferred rate: the highest supported rate not exceeding 48 kHz
// (or the first supported rate when none have been recorded yet).
7473 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7474 info.preferredSampleRate = SAMPLE_RATES[i];
7477 if ( info.sampleRates.size() == 0 ) {
7478 snd_pcm_close( phandle );
7479 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7480 errorText_ = errorStream_.str();
7481 error( RtAudioError::WARNING );
7485 // Probe the supported data formats ... we don't care about endian-ness just yet
7486 snd_pcm_format_t format;
7487 info.nativeFormats = 0;
7488 format = SND_PCM_FORMAT_S8;
7489 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7490 info.nativeFormats |= RTAUDIO_SINT8;
7491 format = SND_PCM_FORMAT_S16;
7492 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7493 info.nativeFormats |= RTAUDIO_SINT16;
7494 format = SND_PCM_FORMAT_S24;
7495 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7496 info.nativeFormats |= RTAUDIO_SINT24;
7497 format = SND_PCM_FORMAT_S32;
7498 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7499 info.nativeFormats |= RTAUDIO_SINT32;
7500 format = SND_PCM_FORMAT_FLOAT;
7501 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7502 info.nativeFormats |= RTAUDIO_FLOAT32;
7503 format = SND_PCM_FORMAT_FLOAT64;
7504 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7505 info.nativeFormats |= RTAUDIO_FLOAT64;
7507 // Check that we have at least one supported format
7508 if ( info.nativeFormats == 0 ) {
7509 snd_pcm_close( phandle );
7510 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7511 errorText_ = errorStream_.str();
7512 error( RtAudioError::WARNING );
7516 // Get the device name
7518 result = snd_card_get_name( card, &cardname );
7519 if ( result >= 0 ) {
7520 sprintf( name, "hw:%s,%d", cardname, subdevice );
7525 // That's all ... close the device and return
7526 snd_pcm_close( phandle );
// Snapshots the info for every ALSA device into devices_. Called before
// opening a stream because getDeviceInfo() cannot re-probe a device that
// is already open; getDeviceInfo() serves the stream's devices from this
// cache while a stream is active.
7531 void RtApiAlsa :: saveDeviceInfo( void )
7535 unsigned int nDevices = getDeviceCount();
7536 devices_.resize( nDevices );
7537 for ( unsigned int i=0; i<nDevices; i++ )
7538 devices_[i] = getDeviceInfo( i );
7541 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7542 unsigned int firstChannel, unsigned int sampleRate,
7543 RtAudioFormat format, unsigned int *bufferSize,
7544 RtAudio::StreamOptions *options )
7547 #if defined(__RTAUDIO_DEBUG__)
7549 snd_output_stdio_attach(&out, stderr, 0);
7552 // I'm not using the "plug" interface ... too much inconsistent behavior.
7554 unsigned nDevices = 0;
7555 int result, subdevice, card;
7559 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7560 snprintf(name, sizeof(name), "%s", "default");
7562 // Count cards and devices
7564 snd_card_next( &card );
7565 while ( card >= 0 ) {
7566 sprintf( name, "hw:%d", card );
7567 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7569 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7570 errorText_ = errorStream_.str();
7575 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7576 if ( result < 0 ) break;
7577 if ( subdevice < 0 ) break;
7578 if ( nDevices == device ) {
7579 sprintf( name, "hw:%d,%d", card, subdevice );
7580 snd_ctl_close( chandle );
7585 snd_ctl_close( chandle );
7586 snd_card_next( &card );
7589 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7590 if ( result == 0 ) {
7591 if ( nDevices == device ) {
7592 strcpy( name, "default" );
7593 snd_ctl_close( chandle );
7598 snd_ctl_close( chandle );
7600 if ( nDevices == 0 ) {
7601 // This should not happen because a check is made before this function is called.
7602 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7606 if ( device >= nDevices ) {
7607 // This should not happen because a check is made before this function is called.
7608 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7615 // The getDeviceInfo() function will not work for a device that is
7616 // already open. Thus, we'll probe the system before opening a
7617 // stream and save the results for use by getDeviceInfo().
7618 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7619 this->saveDeviceInfo();
7621 snd_pcm_stream_t stream;
7622 if ( mode == OUTPUT )
7623 stream = SND_PCM_STREAM_PLAYBACK;
7625 stream = SND_PCM_STREAM_CAPTURE;
7628 int openMode = SND_PCM_ASYNC;
7629 result = snd_pcm_open( &phandle, name, stream, openMode );
7631 if ( mode == OUTPUT )
7632 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7634 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7635 errorText_ = errorStream_.str();
7639 // Fill the parameter structure.
7640 snd_pcm_hw_params_t *hw_params;
7641 snd_pcm_hw_params_alloca( &hw_params );
7642 result = snd_pcm_hw_params_any( phandle, hw_params );
7644 snd_pcm_close( phandle );
7645 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7646 errorText_ = errorStream_.str();
7650 #if defined(__RTAUDIO_DEBUG__)
7651 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7652 snd_pcm_hw_params_dump( hw_params, out );
7655 // Set access ... check user preference.
7656 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7657 stream_.userInterleaved = false;
7658 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7660 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7661 stream_.deviceInterleaved[mode] = true;
7664 stream_.deviceInterleaved[mode] = false;
7667 stream_.userInterleaved = true;
7668 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7670 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7671 stream_.deviceInterleaved[mode] = false;
7674 stream_.deviceInterleaved[mode] = true;
7678 snd_pcm_close( phandle );
7679 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7680 errorText_ = errorStream_.str();
7684 // Determine how to set the device format.
7685 stream_.userFormat = format;
7686 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7688 if ( format == RTAUDIO_SINT8 )
7689 deviceFormat = SND_PCM_FORMAT_S8;
7690 else if ( format == RTAUDIO_SINT16 )
7691 deviceFormat = SND_PCM_FORMAT_S16;
7692 else if ( format == RTAUDIO_SINT24 )
7693 deviceFormat = SND_PCM_FORMAT_S24;
7694 else if ( format == RTAUDIO_SINT32 )
7695 deviceFormat = SND_PCM_FORMAT_S32;
7696 else if ( format == RTAUDIO_FLOAT32 )
7697 deviceFormat = SND_PCM_FORMAT_FLOAT;
7698 else if ( format == RTAUDIO_FLOAT64 )
7699 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7701 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7702 stream_.deviceFormat[mode] = format;
7706 // The user requested format is not natively supported by the device.
7707 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7708 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7709 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7713 deviceFormat = SND_PCM_FORMAT_FLOAT;
7714 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7715 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7719 deviceFormat = SND_PCM_FORMAT_S32;
7720 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7721 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7725 deviceFormat = SND_PCM_FORMAT_S24;
7726 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7727 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7731 deviceFormat = SND_PCM_FORMAT_S16;
7732 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7733 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7737 deviceFormat = SND_PCM_FORMAT_S8;
7738 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7739 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7743 // If we get here, no supported format was found.
7744 snd_pcm_close( phandle );
7745 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7746 errorText_ = errorStream_.str();
7750 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7752 snd_pcm_close( phandle );
7753 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7754 errorText_ = errorStream_.str();
7758 // Determine whether byte-swaping is necessary.
7759 stream_.doByteSwap[mode] = false;
7760 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7761 result = snd_pcm_format_cpu_endian( deviceFormat );
7763 stream_.doByteSwap[mode] = true;
7764 else if (result < 0) {
7765 snd_pcm_close( phandle );
7766 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7767 errorText_ = errorStream_.str();
7772 // Set the sample rate.
7773 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7775 snd_pcm_close( phandle );
7776 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7777 errorText_ = errorStream_.str();
7781 // Determine the number of channels for this device. We support a possible
7782 // minimum device channel number > than the value requested by the user.
7783 stream_.nUserChannels[mode] = channels;
7785 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7786 unsigned int deviceChannels = value;
7787 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7788 snd_pcm_close( phandle );
7789 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7790 errorText_ = errorStream_.str();
7794 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7796 snd_pcm_close( phandle );
7797 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7798 errorText_ = errorStream_.str();
7801 deviceChannels = value;
7802 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7803 stream_.nDeviceChannels[mode] = deviceChannels;
7805 // Set the device channels.
7806 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7808 snd_pcm_close( phandle );
7809 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7810 errorText_ = errorStream_.str();
7814 // Set the buffer (or period) size.
7816 snd_pcm_uframes_t periodSize = *bufferSize;
7817 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7819 snd_pcm_close( phandle );
7820 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7821 errorText_ = errorStream_.str();
7824 *bufferSize = periodSize;
7826 // Set the buffer number, which in ALSA is referred to as the "period".
7827 unsigned int periods = 0;
7828 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7829 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7830 if ( periods < 2 ) periods = 4; // a fairly safe default value
7831 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7833 snd_pcm_close( phandle );
7834 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7835 errorText_ = errorStream_.str();
7839 // If attempting to setup a duplex stream, the bufferSize parameter
7840 // MUST be the same in both directions!
7841 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7842 snd_pcm_close( phandle );
7843 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7844 errorText_ = errorStream_.str();
7848 stream_.bufferSize = *bufferSize;
7850 // Install the hardware configuration
7851 result = snd_pcm_hw_params( phandle, hw_params );
7853 snd_pcm_close( phandle );
7854 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7855 errorText_ = errorStream_.str();
7859 #if defined(__RTAUDIO_DEBUG__)
7860 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7861 snd_pcm_hw_params_dump( hw_params, out );
7864 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7865 snd_pcm_sw_params_t *sw_params = NULL;
7866 snd_pcm_sw_params_alloca( &sw_params );
7867 snd_pcm_sw_params_current( phandle, sw_params );
7868 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7869 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7870 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7872 // The following two settings were suggested by Theo Veenker
7873 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7874 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7876 // here are two options for a fix
7877 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7878 snd_pcm_uframes_t val;
7879 snd_pcm_sw_params_get_boundary( sw_params, &val );
7880 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7882 result = snd_pcm_sw_params( phandle, sw_params );
7884 snd_pcm_close( phandle );
7885 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7886 errorText_ = errorStream_.str();
7890 #if defined(__RTAUDIO_DEBUG__)
7891 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7892 snd_pcm_sw_params_dump( sw_params, out );
7895 // Set flags for buffer conversion
7896 stream_.doConvertBuffer[mode] = false;
7897 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7898 stream_.doConvertBuffer[mode] = true;
7899 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7900 stream_.doConvertBuffer[mode] = true;
7901 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7902 stream_.nUserChannels[mode] > 1 )
7903 stream_.doConvertBuffer[mode] = true;
7905 // Allocate the ApiHandle if necessary and then save.
7906 AlsaHandle *apiInfo = 0;
7907 if ( stream_.apiHandle == 0 ) {
7909 apiInfo = (AlsaHandle *) new AlsaHandle;
7911 catch ( std::bad_alloc& ) {
7912 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7916 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7917 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7921 stream_.apiHandle = (void *) apiInfo;
7922 apiInfo->handles[0] = 0;
7923 apiInfo->handles[1] = 0;
7926 apiInfo = (AlsaHandle *) stream_.apiHandle;
7928 apiInfo->handles[mode] = phandle;
7931 // Allocate necessary internal buffers.
7932 unsigned long bufferBytes;
7933 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7934 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7935 if ( stream_.userBuffer[mode] == NULL ) {
7936 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7940 if ( stream_.doConvertBuffer[mode] ) {
7942 bool makeBuffer = true;
7943 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7944 if ( mode == INPUT ) {
7945 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7946 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7947 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7952 bufferBytes *= *bufferSize;
7953 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7954 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7955 if ( stream_.deviceBuffer == NULL ) {
7956 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7962 stream_.sampleRate = sampleRate;
7963 stream_.nBuffers = periods;
7964 stream_.device[mode] = device;
7965 stream_.state = STREAM_STOPPED;
7967 // Setup the buffer conversion information structure.
7968 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7970 // Setup thread if necessary.
7971 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7972 // We had already set up an output stream.
7973 stream_.mode = DUPLEX;
7974 // Link the streams if possible.
7975 apiInfo->synchronized = false;
7976 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7977 apiInfo->synchronized = true;
7979 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7980 error( RtAudioError::WARNING );
7984 stream_.mode = mode;
7986 // Setup callback thread.
7987 stream_.callbackInfo.object = (void *) this;
7989 // Set the thread attributes for joinable and realtime scheduling
7990 // priority (optional). The higher priority will only take affect
7991 // if the program is run as root or suid. Note, under Linux
7992 // processes with CAP_SYS_NICE privilege, a user can change
7993 // scheduling policy and priority (thus need not be root). See
7994 // POSIX "capabilities".
7995 pthread_attr_t attr;
7996 pthread_attr_init( &attr );
7997 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7998 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7999 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8000 stream_.callbackInfo.doRealtime = true;
8001 struct sched_param param;
8002 int priority = options->priority;
8003 int min = sched_get_priority_min( SCHED_RR );
8004 int max = sched_get_priority_max( SCHED_RR );
8005 if ( priority < min ) priority = min;
8006 else if ( priority > max ) priority = max;
8007 param.sched_priority = priority;
8009 // Set the policy BEFORE the priority. Otherwise it fails.
8010 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8011 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8012 // This is definitely required. Otherwise it fails.
8013 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8014 pthread_attr_setschedparam(&attr, ¶m);
8017 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8019 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8022 stream_.callbackInfo.isRunning = true;
8023 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
8024 pthread_attr_destroy( &attr );
8026 // Failed. Try instead with default attributes.
8027 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
8029 stream_.callbackInfo.isRunning = false;
8030 errorText_ = "RtApiAlsa::error creating callback thread!";
8040 pthread_cond_destroy( &apiInfo->runnable_cv );
8041 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8042 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8044 stream_.apiHandle = 0;
8047 if ( phandle) snd_pcm_close( phandle );
8049 for ( int i=0; i<2; i++ ) {
8050 if ( stream_.userBuffer[i] ) {
8051 free( stream_.userBuffer[i] );
8052 stream_.userBuffer[i] = 0;
8056 if ( stream_.deviceBuffer ) {
8057 free( stream_.deviceBuffer );
8058 stream_.deviceBuffer = 0;
8061 stream_.state = STREAM_CLOSED;
8065 void RtApiAlsa :: closeStream()
8067 if ( stream_.state == STREAM_CLOSED ) {
8068 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8069 error( RtAudioError::WARNING );
8073 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8074 stream_.callbackInfo.isRunning = false;
8075 MUTEX_LOCK( &stream_.mutex );
8076 if ( stream_.state == STREAM_STOPPED ) {
8077 apiInfo->runnable = true;
8078 pthread_cond_signal( &apiInfo->runnable_cv );
8080 MUTEX_UNLOCK( &stream_.mutex );
8081 pthread_join( stream_.callbackInfo.thread, NULL );
8083 if ( stream_.state == STREAM_RUNNING ) {
8084 stream_.state = STREAM_STOPPED;
8085 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8086 snd_pcm_drop( apiInfo->handles[0] );
8087 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8088 snd_pcm_drop( apiInfo->handles[1] );
8092 pthread_cond_destroy( &apiInfo->runnable_cv );
8093 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8094 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8096 stream_.apiHandle = 0;
8099 for ( int i=0; i<2; i++ ) {
8100 if ( stream_.userBuffer[i] ) {
8101 free( stream_.userBuffer[i] );
8102 stream_.userBuffer[i] = 0;
8106 if ( stream_.deviceBuffer ) {
8107 free( stream_.deviceBuffer );
8108 stream_.deviceBuffer = 0;
8111 stream_.mode = UNINITIALIZED;
8112 stream_.state = STREAM_CLOSED;
8115 void RtApiAlsa :: startStream()
8117 // This method calls snd_pcm_prepare if the device isn't already in that state.
8120 if ( stream_.state == STREAM_RUNNING ) {
8121 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8122 error( RtAudioError::WARNING );
8126 MUTEX_LOCK( &stream_.mutex );
8128 #if defined( HAVE_GETTIMEOFDAY )
8129 gettimeofday( &stream_.lastTickTimestamp, NULL );
8133 snd_pcm_state_t state;
8134 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8135 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8136 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8137 state = snd_pcm_state( handle[0] );
8138 if ( state != SND_PCM_STATE_PREPARED ) {
8139 result = snd_pcm_prepare( handle[0] );
8141 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8142 errorText_ = errorStream_.str();
8148 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8149 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8150 state = snd_pcm_state( handle[1] );
8151 if ( state != SND_PCM_STATE_PREPARED ) {
8152 result = snd_pcm_prepare( handle[1] );
8154 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8155 errorText_ = errorStream_.str();
8161 stream_.state = STREAM_RUNNING;
8164 apiInfo->runnable = true;
8165 pthread_cond_signal( &apiInfo->runnable_cv );
8166 MUTEX_UNLOCK( &stream_.mutex );
8168 if ( result >= 0 ) return;
8169 error( RtAudioError::SYSTEM_ERROR );
8172 void RtApiAlsa :: stopStream()
8175 if ( stream_.state == STREAM_STOPPED ) {
8176 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8177 error( RtAudioError::WARNING );
8181 stream_.state = STREAM_STOPPED;
8182 MUTEX_LOCK( &stream_.mutex );
8185 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8186 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8187 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8188 if ( apiInfo->synchronized )
8189 result = snd_pcm_drop( handle[0] );
8191 result = snd_pcm_drain( handle[0] );
8193 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8194 errorText_ = errorStream_.str();
8199 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8200 result = snd_pcm_drop( handle[1] );
8202 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8203 errorText_ = errorStream_.str();
8209 apiInfo->runnable = false; // fixes high CPU usage when stopped
8210 MUTEX_UNLOCK( &stream_.mutex );
8212 if ( result >= 0 ) return;
8213 error( RtAudioError::SYSTEM_ERROR );
8216 void RtApiAlsa :: abortStream()
8219 if ( stream_.state == STREAM_STOPPED ) {
8220 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8221 error( RtAudioError::WARNING );
8225 stream_.state = STREAM_STOPPED;
8226 MUTEX_LOCK( &stream_.mutex );
8229 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8230 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8231 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8232 result = snd_pcm_drop( handle[0] );
8234 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8235 errorText_ = errorStream_.str();
8240 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8241 result = snd_pcm_drop( handle[1] );
8243 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8244 errorText_ = errorStream_.str();
8250 apiInfo->runnable = false; // fixes high CPU usage when stopped
8251 MUTEX_UNLOCK( &stream_.mutex );
8253 if ( result >= 0 ) return;
8254 error( RtAudioError::SYSTEM_ERROR );
8257 void RtApiAlsa :: callbackEvent()
8259 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8260 if ( stream_.state == STREAM_STOPPED ) {
8261 MUTEX_LOCK( &stream_.mutex );
8262 while ( !apiInfo->runnable )
8263 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8265 if ( stream_.state != STREAM_RUNNING ) {
8266 MUTEX_UNLOCK( &stream_.mutex );
8269 MUTEX_UNLOCK( &stream_.mutex );
8272 if ( stream_.state == STREAM_CLOSED ) {
8273 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8274 error( RtAudioError::WARNING );
8278 int doStopStream = 0;
8279 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8280 double streamTime = getStreamTime();
8281 RtAudioStreamStatus status = 0;
8282 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8283 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8284 apiInfo->xrun[0] = false;
8286 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8287 status |= RTAUDIO_INPUT_OVERFLOW;
8288 apiInfo->xrun[1] = false;
8290 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8291 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
8293 if ( doStopStream == 2 ) {
8298 MUTEX_LOCK( &stream_.mutex );
8300 // The state might change while waiting on a mutex.
8301 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8307 snd_pcm_sframes_t frames;
8308 RtAudioFormat format;
8309 handle = (snd_pcm_t **) apiInfo->handles;
8311 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8313 // Setup parameters.
8314 if ( stream_.doConvertBuffer[1] ) {
8315 buffer = stream_.deviceBuffer;
8316 channels = stream_.nDeviceChannels[1];
8317 format = stream_.deviceFormat[1];
8320 buffer = stream_.userBuffer[1];
8321 channels = stream_.nUserChannels[1];
8322 format = stream_.userFormat;
8325 // Read samples from device in interleaved/non-interleaved format.
8326 if ( stream_.deviceInterleaved[1] )
8327 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
8329 void *bufs[channels];
8330 size_t offset = stream_.bufferSize * formatBytes( format );
8331 for ( int i=0; i<channels; i++ )
8332 bufs[i] = (void *) (buffer + (i * offset));
8333 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8336 if ( result < (int) stream_.bufferSize ) {
8337 // Either an error or overrun occured.
8338 if ( result == -EPIPE ) {
8339 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8340 if ( state == SND_PCM_STATE_XRUN ) {
8341 apiInfo->xrun[1] = true;
8342 result = snd_pcm_prepare( handle[1] );
8344 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8345 errorText_ = errorStream_.str();
8349 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8350 errorText_ = errorStream_.str();
8354 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8355 errorText_ = errorStream_.str();
8357 error( RtAudioError::WARNING );
8361 // Do byte swapping if necessary.
8362 if ( stream_.doByteSwap[1] )
8363 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8365 // Do buffer conversion if necessary.
8366 if ( stream_.doConvertBuffer[1] )
8367 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8369 // Check stream latency
8370 result = snd_pcm_delay( handle[1], &frames );
8371 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
8376 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8378 // Setup parameters and do buffer conversion if necessary.
8379 if ( stream_.doConvertBuffer[0] ) {
8380 buffer = stream_.deviceBuffer;
8381 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8382 channels = stream_.nDeviceChannels[0];
8383 format = stream_.deviceFormat[0];
8386 buffer = stream_.userBuffer[0];
8387 channels = stream_.nUserChannels[0];
8388 format = stream_.userFormat;
8391 // Do byte swapping if necessary.
8392 if ( stream_.doByteSwap[0] )
8393 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8395 // Write samples to device in interleaved/non-interleaved format.
8396 if ( stream_.deviceInterleaved[0] )
8397 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8399 void *bufs[channels];
8400 size_t offset = stream_.bufferSize * formatBytes( format );
8401 for ( int i=0; i<channels; i++ )
8402 bufs[i] = (void *) (buffer + (i * offset));
8403 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8406 if ( result < (int) stream_.bufferSize ) {
8407 // Either an error or underrun occured.
8408 if ( result == -EPIPE ) {
8409 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8410 if ( state == SND_PCM_STATE_XRUN ) {
8411 apiInfo->xrun[0] = true;
8412 result = snd_pcm_prepare( handle[0] );
8414 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8415 errorText_ = errorStream_.str();
8418 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8421 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8422 errorText_ = errorStream_.str();
8426 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8427 errorText_ = errorStream_.str();
8429 error( RtAudioError::WARNING );
8433 // Check stream latency
8434 result = snd_pcm_delay( handle[0], &frames );
8435 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8439 MUTEX_UNLOCK( &stream_.mutex );
8441 RtApi::tickStreamTime();
8442 if ( doStopStream == 1 ) this->stopStream();
8445 static void *alsaCallbackHandler( void *ptr )
8447 CallbackInfo *info = (CallbackInfo *) ptr;
8448 RtApiAlsa *object = (RtApiAlsa *) info->object;
8449 bool *isRunning = &info->isRunning;
8451 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8452 if ( info->doRealtime ) {
8453 std::cerr << "RtAudio alsa: " <<
8454 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8455 "running realtime scheduling" << std::endl;
8459 while ( *isRunning == true ) {
8460 pthread_testcancel();
8461 object->callbackEvent();
8464 pthread_exit( NULL );
8467 //******************** End of __LINUX_ALSA__ *********************//
8470 #if defined(__LINUX_PULSE__)
8472 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8473 // and Tristan Matthews.
8475 #include <pulse/error.h>
8476 #include <pulse/simple.h>
8479 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8480 44100, 48000, 96000, 0};
8482 struct rtaudio_pa_format_mapping_t {
8483 RtAudioFormat rtaudio_format;
8484 pa_sample_format_t pa_format;
8487 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8488 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8489 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8490 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8491 {0, PA_SAMPLE_INVALID}};
8493 struct PulseAudioHandle {
8497 pthread_cond_t runnable_cv;
8499 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
8502 RtApiPulse::~RtApiPulse()
8504 if ( stream_.state != STREAM_CLOSED )
8508 unsigned int RtApiPulse::getDeviceCount( void )
8513 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8515 RtAudio::DeviceInfo info;
8517 info.name = "PulseAudio";
8518 info.outputChannels = 2;
8519 info.inputChannels = 2;
8520 info.duplexChannels = 2;
8521 info.isDefaultOutput = true;
8522 info.isDefaultInput = true;
8524 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8525 info.sampleRates.push_back( *sr );
8527 info.preferredSampleRate = 48000;
8528 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8533 static void *pulseaudio_callback( void * user )
8535 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8536 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8537 volatile bool *isRunning = &cbi->isRunning;
8539 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8540 if (cbi->doRealtime) {
8541 std::cerr << "RtAudio pulse: " <<
8542 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8543 "running realtime scheduling" << std::endl;
8547 while ( *isRunning ) {
8548 pthread_testcancel();
8549 context->callbackEvent();
8552 pthread_exit( NULL );
8555 void RtApiPulse::closeStream( void )
8557 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8559 stream_.callbackInfo.isRunning = false;
8561 MUTEX_LOCK( &stream_.mutex );
8562 if ( stream_.state == STREAM_STOPPED ) {
8563 pah->runnable = true;
8564 pthread_cond_signal( &pah->runnable_cv );
8566 MUTEX_UNLOCK( &stream_.mutex );
8568 pthread_join( pah->thread, 0 );
8569 if ( pah->s_play ) {
8570 pa_simple_flush( pah->s_play, NULL );
8571 pa_simple_free( pah->s_play );
8574 pa_simple_free( pah->s_rec );
8576 pthread_cond_destroy( &pah->runnable_cv );
8578 stream_.apiHandle = 0;
8581 if ( stream_.userBuffer[0] ) {
8582 free( stream_.userBuffer[0] );
8583 stream_.userBuffer[0] = 0;
8585 if ( stream_.userBuffer[1] ) {
8586 free( stream_.userBuffer[1] );
8587 stream_.userBuffer[1] = 0;
8590 stream_.state = STREAM_CLOSED;
8591 stream_.mode = UNINITIALIZED;
8594 void RtApiPulse::callbackEvent( void )
8596 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8598 if ( stream_.state == STREAM_STOPPED ) {
8599 MUTEX_LOCK( &stream_.mutex );
8600 while ( !pah->runnable )
8601 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8603 if ( stream_.state != STREAM_RUNNING ) {
8604 MUTEX_UNLOCK( &stream_.mutex );
8607 MUTEX_UNLOCK( &stream_.mutex );
8610 if ( stream_.state == STREAM_CLOSED ) {
8611 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8612 "this shouldn't happen!";
8613 error( RtAudioError::WARNING );
8617 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8618 double streamTime = getStreamTime();
8619 RtAudioStreamStatus status = 0;
8620 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8621 stream_.bufferSize, streamTime, status,
8622 stream_.callbackInfo.userData );
8624 if ( doStopStream == 2 ) {
8629 MUTEX_LOCK( &stream_.mutex );
8630 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8631 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8633 if ( stream_.state != STREAM_RUNNING )
8638 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8639 if ( stream_.doConvertBuffer[OUTPUT] ) {
8640 convertBuffer( stream_.deviceBuffer,
8641 stream_.userBuffer[OUTPUT],
8642 stream_.convertInfo[OUTPUT] );
8643 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8644 formatBytes( stream_.deviceFormat[OUTPUT] );
8646 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8647 formatBytes( stream_.userFormat );
8649 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8650 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8651 pa_strerror( pa_error ) << ".";
8652 errorText_ = errorStream_.str();
8653 error( RtAudioError::WARNING );
8657 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8658 if ( stream_.doConvertBuffer[INPUT] )
8659 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8660 formatBytes( stream_.deviceFormat[INPUT] );
8662 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8663 formatBytes( stream_.userFormat );
8665 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8666 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8667 pa_strerror( pa_error ) << ".";
8668 errorText_ = errorStream_.str();
8669 error( RtAudioError::WARNING );
8671 if ( stream_.doConvertBuffer[INPUT] ) {
8672 convertBuffer( stream_.userBuffer[INPUT],
8673 stream_.deviceBuffer,
8674 stream_.convertInfo[INPUT] );
8679 MUTEX_UNLOCK( &stream_.mutex );
8680 RtApi::tickStreamTime();
8682 if ( doStopStream == 1 )
8686 void RtApiPulse::startStream( void )
8688 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8690 if ( stream_.state == STREAM_CLOSED ) {
8691 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8692 error( RtAudioError::INVALID_USE );
8695 if ( stream_.state == STREAM_RUNNING ) {
8696 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8697 error( RtAudioError::WARNING );
8701 MUTEX_LOCK( &stream_.mutex );
8703 #if defined( HAVE_GETTIMEOFDAY )
8704 gettimeofday( &stream_.lastTickTimestamp, NULL );
8707 stream_.state = STREAM_RUNNING;
8709 pah->runnable = true;
8710 pthread_cond_signal( &pah->runnable_cv );
8711 MUTEX_UNLOCK( &stream_.mutex );
8714 void RtApiPulse::stopStream( void )
8716 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8718 if ( stream_.state == STREAM_CLOSED ) {
8719 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8720 error( RtAudioError::INVALID_USE );
8723 if ( stream_.state == STREAM_STOPPED ) {
8724 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8725 error( RtAudioError::WARNING );
8729 stream_.state = STREAM_STOPPED;
8730 MUTEX_LOCK( &stream_.mutex );
8732 if ( pah && pah->s_play ) {
8734 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8735 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8736 pa_strerror( pa_error ) << ".";
8737 errorText_ = errorStream_.str();
8738 MUTEX_UNLOCK( &stream_.mutex );
8739 error( RtAudioError::SYSTEM_ERROR );
8744 stream_.state = STREAM_STOPPED;
8745 MUTEX_UNLOCK( &stream_.mutex );
8748 void RtApiPulse::abortStream( void )
8750 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8752 if ( stream_.state == STREAM_CLOSED ) {
8753 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8754 error( RtAudioError::INVALID_USE );
8757 if ( stream_.state == STREAM_STOPPED ) {
8758 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8759 error( RtAudioError::WARNING );
8763 stream_.state = STREAM_STOPPED;
8764 MUTEX_LOCK( &stream_.mutex );
8766 if ( pah && pah->s_play ) {
8768 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8769 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8770 pa_strerror( pa_error ) << ".";
8771 errorText_ = errorStream_.str();
8772 MUTEX_UNLOCK( &stream_.mutex );
8773 error( RtAudioError::SYSTEM_ERROR );
8778 stream_.state = STREAM_STOPPED;
8779 MUTEX_UNLOCK( &stream_.mutex );
8782 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8783 unsigned int channels, unsigned int firstChannel,
8784 unsigned int sampleRate, RtAudioFormat format,
8785 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8787 PulseAudioHandle *pah = 0;
8788 unsigned long bufferBytes = 0;
8791 if ( device != 0 ) return false;
8792 if ( mode != INPUT && mode != OUTPUT ) return false;
8793 if ( channels != 1 && channels != 2 ) {
8794 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8797 ss.channels = channels;
8799 if ( firstChannel != 0 ) return false;
8801 bool sr_found = false;
8802 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8803 if ( sampleRate == *sr ) {
8805 stream_.sampleRate = sampleRate;
8806 ss.rate = sampleRate;
8811 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8816 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8817 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8818 if ( format == sf->rtaudio_format ) {
8820 stream_.userFormat = sf->rtaudio_format;
8821 stream_.deviceFormat[mode] = stream_.userFormat;
8822 ss.format = sf->pa_format;
8826 if ( !sf_found ) { // Use internal data format conversion.
8827 stream_.userFormat = format;
8828 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8829 ss.format = PA_SAMPLE_FLOAT32LE;
8832 // Set other stream parameters.
8833 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8834 else stream_.userInterleaved = true;
8835 stream_.deviceInterleaved[mode] = true;
8836 stream_.nBuffers = 1;
8837 stream_.doByteSwap[mode] = false;
8838 stream_.nUserChannels[mode] = channels;
8839 stream_.nDeviceChannels[mode] = channels + firstChannel;
8840 stream_.channelOffset[mode] = 0;
8841 std::string streamName = "RtAudio";
8843 // Set flags for buffer conversion.
8844 stream_.doConvertBuffer[mode] = false;
8845 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8846 stream_.doConvertBuffer[mode] = true;
8847 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8848 stream_.doConvertBuffer[mode] = true;
8850 // Allocate necessary internal buffers.
8851 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8852 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8853 if ( stream_.userBuffer[mode] == NULL ) {
8854 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8857 stream_.bufferSize = *bufferSize;
8859 if ( stream_.doConvertBuffer[mode] ) {
8861 bool makeBuffer = true;
8862 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8863 if ( mode == INPUT ) {
8864 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8865 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8866 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8871 bufferBytes *= *bufferSize;
8872 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8873 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8874 if ( stream_.deviceBuffer == NULL ) {
8875 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8881 stream_.device[mode] = device;
8883 // Setup the buffer conversion information structure.
8884 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8886 if ( !stream_.apiHandle ) {
8887 PulseAudioHandle *pah = new PulseAudioHandle;
8889 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8893 stream_.apiHandle = pah;
8894 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8895 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8899 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8902 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8905 pa_buffer_attr buffer_attr;
8906 buffer_attr.fragsize = bufferBytes;
8907 buffer_attr.maxlength = -1;
8909 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8910 if ( !pah->s_rec ) {
8911 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8916 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8917 if ( !pah->s_play ) {
8918 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8926 if ( stream_.mode == UNINITIALIZED )
8927 stream_.mode = mode;
8928 else if ( stream_.mode == mode )
8931 stream_.mode = DUPLEX;
8933 if ( !stream_.callbackInfo.isRunning ) {
8934 stream_.callbackInfo.object = this;
8936 stream_.state = STREAM_STOPPED;
8937 // Set the thread attributes for joinable and realtime scheduling
8938 // priority (optional). The higher priority will only take affect
8939 // if the program is run as root or suid. Note, under Linux
8940 // processes with CAP_SYS_NICE privilege, a user can change
8941 // scheduling policy and priority (thus need not be root). See
8942 // POSIX "capabilities".
8943 pthread_attr_t attr;
8944 pthread_attr_init( &attr );
8945 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8946 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8947 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8948 stream_.callbackInfo.doRealtime = true;
8949 struct sched_param param;
8950 int priority = options->priority;
8951 int min = sched_get_priority_min( SCHED_RR );
8952 int max = sched_get_priority_max( SCHED_RR );
8953 if ( priority < min ) priority = min;
8954 else if ( priority > max ) priority = max;
8955 param.sched_priority = priority;
8957 // Set the policy BEFORE the priority. Otherwise it fails.
8958 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8959 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8960 // This is definitely required. Otherwise it fails.
8961 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8962 pthread_attr_setschedparam(&attr, ¶m);
8965 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8967 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8970 stream_.callbackInfo.isRunning = true;
8971 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8972 pthread_attr_destroy(&attr);
8974 // Failed. Try instead with default attributes.
8975 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8977 stream_.callbackInfo.isRunning = false;
8978 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8987 if ( pah && stream_.callbackInfo.isRunning ) {
8988 pthread_cond_destroy( &pah->runnable_cv );
8990 stream_.apiHandle = 0;
8993 for ( int i=0; i<2; i++ ) {
8994 if ( stream_.userBuffer[i] ) {
8995 free( stream_.userBuffer[i] );
8996 stream_.userBuffer[i] = 0;
9000 if ( stream_.deviceBuffer ) {
9001 free( stream_.deviceBuffer );
9002 stream_.deviceBuffer = 0;
9005 stream_.state = STREAM_CLOSED;
9009 //******************** End of __LINUX_PULSE__ *********************//
9012 #if defined(__LINUX_OSS__)
9015 #include <sys/ioctl.h>
9018 #include <sys/soundcard.h>
9022 static void *ossCallbackHandler(void * ptr);
// A structure to hold various information related to the OSS API
// implementation.
  int id[2];                // device file descriptors: [0] = playback, [1] = capture
  pthread_cond_t runnable;  // signaled by startStream()/closeStream() to wake the callback thread
  // Constructor initializer: start untriggered with cleared ids and xrun flags.
  :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor: all state is initialized by the RtApi base class.
RtApiOss :: RtApiOss()
  // Nothing to do here.
// Destructor: close the stream (halting devices and freeing buffers)
// if it was not already closed by the user.
RtApiOss :: ~RtApiOss()
  if ( stream_.state != STREAM_CLOSED ) closeStream();
// Return the number of OSS audio devices reported by the system.
// Probes via the '/dev/mixer' SNDCTL_SYSINFO ioctl, which requires
// OSS version 4.0 or later.  Failures emit a non-fatal WARNING.
unsigned int RtApiOss :: getDeviceCount( void )
  int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
  if ( mixerfd == -1 ) {
    errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
    error( RtAudioError::WARNING );
  oss_sysinfo sysinfo;
  if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
    // SNDCTL_SYSINFO only exists in OSS >= 4.0.
    errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
    error( RtAudioError::WARNING );
  return sysinfo.numaudios;
// Fill and return an RtAudio::DeviceInfo describing OSS device "device".
// Queries '/dev/mixer' with SNDCTL_SYSINFO / SNDCTL_AUDIOINFO (OSS >= 4.0),
// then derives channel counts, native formats and supported sample rates
// from the reported capabilities.  On failure a WARNING (or INVALID_USE
// for a bad device index) is reported; info.probed stays false in that case.
RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = false;   // only set true on a fully successful probe (not visible in this copy)

  int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
  if ( mixerfd == -1 ) {
    errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
    error( RtAudioError::WARNING );

  oss_sysinfo sysinfo;
  int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
  if ( result == -1 ) {
    // SNDCTL_SYSINFO only exists in OSS >= 4.0.
    errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
    error( RtAudioError::WARNING );

  unsigned nDevices = sysinfo.numaudios;
  if ( nDevices == 0 ) {
    errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );

  if ( device >= nDevices ) {
    errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  // Query detailed information for the requested device.
  oss_audioinfo ainfo;
  result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
  if ( result == -1 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Channel capabilities.
  if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
  if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
  if ( ainfo.caps & PCM_CAP_DUPLEX ) {
    // Duplex channel count is the smaller of the two directions.
    if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
      info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Probe data formats ... do for input
  unsigned long mask = ainfo.iformats;
  if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
    info.nativeFormats |= RTAUDIO_SINT16;
  if ( mask & AFMT_S8 )
    info.nativeFormats |= RTAUDIO_SINT8;
  if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
    info.nativeFormats |= RTAUDIO_SINT32;
  if ( mask & AFMT_FLOAT )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
    info.nativeFormats |= RTAUDIO_SINT24;

  // Check that we have at least one supported format
  if ( info.nativeFormats == 0 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Probe the supported sample rates.
  info.sampleRates.clear();
  if ( ainfo.nrates ) {
    // The device reports an explicit rate list; keep those RtAudio supports.
    for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
      for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
        if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
          info.sampleRates.push_back( SAMPLE_RATES[k] );
          // Preferred rate: highest supported rate not exceeding 48000 Hz.
          if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
            info.preferredSampleRate = SAMPLE_RATES[k];

  // Check min and max rate values;
  // (this branch handles devices that report a continuous rate range)
  for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
    if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
      info.sampleRates.push_back( SAMPLE_RATES[k] );
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[k];

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  info.name = ainfo.name;
9187 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9188 unsigned int firstChannel, unsigned int sampleRate,
9189 RtAudioFormat format, unsigned int *bufferSize,
9190 RtAudio::StreamOptions *options )
9192 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9193 if ( mixerfd == -1 ) {
9194 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9198 oss_sysinfo sysinfo;
9199 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9200 if ( result == -1 ) {
9202 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9206 unsigned nDevices = sysinfo.numaudios;
9207 if ( nDevices == 0 ) {
9208 // This should not happen because a check is made before this function is called.
9210 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9214 if ( device >= nDevices ) {
9215 // This should not happen because a check is made before this function is called.
9217 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9221 oss_audioinfo ainfo;
9223 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9225 if ( result == -1 ) {
9226 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9227 errorText_ = errorStream_.str();
9231 // Check if device supports input or output
9232 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9233 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9234 if ( mode == OUTPUT )
9235 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9237 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9238 errorText_ = errorStream_.str();
9243 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9244 if ( mode == OUTPUT )
9246 else { // mode == INPUT
9247 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9248 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9249 close( handle->id[0] );
9251 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9252 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9253 errorText_ = errorStream_.str();
9256 // Check that the number previously set channels is the same.
9257 if ( stream_.nUserChannels[0] != channels ) {
9258 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9259 errorText_ = errorStream_.str();
9268 // Set exclusive access if specified.
9269 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9271 // Try to open the device.
9273 fd = open( ainfo.devnode, flags, 0 );
9275 if ( errno == EBUSY )
9276 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9278 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9279 errorText_ = errorStream_.str();
9283 // For duplex operation, specifically set this mode (this doesn't seem to work).
9285 if ( flags | O_RDWR ) {
9286 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9287 if ( result == -1) {
9288 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9289 errorText_ = errorStream_.str();
9295 // Check the device channel support.
9296 stream_.nUserChannels[mode] = channels;
9297 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9299 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9300 errorText_ = errorStream_.str();
9304 // Set the number of channels.
9305 int deviceChannels = channels + firstChannel;
9306 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9307 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9309 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9310 errorText_ = errorStream_.str();
9313 stream_.nDeviceChannels[mode] = deviceChannels;
9315 // Get the data format mask
9317 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9318 if ( result == -1 ) {
9320 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9321 errorText_ = errorStream_.str();
9325 // Determine how to set the device format.
9326 stream_.userFormat = format;
9327 int deviceFormat = -1;
9328 stream_.doByteSwap[mode] = false;
9329 if ( format == RTAUDIO_SINT8 ) {
9330 if ( mask & AFMT_S8 ) {
9331 deviceFormat = AFMT_S8;
9332 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9335 else if ( format == RTAUDIO_SINT16 ) {
9336 if ( mask & AFMT_S16_NE ) {
9337 deviceFormat = AFMT_S16_NE;
9338 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9340 else if ( mask & AFMT_S16_OE ) {
9341 deviceFormat = AFMT_S16_OE;
9342 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9343 stream_.doByteSwap[mode] = true;
9346 else if ( format == RTAUDIO_SINT24 ) {
9347 if ( mask & AFMT_S24_NE ) {
9348 deviceFormat = AFMT_S24_NE;
9349 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9351 else if ( mask & AFMT_S24_OE ) {
9352 deviceFormat = AFMT_S24_OE;
9353 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9354 stream_.doByteSwap[mode] = true;
9357 else if ( format == RTAUDIO_SINT32 ) {
9358 if ( mask & AFMT_S32_NE ) {
9359 deviceFormat = AFMT_S32_NE;
9360 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9362 else if ( mask & AFMT_S32_OE ) {
9363 deviceFormat = AFMT_S32_OE;
9364 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9365 stream_.doByteSwap[mode] = true;
9369 if ( deviceFormat == -1 ) {
9370 // The user requested format is not natively supported by the device.
9371 if ( mask & AFMT_S16_NE ) {
9372 deviceFormat = AFMT_S16_NE;
9373 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9375 else if ( mask & AFMT_S32_NE ) {
9376 deviceFormat = AFMT_S32_NE;
9377 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9379 else if ( mask & AFMT_S24_NE ) {
9380 deviceFormat = AFMT_S24_NE;
9381 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9383 else if ( mask & AFMT_S16_OE ) {
9384 deviceFormat = AFMT_S16_OE;
9385 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9386 stream_.doByteSwap[mode] = true;
9388 else if ( mask & AFMT_S32_OE ) {
9389 deviceFormat = AFMT_S32_OE;
9390 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9391 stream_.doByteSwap[mode] = true;
9393 else if ( mask & AFMT_S24_OE ) {
9394 deviceFormat = AFMT_S24_OE;
9395 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9396 stream_.doByteSwap[mode] = true;
9398 else if ( mask & AFMT_S8) {
9399 deviceFormat = AFMT_S8;
9400 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9404 if ( stream_.deviceFormat[mode] == 0 ) {
9405 // This really shouldn't happen ...
9407 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9408 errorText_ = errorStream_.str();
9412 // Set the data format.
9413 int temp = deviceFormat;
9414 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9415 if ( result == -1 || deviceFormat != temp ) {
9417 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9418 errorText_ = errorStream_.str();
9422 // Attempt to set the buffer size. According to OSS, the minimum
9423 // number of buffers is two. The supposed minimum buffer size is 16
9424 // bytes, so that will be our lower bound. The argument to this
9425 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9426 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9427 // We'll check the actual value used near the end of the setup
9429 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9430 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9432 if ( options ) buffers = options->numberOfBuffers;
9433 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9434 if ( buffers < 2 ) buffers = 3;
9435 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9436 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9437 if ( result == -1 ) {
9439 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9440 errorText_ = errorStream_.str();
9443 stream_.nBuffers = buffers;
9445 // Save buffer size (in sample frames).
9446 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9447 stream_.bufferSize = *bufferSize;
9449 // Set the sample rate.
9450 int srate = sampleRate;
9451 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9452 if ( result == -1 ) {
9454 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9455 errorText_ = errorStream_.str();
9459 // Verify the sample rate setup worked.
9460 if ( abs( srate - (int)sampleRate ) > 100 ) {
9462 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9463 errorText_ = errorStream_.str();
9466 stream_.sampleRate = sampleRate;
9468 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9469 // We're doing duplex setup here.
9470 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9471 stream_.nDeviceChannels[0] = deviceChannels;
9474 // Set interleaving parameters.
9475 stream_.userInterleaved = true;
9476 stream_.deviceInterleaved[mode] = true;
9477 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9478 stream_.userInterleaved = false;
9480 // Set flags for buffer conversion
9481 stream_.doConvertBuffer[mode] = false;
9482 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9483 stream_.doConvertBuffer[mode] = true;
9484 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9485 stream_.doConvertBuffer[mode] = true;
9486 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9487 stream_.nUserChannels[mode] > 1 )
9488 stream_.doConvertBuffer[mode] = true;
9490 // Allocate the stream handles if necessary and then save.
9491 if ( stream_.apiHandle == 0 ) {
9493 handle = new OssHandle;
9495 catch ( std::bad_alloc& ) {
9496 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9500 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9501 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9505 stream_.apiHandle = (void *) handle;
9508 handle = (OssHandle *) stream_.apiHandle;
9510 handle->id[mode] = fd;
9512 // Allocate necessary internal buffers.
9513 unsigned long bufferBytes;
9514 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9515 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9516 if ( stream_.userBuffer[mode] == NULL ) {
9517 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9521 if ( stream_.doConvertBuffer[mode] ) {
9523 bool makeBuffer = true;
9524 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9525 if ( mode == INPUT ) {
9526 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9527 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9528 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9533 bufferBytes *= *bufferSize;
9534 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9535 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9536 if ( stream_.deviceBuffer == NULL ) {
9537 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9543 stream_.device[mode] = device;
9544 stream_.state = STREAM_STOPPED;
9546 // Setup the buffer conversion information structure.
9547 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9549 // Setup thread if necessary.
9550 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9551 // We had already set up an output stream.
9552 stream_.mode = DUPLEX;
9553 if ( stream_.device[0] == device ) handle->id[0] = fd;
9556 stream_.mode = mode;
9558 // Setup callback thread.
9559 stream_.callbackInfo.object = (void *) this;
9561 // Set the thread attributes for joinable and realtime scheduling
9562 // priority. The higher priority will only take affect if the
9563 // program is run as root or suid.
9564 pthread_attr_t attr;
9565 pthread_attr_init( &attr );
9566 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9567 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9568 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9569 stream_.callbackInfo.doRealtime = true;
9570 struct sched_param param;
9571 int priority = options->priority;
9572 int min = sched_get_priority_min( SCHED_RR );
9573 int max = sched_get_priority_max( SCHED_RR );
9574 if ( priority < min ) priority = min;
9575 else if ( priority > max ) priority = max;
9576 param.sched_priority = priority;
9578 // Set the policy BEFORE the priority. Otherwise it fails.
9579 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9580 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9581 // This is definitely required. Otherwise it fails.
9582 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9583 pthread_attr_setschedparam(&attr, ¶m);
9586 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9588 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9591 stream_.callbackInfo.isRunning = true;
9592 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9593 pthread_attr_destroy( &attr );
9595 // Failed. Try instead with default attributes.
9596 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9598 stream_.callbackInfo.isRunning = false;
9599 errorText_ = "RtApiOss::error creating callback thread!";
9609 pthread_cond_destroy( &handle->runnable );
9610 if ( handle->id[0] ) close( handle->id[0] );
9611 if ( handle->id[1] ) close( handle->id[1] );
9613 stream_.apiHandle = 0;
9616 for ( int i=0; i<2; i++ ) {
9617 if ( stream_.userBuffer[i] ) {
9618 free( stream_.userBuffer[i] );
9619 stream_.userBuffer[i] = 0;
9623 if ( stream_.deviceBuffer ) {
9624 free( stream_.deviceBuffer );
9625 stream_.deviceBuffer = 0;
9628 stream_.state = STREAM_CLOSED;
// Close the open stream: shut down the callback thread, halt any
// in-progress device I/O, close the descriptors and free all buffers.
void RtApiOss :: closeStream()
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );

  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  stream_.callbackInfo.isRunning = false;   // tell the callback thread to exit its loop
  MUTEX_LOCK( &stream_.mutex );
  if ( stream_.state == STREAM_STOPPED )
    pthread_cond_signal( &handle->runnable );   // wake a thread blocked waiting for start
  MUTEX_UNLOCK( &stream_.mutex );
  pthread_join( stream_.callbackInfo.thread, NULL );

  if ( stream_.state == STREAM_RUNNING ) {
    // Halt the playback and/or capture descriptor before closing.
    if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
      ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
      ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    stream_.state = STREAM_STOPPED;

  // Release the API handle and close the device descriptors.
  pthread_cond_destroy( &handle->runnable );
  if ( handle->id[0] ) close( handle->id[0] );
  if ( handle->id[1] ) close( handle->id[1] );
  stream_.apiHandle = 0;

  // Free the per-direction user buffers.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
// Start the stream: mark it running and wake the callback thread.
// OSS itself starts automatically as soon as samples are written/read.
void RtApiOss :: startStream()
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiOss::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );

  MUTEX_LOCK( &stream_.mutex );

#if defined( HAVE_GETTIMEOFDAY )
  // Reset the stream-time reference so getStreamTime() is accurate.
  gettimeofday( &stream_.lastTickTimestamp, NULL );

  stream_.state = STREAM_RUNNING;

  // No need to do anything else here ... OSS automatically starts
  // when fed samples.

  MUTEX_UNLOCK( &stream_.mutex );

  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  pthread_cond_signal( &handle->runnable );   // wake the callback thread blocked in callbackEvent()
// Stop the stream, draining pending output: the output is flushed with
// several buffers of silence before halting, so the last user data is
// actually played (contrast abortStream(), which halts immediately).
void RtApiOss :: stopStream()
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_UNLOCK( &stream_.mutex );

  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Flush the output with zeros a few times.
    RtAudioFormat format;
    // Pick whichever buffer actually feeds the device (converted or user).
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;

    memset( buffer, 0, samples * formatBytes(format) );
    // Write nBuffers+1 silent buffers to push out any queued audio.
    for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      if ( result == -1 ) {
        errorText_ = "RtApiOss::stopStream: audio write error.";
        error( RtAudioError::WARNING );

    result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
    handle->triggered = false;   // duplex trigger must be re-armed on next start

  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
    result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();

  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result != -1 ) return;
  error( RtAudioError::SYSTEM_ERROR );
// Abort the stream immediately: halt the device(s) without draining
// pending output (unlike stopStream(), no silence flush is performed).
void RtApiOss :: abortStream()
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_UNLOCK( &stream_.mutex );

  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
    handle->triggered = false;   // duplex trigger must be re-armed on next start

  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
    result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();

  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result != -1 ) return;
  error( RtAudioError::SYSTEM_ERROR );
// Per-buffer processing loop body, run on the callback thread: wait
// until the stream is runnable, invoke the user callback, then write
// output to and/or read input from the OSS device (with format
// conversion and byte swapping as configured).
void RtApiOss :: callbackEvent()
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    // Block here until startStream() (or closeStream()) signals us.
    MUTEX_LOCK( &stream_.mutex );
    pthread_cond_wait( &handle->runnable, &stream_.mutex );
    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
    MUTEX_UNLOCK( &stream_.mutex );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );

  // Invoke user callback to get fresh output data.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  // Report (and clear) any xrun flags set by previous read/write failures.
  if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    handle->xrun[0] = false;
  if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    handle->xrun[1] = false;
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
  if ( doStopStream == 2 ) {
    // Callback requested an immediate abort (no output drain).
    this->abortStream();

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  RtAudioFormat format;

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer( buffer, samples, format );

    if ( stream_.mode == DUPLEX && handle->triggered == false ) {
      // First duplex pass: prime the output, then enable the input and
      // output triggers together so capture and playback start in sync.
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      handle->triggered = true;
      // Write samples to device.
      result = write( handle->id[0], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an underrun, though there isn't a
      // specific means for determining that.
      handle->xrun[0] = true;
      errorText_ = "RtApiOss::callbackEvent: audio write error.";
      error( RtAudioError::WARNING );
      // Continue on to input section.

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
      buffer = stream_.userBuffer[1];
      samples = stream_.bufferSize * stream_.nUserChannels[1];
      format = stream_.userFormat;

    // Read samples from device.
    result = read( handle->id[1], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an overrun, though there isn't a
      // specific means for determining that.
      handle->xrun[1] = true;
      errorText_ = "RtApiOss::callbackEvent: audio read error.";
      error( RtAudioError::WARNING );

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, samples, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

  MUTEX_UNLOCK( &stream_.mutex );

  RtApi::tickStreamTime();
  if ( doStopStream == 1 ) this->stopStream();
9956 static void *ossCallbackHandler( void *ptr )
9958 CallbackInfo *info = (CallbackInfo *) ptr;
9959 RtApiOss *object = (RtApiOss *) info->object;
9960 bool *isRunning = &info->isRunning;
9962 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9963 if (info->doRealtime) {
9964 std::cerr << "RtAudio oss: " <<
9965 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9966 "running realtime scheduling" << std::endl;
9970 while ( *isRunning == true ) {
9971 pthread_testcancel();
9972 object->callbackEvent();
9975 pthread_exit( NULL );
9978 //******************** End of __LINUX_OSS__ *********************//
9982 // *************************************************** //
9984 // Protected common (OS-independent) RtAudio methods.
9986 // *************************************************** //
9988 // This method can be modified to control the behavior of error
9989 // message printing.
9990 //void RtApi :: error( RtAudioError::Type type )
9991 RtAudioError::Type RtApi :: error( RtAudioError::Type type )
9993 errorStream_.str(""); // clear the ostringstream to avoid repeated messages
9995 // Don't output warnings if showWarnings_ is false
9996 if ( type == RtAudioError::WARNING && showWarnings_ == false ) return type;
9998 if ( errorCallback_ ) {
9999 const std::string errorMessage = errorText_;
10000 errorCallback_( type, errorMessage );
10003 std::cerr << '\n' << errorText_ << "\n\n";
10008 void RtApi :: verifyStream()
10010 if ( stream_.state == STREAM_CLOSED ) {
10011 errorText_ = "RtApi:: a stream is not open!";
10012 error( RtAudioError::INVALID_USE );
10017 void RtApi :: clearStreamInfo()
10019 stream_.mode = UNINITIALIZED;
10020 stream_.state = STREAM_CLOSED;
10021 stream_.sampleRate = 0;
10022 stream_.bufferSize = 0;
10023 stream_.nBuffers = 0;
10024 stream_.userFormat = 0;
10025 stream_.userInterleaved = true;
10026 stream_.streamTime = 0.0;
10027 stream_.apiHandle = 0;
10028 stream_.deviceBuffer = 0;
10029 stream_.callbackInfo.callback = 0;
10030 stream_.callbackInfo.userData = 0;
10031 stream_.callbackInfo.isRunning = false;
10032 //stream_.callbackInfo.errorCallback = 0;
10033 for ( int i=0; i<2; i++ ) {
10034 stream_.device[i] = 11111;
10035 stream_.doConvertBuffer[i] = false;
10036 stream_.deviceInterleaved[i] = true;
10037 stream_.doByteSwap[i] = false;
10038 stream_.nUserChannels[i] = 0;
10039 stream_.nDeviceChannels[i] = 0;
10040 stream_.channelOffset[i] = 0;
10041 stream_.deviceFormat[i] = 0;
10042 stream_.latency[i] = 0;
10043 stream_.userBuffer[i] = 0;
10044 stream_.convertInfo[i].channels = 0;
10045 stream_.convertInfo[i].inJump = 0;
10046 stream_.convertInfo[i].outJump = 0;
10047 stream_.convertInfo[i].inFormat = 0;
10048 stream_.convertInfo[i].outFormat = 0;
10049 stream_.convertInfo[i].inOffset.clear();
10050 stream_.convertInfo[i].outOffset.clear();
10054 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10056 if ( format == RTAUDIO_SINT16 )
10058 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10060 else if ( format == RTAUDIO_FLOAT64 )
10062 else if ( format == RTAUDIO_SINT24 )
10064 else if ( format == RTAUDIO_SINT8 )
10067 errorText_ = "RtApi::formatBytes: undefined format.";
10068 error( RtAudioError::WARNING );
10073 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
10075 if ( mode == INPUT ) { // convert device to user buffer
10076 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10077 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10078 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10079 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10081 else { // convert user to device buffer
10082 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10083 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10084 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10085 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
10088 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10089 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10091 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10093 // Set up the interleave/deinterleave offsets.
10094 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10095 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10096 ( mode == INPUT && stream_.userInterleaved ) ) {
10097 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10098 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10099 stream_.convertInfo[mode].outOffset.push_back( k );
10100 stream_.convertInfo[mode].inJump = 1;
10104 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10105 stream_.convertInfo[mode].inOffset.push_back( k );
10106 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10107 stream_.convertInfo[mode].outJump = 1;
10111 else { // no (de)interleaving
10112 if ( stream_.userInterleaved ) {
10113 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10114 stream_.convertInfo[mode].inOffset.push_back( k );
10115 stream_.convertInfo[mode].outOffset.push_back( k );
10119 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10120 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10121 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10122 stream_.convertInfo[mode].inJump = 1;
10123 stream_.convertInfo[mode].outJump = 1;
10128 // Add channel offset.
10129 if ( firstChannel > 0 ) {
10130 if ( stream_.deviceInterleaved[mode] ) {
10131 if ( mode == OUTPUT ) {
10132 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10133 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10136 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10137 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10141 if ( mode == OUTPUT ) {
10142 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10143 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10146 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10147 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10153 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10155 // This function does format conversion, input/output channel compensation, and
10156 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10157 // the lower three bytes of a 32-bit integer.
10159 // Clear our device buffer when in/out duplex device channels are different
10160 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10161 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10162 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10165 if (info.outFormat == RTAUDIO_FLOAT64) {
10167 Float64 *out = (Float64 *)outBuffer;
10169 if (info.inFormat == RTAUDIO_SINT8) {
10170 signed char *in = (signed char *)inBuffer;
10171 scale = 1.0 / 127.5;
10172 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10173 for (j=0; j<info.channels; j++) {
10174 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10175 out[info.outOffset[j]] += 0.5;
10176 out[info.outOffset[j]] *= scale;
10179 out += info.outJump;
10182 else if (info.inFormat == RTAUDIO_SINT16) {
10183 Int16 *in = (Int16 *)inBuffer;
10184 scale = 1.0 / 32767.5;
10185 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10186 for (j=0; j<info.channels; j++) {
10187 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10188 out[info.outOffset[j]] += 0.5;
10189 out[info.outOffset[j]] *= scale;
10192 out += info.outJump;
10195 else if (info.inFormat == RTAUDIO_SINT24) {
10196 Int24 *in = (Int24 *)inBuffer;
10197 scale = 1.0 / 8388607.5;
10198 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10199 for (j=0; j<info.channels; j++) {
10200 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10201 out[info.outOffset[j]] += 0.5;
10202 out[info.outOffset[j]] *= scale;
10205 out += info.outJump;
10208 else if (info.inFormat == RTAUDIO_SINT32) {
10209 Int32 *in = (Int32 *)inBuffer;
10210 scale = 1.0 / 2147483647.5;
10211 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10212 for (j=0; j<info.channels; j++) {
10213 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10214 out[info.outOffset[j]] += 0.5;
10215 out[info.outOffset[j]] *= scale;
10218 out += info.outJump;
10221 else if (info.inFormat == RTAUDIO_FLOAT32) {
10222 Float32 *in = (Float32 *)inBuffer;
10223 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10224 for (j=0; j<info.channels; j++) {
10225 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10228 out += info.outJump;
10231 else if (info.inFormat == RTAUDIO_FLOAT64) {
10232 // Channel compensation and/or (de)interleaving only.
10233 Float64 *in = (Float64 *)inBuffer;
10234 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10235 for (j=0; j<info.channels; j++) {
10236 out[info.outOffset[j]] = in[info.inOffset[j]];
10239 out += info.outJump;
10243 else if (info.outFormat == RTAUDIO_FLOAT32) {
10245 Float32 *out = (Float32 *)outBuffer;
10247 if (info.inFormat == RTAUDIO_SINT8) {
10248 signed char *in = (signed char *)inBuffer;
10249 scale = (Float32) ( 1.0 / 127.5 );
10250 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10251 for (j=0; j<info.channels; j++) {
10252 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10253 out[info.outOffset[j]] += 0.5;
10254 out[info.outOffset[j]] *= scale;
10257 out += info.outJump;
10260 else if (info.inFormat == RTAUDIO_SINT16) {
10261 Int16 *in = (Int16 *)inBuffer;
10262 scale = (Float32) ( 1.0 / 32767.5 );
10263 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10264 for (j=0; j<info.channels; j++) {
10265 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10266 out[info.outOffset[j]] += 0.5;
10267 out[info.outOffset[j]] *= scale;
10270 out += info.outJump;
10273 else if (info.inFormat == RTAUDIO_SINT24) {
10274 Int24 *in = (Int24 *)inBuffer;
10275 scale = (Float32) ( 1.0 / 8388607.5 );
10276 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10277 for (j=0; j<info.channels; j++) {
10278 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10279 out[info.outOffset[j]] += 0.5;
10280 out[info.outOffset[j]] *= scale;
10283 out += info.outJump;
10286 else if (info.inFormat == RTAUDIO_SINT32) {
10287 Int32 *in = (Int32 *)inBuffer;
10288 scale = (Float32) ( 1.0 / 2147483647.5 );
10289 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10290 for (j=0; j<info.channels; j++) {
10291 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10292 out[info.outOffset[j]] += 0.5;
10293 out[info.outOffset[j]] *= scale;
10296 out += info.outJump;
10299 else if (info.inFormat == RTAUDIO_FLOAT32) {
10300 // Channel compensation and/or (de)interleaving only.
10301 Float32 *in = (Float32 *)inBuffer;
10302 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10303 for (j=0; j<info.channels; j++) {
10304 out[info.outOffset[j]] = in[info.inOffset[j]];
10307 out += info.outJump;
10310 else if (info.inFormat == RTAUDIO_FLOAT64) {
10311 Float64 *in = (Float64 *)inBuffer;
10312 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10313 for (j=0; j<info.channels; j++) {
10314 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10317 out += info.outJump;
10321 else if (info.outFormat == RTAUDIO_SINT32) {
10322 Int32 *out = (Int32 *)outBuffer;
10323 if (info.inFormat == RTAUDIO_SINT8) {
10324 signed char *in = (signed char *)inBuffer;
10325 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10326 for (j=0; j<info.channels; j++) {
10327 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10328 out[info.outOffset[j]] <<= 24;
10331 out += info.outJump;
10334 else if (info.inFormat == RTAUDIO_SINT16) {
10335 Int16 *in = (Int16 *)inBuffer;
10336 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10337 for (j=0; j<info.channels; j++) {
10338 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10339 out[info.outOffset[j]] <<= 16;
10342 out += info.outJump;
10345 else if (info.inFormat == RTAUDIO_SINT24) {
10346 Int24 *in = (Int24 *)inBuffer;
10347 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10348 for (j=0; j<info.channels; j++) {
10349 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10350 out[info.outOffset[j]] <<= 8;
10353 out += info.outJump;
10356 else if (info.inFormat == RTAUDIO_SINT32) {
10357 // Channel compensation and/or (de)interleaving only.
10358 Int32 *in = (Int32 *)inBuffer;
10359 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10360 for (j=0; j<info.channels; j++) {
10361 out[info.outOffset[j]] = in[info.inOffset[j]];
10364 out += info.outJump;
10367 else if (info.inFormat == RTAUDIO_FLOAT32) {
10368 Float32 *in = (Float32 *)inBuffer;
10369 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10370 for (j=0; j<info.channels; j++) {
10371 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10374 out += info.outJump;
10377 else if (info.inFormat == RTAUDIO_FLOAT64) {
10378 Float64 *in = (Float64 *)inBuffer;
10379 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10380 for (j=0; j<info.channels; j++) {
10381 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10384 out += info.outJump;
10388 else if (info.outFormat == RTAUDIO_SINT24) {
10389 Int24 *out = (Int24 *)outBuffer;
10390 if (info.inFormat == RTAUDIO_SINT8) {
10391 signed char *in = (signed char *)inBuffer;
10392 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10393 for (j=0; j<info.channels; j++) {
10394 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10395 //out[info.outOffset[j]] <<= 16;
10398 out += info.outJump;
10401 else if (info.inFormat == RTAUDIO_SINT16) {
10402 Int16 *in = (Int16 *)inBuffer;
10403 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10404 for (j=0; j<info.channels; j++) {
10405 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10406 //out[info.outOffset[j]] <<= 8;
10409 out += info.outJump;
10412 else if (info.inFormat == RTAUDIO_SINT24) {
10413 // Channel compensation and/or (de)interleaving only.
10414 Int24 *in = (Int24 *)inBuffer;
10415 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10416 for (j=0; j<info.channels; j++) {
10417 out[info.outOffset[j]] = in[info.inOffset[j]];
10420 out += info.outJump;
10423 else if (info.inFormat == RTAUDIO_SINT32) {
10424 Int32 *in = (Int32 *)inBuffer;
10425 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10426 for (j=0; j<info.channels; j++) {
10427 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10428 //out[info.outOffset[j]] >>= 8;
10431 out += info.outJump;
10434 else if (info.inFormat == RTAUDIO_FLOAT32) {
10435 Float32 *in = (Float32 *)inBuffer;
10436 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10437 for (j=0; j<info.channels; j++) {
10438 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10441 out += info.outJump;
10444 else if (info.inFormat == RTAUDIO_FLOAT64) {
10445 Float64 *in = (Float64 *)inBuffer;
10446 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10447 for (j=0; j<info.channels; j++) {
10448 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10451 out += info.outJump;
10455 else if (info.outFormat == RTAUDIO_SINT16) {
10456 Int16 *out = (Int16 *)outBuffer;
10457 if (info.inFormat == RTAUDIO_SINT8) {
10458 signed char *in = (signed char *)inBuffer;
10459 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10460 for (j=0; j<info.channels; j++) {
10461 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10462 out[info.outOffset[j]] <<= 8;
10465 out += info.outJump;
10468 else if (info.inFormat == RTAUDIO_SINT16) {
10469 // Channel compensation and/or (de)interleaving only.
10470 Int16 *in = (Int16 *)inBuffer;
10471 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10472 for (j=0; j<info.channels; j++) {
10473 out[info.outOffset[j]] = in[info.inOffset[j]];
10476 out += info.outJump;
10479 else if (info.inFormat == RTAUDIO_SINT24) {
10480 Int24 *in = (Int24 *)inBuffer;
10481 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10482 for (j=0; j<info.channels; j++) {
10483 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10486 out += info.outJump;
10489 else if (info.inFormat == RTAUDIO_SINT32) {
10490 Int32 *in = (Int32 *)inBuffer;
10491 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10492 for (j=0; j<info.channels; j++) {
10493 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10496 out += info.outJump;
10499 else if (info.inFormat == RTAUDIO_FLOAT32) {
10500 Float32 *in = (Float32 *)inBuffer;
10501 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10502 for (j=0; j<info.channels; j++) {
10503 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10506 out += info.outJump;
10509 else if (info.inFormat == RTAUDIO_FLOAT64) {
10510 Float64 *in = (Float64 *)inBuffer;
10511 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10512 for (j=0; j<info.channels; j++) {
10513 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10516 out += info.outJump;
10520 else if (info.outFormat == RTAUDIO_SINT8) {
10521 signed char *out = (signed char *)outBuffer;
10522 if (info.inFormat == RTAUDIO_SINT8) {
10523 // Channel compensation and/or (de)interleaving only.
10524 signed char *in = (signed char *)inBuffer;
10525 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10526 for (j=0; j<info.channels; j++) {
10527 out[info.outOffset[j]] = in[info.inOffset[j]];
10530 out += info.outJump;
10533 if (info.inFormat == RTAUDIO_SINT16) {
10534 Int16 *in = (Int16 *)inBuffer;
10535 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10536 for (j=0; j<info.channels; j++) {
10537 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10540 out += info.outJump;
10543 else if (info.inFormat == RTAUDIO_SINT24) {
10544 Int24 *in = (Int24 *)inBuffer;
10545 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10546 for (j=0; j<info.channels; j++) {
10547 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10550 out += info.outJump;
10553 else if (info.inFormat == RTAUDIO_SINT32) {
10554 Int32 *in = (Int32 *)inBuffer;
10555 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10556 for (j=0; j<info.channels; j++) {
10557 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10560 out += info.outJump;
10563 else if (info.inFormat == RTAUDIO_FLOAT32) {
10564 Float32 *in = (Float32 *)inBuffer;
10565 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10566 for (j=0; j<info.channels; j++) {
10567 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10570 out += info.outJump;
10573 else if (info.inFormat == RTAUDIO_FLOAT64) {
10574 Float64 *in = (Float64 *)inBuffer;
10575 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10576 for (j=0; j<info.channels; j++) {
10577 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10580 out += info.outJump;
10586 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10587 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10588 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10590 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10596 if ( format == RTAUDIO_SINT16 ) {
10597 for ( unsigned int i=0; i<samples; i++ ) {
10598 // Swap 1st and 2nd bytes.
10603 // Increment 2 bytes.
10607 else if ( format == RTAUDIO_SINT32 ||
10608 format == RTAUDIO_FLOAT32 ) {
10609 for ( unsigned int i=0; i<samples; i++ ) {
10610 // Swap 1st and 4th bytes.
10615 // Swap 2nd and 3rd bytes.
10621 // Increment 3 more bytes.
10625 else if ( format == RTAUDIO_SINT24 ) {
10626 for ( unsigned int i=0; i<samples; i++ ) {
10627 // Swap 1st and 3rd bytes.
10632 // Increment 2 more bytes.
10636 else if ( format == RTAUDIO_FLOAT64 ) {
10637 for ( unsigned int i=0; i<samples; i++ ) {
10638 // Swap 1st and 8th bytes
10643 // Swap 2nd and 7th bytes
10649 // Swap 3rd and 6th bytes
10655 // Swap 4th and 5th bytes
10661 // Increment 5 more bytes.
10667 // Indentation settings for Vim and Emacs
10669 // Local Variables:
10670 // c-basic-offset: 2
10671 // indent-tabs-mode: nil
10674 // vim: et sts=2 sw=2