1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
// Static variable definitions.
const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
// Canonical list of sample rates (Hz) probed when querying device
// capabilities; must contain exactly MAX_SAMPLE_RATES entries.
const unsigned int RtApi::SAMPLE_RATES[] = {
  4000, 5512, 8000, 9600, 11025, 16000, 22050,
  32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform mutex abstraction: on Windows builds the MUTEX_* macros map
// onto the Win32 critical-section API (A is a CRITICAL_SECTION*).
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
#define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
#define MUTEX_DESTROY(A) DeleteCriticalSection(A)
#define MUTEX_LOCK(A) EnterCriticalSection(A)
#define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Convert a narrow C string from an OS API to a std::string.
// A NULL pointer yields an empty string; passing NULL straight to the
// std::string constructor would be undefined behavior.
static std::string convertCharPointerToStdString(const char *text)
{
  if ( text == NULL ) return std::string();
  return std::string(text);
}
71 static std::string convertCharPointerToStdString(const wchar_t *text)
73 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
74 std::string s( length-1, '\0' );
75 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
// On POSIX-style builds (ALSA, PulseAudio, Jack, OSS, CoreAudio) the
// MUTEX_* macros map onto pthread mutexes (A is a pthread_mutex_t*).
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
#define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
#define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
#define MUTEX_LOCK(A) pthread_mutex_lock(A)
#define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
// Fallback no-op definitions when no real audio API is compiled in, so
// call sites still compile.  NOTE(review): these presumably live in an
// #else branch that is not visible here — confirm the conditional nesting.
#define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
#define MUTEX_DESTROY(A) abs(*A) // dummy definitions
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
// Define API names and display names.
// Must be in same order as API enum.
// Each row is { short identifier, human-readable display name }.
// TODO(review): rows for some APIs (e.g. alsa/jack/asio) are not visible
// here — verify there is exactly one row per RtAudio::Api value, in enum
// order (the StaticAssertions check below depends on it).
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "pulse" , "Pulse" },
  { "oss" , "OpenSoundSystem" },
  { "core" , "CoreAudio" },
  { "wasapi" , "WASAPI" },
  { "ds" , "DirectSound" },
  { "dummy" , "Dummy" },
// Number of rows in the table above.
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
// The order here will control the order of RtAudio's API search in
// the constructor.  The list ends with an UNSPECIFIED sentinel, which
// rtaudio_num_compiled_apis excludes via the trailing "-1".
extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
#if defined(__UNIX_JACK__)
#if defined(__LINUX_PULSE__)
  RtAudio::LINUX_PULSE,
#if defined(__LINUX_ALSA__)
#if defined(__LINUX_OSS__)
#if defined(__WINDOWS_ASIO__)
  RtAudio::WINDOWS_ASIO,
#if defined(__WINDOWS_WASAPI__)
  RtAudio::WINDOWS_WASAPI,
#if defined(__WINDOWS_DS__)
#if defined(__MACOSX_CORE__)
  RtAudio::MACOSX_CORE,
#if defined(__RTAUDIO_DUMMY__)
  RtAudio::RTAUDIO_DUMMY,
  RtAudio::UNSPECIFIED,
// Count of usable entries (sentinel excluded).
extern "C" const unsigned int rtaudio_num_compiled_apis =
  sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
// This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
// If the build breaks here, check that they match.
// Pre-C++11 static-assert idiom: StaticAssert<false> has only a private
// constructor, so instantiating it fails to compile; StaticAssert<true>
// compiles cleanly.
template<bool b> class StaticAssert { private: StaticAssert() {} };
template<> class StaticAssert<true>{ public: StaticAssert() {} };
class StaticAssertions { StaticAssertions() {
  StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
163 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
165 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
166 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
169 std::string RtAudio :: getApiName( RtAudio::Api api )
171 if (api < 0 || api >= RtAudio::NUM_APIS)
173 return rtaudio_api_names[api][0];
176 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
178 if (api < 0 || api >= RtAudio::NUM_APIS)
180 return rtaudio_api_names[api][1];
183 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
186 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
187 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
188 return rtaudio_compiled_apis[i];
189 return RtAudio::UNSPECIFIED;
192 void RtAudio :: openRtApi( RtAudio::Api api )
198 #if defined(__UNIX_JACK__)
199 if ( api == UNIX_JACK )
200 rtapi_ = new RtApiJack();
202 #if defined(__LINUX_ALSA__)
203 if ( api == LINUX_ALSA )
204 rtapi_ = new RtApiAlsa();
206 #if defined(__LINUX_PULSE__)
207 if ( api == LINUX_PULSE )
208 rtapi_ = new RtApiPulse();
210 #if defined(__LINUX_OSS__)
211 if ( api == LINUX_OSS )
212 rtapi_ = new RtApiOss();
214 #if defined(__WINDOWS_ASIO__)
215 if ( api == WINDOWS_ASIO )
216 rtapi_ = new RtApiAsio();
218 #if defined(__WINDOWS_WASAPI__)
219 if ( api == WINDOWS_WASAPI )
220 rtapi_ = new RtApiWasapi();
222 #if defined(__WINDOWS_DS__)
223 if ( api == WINDOWS_DS )
224 rtapi_ = new RtApiDs();
226 #if defined(__MACOSX_CORE__)
227 if ( api == MACOSX_CORE )
228 rtapi_ = new RtApiCore();
230 #if defined(__RTAUDIO_DUMMY__)
231 if ( api == RTAUDIO_DUMMY )
232 rtapi_ = new RtApiDummy();
236 RtAudio :: RtAudio( RtAudio::Api api )
240 if ( api != UNSPECIFIED ) {
241 // Attempt to open the specified API.
243 if ( rtapi_ ) return;
245 // No compiled support for specified API value. Issue a debug
246 // warning and continue as if no API was specified.
247 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
250 // Iterate through the compiled APIs and return as soon as we find
251 // one with at least one device or we reach the end of the list.
252 std::vector< RtAudio::Api > apis;
253 getCompiledApi( apis );
254 for ( unsigned int i=0; i<apis.size(); i++ ) {
255 openRtApi( apis[i] );
256 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
259 if ( rtapi_ ) return;
261 // It should not be possible to get here because the preprocessor
262 // definition __RTAUDIO_DUMMY__ is automatically defined if no
263 // API-specific definitions are passed to the compiler. But just in
264 // case something weird happens, we'll thow an error.
265 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
266 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
269 RtAudio :: ~RtAudio()
275 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
276 RtAudio::StreamParameters *inputParameters,
277 RtAudioFormat format, unsigned int sampleRate,
278 unsigned int *bufferFrames,
279 RtAudioCallback callback, void *userData,
280 RtAudio::StreamOptions *options,
281 RtAudioErrorCallback errorCallback )
283 return rtapi_->openStream( outputParameters, inputParameters, format,
284 sampleRate, bufferFrames, callback,
285 userData, options, errorCallback );
288 // *************************************************** //
290 // Public RtApi definitions (see end of file for
291 // private or protected utility functions).
293 // *************************************************** //
  // RtApi constructor body (the 'RtApi :: RtApi()' signature is on a line
  // not visible here — TODO confirm): initialize stream bookkeeping to a
  // closed, unconfigured state.
  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
  stream_.apiHandle = 0;
  // No user conversion buffers allocated yet ([0] = output, [1] = input).
  stream_.userBuffer[0] = 0;
  stream_.userBuffer[1] = 0;
  MUTEX_INITIALIZE( &stream_.mutex );
  showWarnings_ = true;
  firstErrorOccurred_ = false;
  // RtApi destructor body (signature not visible here — TODO confirm):
  // release the stream mutex created above.
  MUTEX_DESTROY( &stream_.mutex );
312 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
313 RtAudio::StreamParameters *iParams,
314 RtAudioFormat format, unsigned int sampleRate,
315 unsigned int *bufferFrames,
316 RtAudioCallback callback, void *userData,
317 RtAudio::StreamOptions *options,
318 RtAudioErrorCallback errorCallback )
320 if ( stream_.state != STREAM_CLOSED ) {
321 errorText_ = "RtApi::openStream: a stream is already open!";
322 error( RtAudioError::INVALID_USE );
326 // Clear stream information potentially left from a previously open stream.
329 if ( oParams && oParams->nChannels < 1 ) {
330 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
331 error( RtAudioError::INVALID_USE );
335 if ( iParams && iParams->nChannels < 1 ) {
336 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
337 error( RtAudioError::INVALID_USE );
341 if ( oParams == NULL && iParams == NULL ) {
342 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
343 error( RtAudioError::INVALID_USE );
347 if ( formatBytes(format) == 0 ) {
348 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
349 error( RtAudioError::INVALID_USE );
353 unsigned int nDevices = getDeviceCount();
354 unsigned int oChannels = 0;
356 oChannels = oParams->nChannels;
357 if ( oParams->deviceId >= nDevices ) {
358 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
359 error( RtAudioError::INVALID_USE );
364 unsigned int iChannels = 0;
366 iChannels = iParams->nChannels;
367 if ( iParams->deviceId >= nDevices ) {
368 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
369 error( RtAudioError::INVALID_USE );
376 if ( oChannels > 0 ) {
378 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
379 sampleRate, format, bufferFrames, options );
380 if ( result == false ) {
381 error( RtAudioError::SYSTEM_ERROR );
386 if ( iChannels > 0 ) {
388 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
389 sampleRate, format, bufferFrames, options );
390 if ( result == false ) {
391 if ( oChannels > 0 ) closeStream();
392 error( RtAudioError::SYSTEM_ERROR );
397 stream_.callbackInfo.callback = (void *) callback;
398 stream_.callbackInfo.userData = userData;
399 stream_.callbackInfo.errorCallback = (void *) errorCallback;
401 if ( options ) options->numberOfBuffers = stream_.nBuffers;
402 stream_.state = STREAM_STOPPED;
405 unsigned int RtApi :: getDefaultInputDevice( void )
407 // Should be implemented in subclasses if possible.
411 unsigned int RtApi :: getDefaultOutputDevice( void )
413 // Should be implemented in subclasses if possible.
417 void RtApi :: closeStream( void )
419 // MUST be implemented in subclasses!
423 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
424 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
425 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
426 RtAudio::StreamOptions * /*options*/ )
428 // MUST be implemented in subclasses!
432 void RtApi :: tickStreamTime( void )
434 // Subclasses that do not provide their own implementation of
435 // getStreamTime should call this function once per buffer I/O to
436 // provide basic stream time support.
438 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
440 #if defined( HAVE_GETTIMEOFDAY )
441 gettimeofday( &stream_.lastTickTimestamp, NULL );
445 long RtApi :: getStreamLatency( void )
449 long totalLatency = 0;
450 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
451 totalLatency = stream_.latency[0];
452 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
453 totalLatency += stream_.latency[1];
458 double RtApi :: getStreamTime( void )
462 #if defined( HAVE_GETTIMEOFDAY )
463 // Return a very accurate estimate of the stream time by
464 // adding in the elapsed time since the last tick.
468 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
469 return stream_.streamTime;
471 gettimeofday( &now, NULL );
472 then = stream_.lastTickTimestamp;
473 return stream_.streamTime +
474 ((now.tv_sec + 0.000001 * now.tv_usec) -
475 (then.tv_sec + 0.000001 * then.tv_usec));
477 return stream_.streamTime;
481 void RtApi :: setStreamTime( double time )
486 stream_.streamTime = time;
487 #if defined( HAVE_GETTIMEOFDAY )
488 gettimeofday( &stream_.lastTickTimestamp, NULL );
492 unsigned int RtApi :: getStreamSampleRate( void )
496 return stream_.sampleRate;
500 // *************************************************** //
502 // OS/API-specific methods.
504 // *************************************************** //
506 #if defined(__MACOSX_CORE__)
508 // The OS X CoreAudio API is designed to use a separate callback
509 // procedure for each of its audio devices. A single RtAudio duplex
510 // stream using two different devices is supported here, though it
511 // cannot be guaranteed to always behave correctly because we cannot
512 // synchronize these two callbacks.
514 // A property listener is installed for over/underrun information.
515 // However, no functionality is currently provided to allow property
516 // listeners to trigger user handlers because it is unclear what could
517 // be done if a critical stream parameter (buffer size, sample rate,
518 // device disconnect) notification arrived. The listeners entail
519 // quite a bit of extra code and most likely, a user program wouldn't
520 // be prepared for the result anyway. However, we do provide a flag
521 // to the client callback function to inform of an over/underrun.
// A structure to hold various information related to the CoreAudio API
// implementation.  Indices [0]/[1] refer to the output/input halves of a
// (possibly duplex) stream.  NOTE(review): the struct header line is not
// visible here — confirm member/constructor placement.
  AudioDeviceID id[2]; // device ids
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  // IOProc ids are only available when building for OS X 10.5 or later.
  AudioDeviceIOProcID procId[2];
  UInt32 iStream[2]; // device stream index (or first if using multiple)
  UInt32 nStreams[2]; // number of streams to use
  pthread_cond_t condition;
  int drainCounter; // Tracks callback counts when draining
  bool internalDrain; // Indicates if stop is initiated from callback or not.
  // Constructor initializer list: defaults to one stream per direction,
  // zeroed device ids, no xruns, and no allocated device buffer.
  :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
542 RtApiCore:: RtApiCore()
544 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
545 // This is a largely undocumented but absolutely necessary
546 // requirement starting with OS-X 10.6. If not called, queries and
547 // updates to various audio device properties are not handled
549 CFRunLoopRef theRunLoop = NULL;
550 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
551 kAudioObjectPropertyScopeGlobal,
552 kAudioObjectPropertyElementMaster };
553 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
554 if ( result != noErr ) {
555 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
556 error( RtAudioError::WARNING );
561 RtApiCore :: ~RtApiCore()
563 // The subclass destructor gets called before the base class
564 // destructor, so close an existing stream before deallocating
565 // apiDeviceId memory.
566 if ( stream_.state != STREAM_CLOSED ) closeStream();
unsigned int RtApiCore :: getDeviceCount( void )
  // Find out how many audio devices there are, if any.
  // The count is derived from the byte size of the system device-list
  // property.  NOTE(review): the dataSize declaration is on a line not
  // visible here — confirm.
  AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
    error( RtAudioError::WARNING );
  // Each device contributes one AudioDeviceID to the property data.
  return dataSize / sizeof( AudioDeviceID );
unsigned int RtApiCore :: getDefaultInputDevice( void )
  unsigned int nDevices = getDeviceCount();
  // With zero or one device there is nothing to search; index 0 is the answer.
  if ( nDevices <= 1 ) return 0;

  // Ask CoreAudio for the system default input device id.
  // NOTE(review): the AudioDeviceID 'id' declaration is not visible here — confirm.
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );

  // Fetch the full device list and translate the default device id into
  // an RtAudio device index.
  dataSize *= nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );

  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  // The default device id was not found in the enumerated list.
  errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
  error( RtAudioError::WARNING );
unsigned int RtApiCore :: getDefaultOutputDevice( void )
  unsigned int nDevices = getDeviceCount();
  // With zero or one device there is nothing to search; index 0 is the answer.
  if ( nDevices <= 1 ) return 0;

  // Ask CoreAudio for the system default output device id.
  // NOTE(review): the AudioDeviceID 'id' declaration is not visible here — confirm.
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );

  // Fetch the full device list and translate the default device id into
  // an RtAudio device index.
  dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );

  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  // The default device id was not found in the enumerated list.
  errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
  error( RtAudioError::WARNING );
// Probe a device's capabilities (name, channel counts, sample rates,
// native format) and return them in a DeviceInfo structure.  All
// failures are reported as warnings via error().
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;

  // Validate the requested device index against the current device count.
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );

  if ( device >= nDevices ) {
    errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  // Enumerate all device ids and pick out the requested one.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );

  AudioDeviceID id = deviceList[ device ];

  // Get the device name.  The reported name is "<manufacturer>: <device>".
  // NOTE(review): the CFStringRef 'cfname' declaration is not visible here — confirm.
  dataSize = sizeof( CFStringRef );
  property.mSelector = kAudioObjectPropertyManufacturer;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  // A BMP code point can occupy up to three UTF-8 bytes, hence the
  // (length * 3 + 1) conversion buffer size.
  int length = CFStringGetLength(cfname);
  char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
  CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
  info.name.append( (const char *)mname, strlen(mname) );
  info.name.append( ": " );

  // Append the device's own name after the manufacturer.
  property.mSelector = kAudioObjectPropertyName;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  length = CFStringGetLength(cfname);
  char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
  CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
  info.name.append( (const char *)name, strlen(name) );

  // Get the output stream "configuration".
  AudioBufferList *bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  property.mScope = kAudioDevicePropertyScopeOutput;
  //  property.mElement = kAudioObjectPropertyElementWildcard;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
    error( RtAudioError::WARNING );

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Get output channel information.  Channels are summed across all of
  // the device's output streams.
  unsigned int i, nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.outputChannels += bufferList->mBuffers[i].mNumberChannels;

  // Get the input stream "configuration".
  property.mScope = kAudioDevicePropertyScopeInput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
    error( RtAudioError::WARNING );

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Get input channel information, again summed across streams.
  nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.inputChannels += bufferList->mBuffers[i].mNumberChannels;

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Probe the device sample rates.
  bool isInput = false;
  if ( info.outputChannels == 0 ) isInput = true;

  // Determine the supported sample rates.
  property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
  if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != kAudioHardwareNoError || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  UInt32 nRanges = dataSize / sizeof( AudioValueRange );
  AudioValueRange rangeList[ nRanges ];
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
  if ( result != kAudioHardwareNoError ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // The sample rate reporting mechanism is a bit of a mystery.  It
  // seems that it can either return individual rates or a range of
  // rates.  I assume that if the min / max range values are the same,
  // then that represents a single supported rate and if the min / max
  // range values are different, the device supports an arbitrary
  // range of values (though there might be multiple ranges, so we'll
  // use the most conservative range).
  Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
  bool haveValueRange = false;
  info.sampleRates.clear();
  for ( UInt32 i=0; i<nRanges; i++ ) {
    if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
      unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
      info.sampleRates.push_back( tmpSr );

      // Prefer the highest discrete rate not exceeding 48 kHz.
      if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
        info.preferredSampleRate = tmpSr;

      haveValueRange = true;
      // Intersect all reported ranges into the most conservative one.
      if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
      if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;

  if ( haveValueRange ) {
    // Expand the conservative range into the canonical SAMPLE_RATES list.
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );

        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];

  // Sort and remove any redundant values
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );
  info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // CoreAudio always uses 32-bit floating point data for PCM streams.
  // Thus, any other "physical" formats supported by the device are of
  // no interest to the client.
  info.nativeFormats = RTAUDIO_FLOAT32;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// AudioDeviceIOProc invoked by CoreAudio for each buffer cycle; forwards
// the audio buffers to the RtApiCore object stored in the CallbackInfo.
// NOTE(review): the trailing 'void *infoPointer' parameter is on a line
// not visible here — confirm the full signature.
static OSStatus callbackHandler( AudioDeviceID inDevice,
                                 const AudioTimeStamp* /*inNow*/,
                                 const AudioBufferList* inInputData,
                                 const AudioTimeStamp* /*inInputTime*/,
                                 AudioBufferList* outOutputData,
                                 const AudioTimeStamp* /*inOutputTime*/,
  CallbackInfo *info = (CallbackInfo *) infoPointer;

  RtApiCore *object = (RtApiCore *) info->object;
  // A false return from callbackEvent signals an error back to CoreAudio.
  if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
    return kAudioHardwareUnspecifiedError;

  return kAudioHardwareNoError;
// Property listener installed for processor-overload notifications;
// records the xrun in the CoreHandle ([1] = input scope, [0] = output).
// NOTE(review): the 'UInt32 nAddresses' parameter is on a line not
// visible here — confirm the full signature.
static OSStatus xrunListener( AudioObjectID /*inDevice*/,
                              const AudioObjectPropertyAddress properties[],
                              void* handlePointer )
  CoreHandle *handle = (CoreHandle *) handlePointer;
  for ( UInt32 i=0; i<nAddresses; i++ ) {
    if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
      if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
        handle->xrun[1] = true;
        handle->xrun[0] = true;

  return kAudioHardwareNoError;
// Property listener used to observe nominal sample-rate changes: reads
// the device's current rate into the Float64 pointed at by ratePointer.
// NOTE(review): the trailing 'void* ratePointer' parameter is on a line
// not visible here — confirm the full signature.
static OSStatus rateListener( AudioObjectID inDevice,
                              UInt32 /*nAddresses*/,
                              const AudioObjectPropertyAddress /*properties*/[],
  Float64 *rate = (Float64 *) ratePointer;
  UInt32 dataSize = sizeof( Float64 );
  AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
  return kAudioHardwareNoError;
// Open and configure one direction (mode == OUTPUT or INPUT) of a stream on
// the given CoreAudio device.  A duplex stream results from two successive
// calls.  On success this fills in stream_ (buffer size, channel/stream
// bookkeeping, conversion flags), allocates the user/device buffers, and
// registers the device IOProc plus an over/underload (xrun) listener.
// NOTE(review): this extract has lost blank lines and a number of closing
// braces / "return FAILURE;" error-exit lines; the comments below annotate
// only the lines that survive.
940 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
941 unsigned int firstChannel, unsigned int sampleRate,
942 RtAudioFormat format, unsigned int *bufferSize,
943 RtAudio::StreamOptions *options )
// Sanity checks: callers are expected to have validated the device index
// already, so these are belt-and-braces failures.
946 unsigned int nDevices = getDeviceCount();
947 if ( nDevices == 0 ) {
948 // This should not happen because a check is made before this function is called.
949 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
953 if ( device >= nDevices ) {
954 // This should not happen because a check is made before this function is called.
955 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// Map the RtAudio device index to a CoreAudio AudioDeviceID via the
// system object's device list.  (VLA of nDevices entries — a gcc/clang
// extension, not standard C++.)
959 AudioDeviceID deviceList[ nDevices ];
960 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
961 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
962 kAudioObjectPropertyScopeGlobal,
963 kAudioObjectPropertyElementMaster };
964 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
965 0, NULL, &dataSize, (void *) &deviceList );
966 if ( result != noErr ) {
967 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
971 AudioDeviceID id = deviceList[ device ];
973 // Setup for stream mode.
974 bool isInput = false;
975 if ( mode == INPUT ) {
977 property.mScope = kAudioDevicePropertyScopeInput;
980 property.mScope = kAudioDevicePropertyScopeOutput;
982 // Get the stream "configuration" (per-stream channel layout for this scope).
983 AudioBufferList *bufferList = nil;
985 property.mSelector = kAudioDevicePropertyStreamConfiguration;
986 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
987 if ( result != noErr || dataSize == 0 ) {
988 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
989 errorText_ = errorStream_.str();
993 // Allocate the AudioBufferList.
994 bufferList = (AudioBufferList *) malloc( dataSize );
995 if ( bufferList == NULL ) {
996 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1000 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1001 if (result != noErr || dataSize == 0) {
1003 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1004 errorText_ = errorStream_.str();
1008 // Search for one or more streams that contain the desired number of
1009 // channels. CoreAudio devices can have an arbitrary number of
1010 // streams and each stream can have an arbitrary number of channels.
1011 // For each stream, a single buffer of interleaved samples is
1012 // provided. RtAudio prefers the use of one stream of interleaved
1013 // data or multiple consecutive single-channel streams. However, we
1014 // now support multiple consecutive multi-channel streams of
1015 // interleaved data as well.
1016 UInt32 iStream, offsetCounter = firstChannel;
1017 UInt32 nStreams = bufferList->mNumberBuffers;
1018 bool monoMode = false;
1019 bool foundStream = false;
1021 // First check that the device supports the requested number of channels
// (total across all of its streams, accounting for the channel offset).
1023 UInt32 deviceChannels = 0;
1024 for ( iStream=0; iStream<nStreams; iStream++ )
1025 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1027 if ( deviceChannels < ( channels + firstChannel ) ) {
1029 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1030 errorText_ = errorStream_.str();
1034 // Look for a single stream meeting our needs.
1035 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1036 for ( iStream=0; iStream<nStreams; iStream++ ) {
1037 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1038 if ( streamChannels >= channels + offsetCounter ) {
1039 firstStream = iStream;
1040 channelOffset = offsetCounter;
// If the requested channels straddle a stream boundary, stop scanning;
// otherwise consume this stream's channels from the running offset.
1044 if ( streamChannels > offsetCounter ) break;
1045 offsetCounter -= streamChannels;
1048 // If we didn't find a single stream above, then we should be able
1049 // to meet the channel specification with multiple streams.
1050 if ( foundStream == false ) {
1052 offsetCounter = firstChannel;
1053 for ( iStream=0; iStream<nStreams; iStream++ ) {
1054 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1055 if ( streamChannels > offsetCounter ) break;
1056 offsetCounter -= streamChannels;
1059 firstStream = iStream;
1060 channelOffset = offsetCounter;
1061 Int32 channelCounter = channels + offsetCounter - streamChannels;
// monoMode tracks whether every involved stream is single-channel; any
// multi-channel stream forces interleaved handling later.
1063 if ( streamChannels > 1 ) monoMode = false;
1064 while ( channelCounter > 0 ) {
1065 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1066 if ( streamChannels > 1 ) monoMode = false;
1067 channelCounter -= streamChannels;
1074 // Determine the buffer size, clamped to the device's supported range.
1075 AudioValueRange bufferRange;
1076 dataSize = sizeof( AudioValueRange );
1077 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1078 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1080 if ( result != noErr ) {
1081 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1082 errorText_ = errorStream_.str();
1086 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1087 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1088 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1090 // Set the buffer size. For multiple streams, I'm assuming we only
1091 // need to make this setting for the master channel.
1092 UInt32 theSize = (UInt32) *bufferSize;
1093 dataSize = sizeof( UInt32 );
1094 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1095 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1097 if ( result != noErr ) {
1098 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1099 errorText_ = errorStream_.str();
1103 // If attempting to setup a duplex stream, the bufferSize parameter
1104 // MUST be the same in both directions!
1105 *bufferSize = theSize;
1106 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1107 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1108 errorText_ = errorStream_.str();
1112 stream_.bufferSize = *bufferSize;
1113 stream_.nBuffers = 1;
1115 // Try to set "hog" mode (exclusive device access) ... it's not clear to me this is working.
1116 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1118 dataSize = sizeof( hog_pid );
1119 property.mSelector = kAudioDevicePropertyHogMode;
1120 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1121 if ( result != noErr ) {
1122 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1123 errorText_ = errorStream_.str();
// Only claim the device if some other process currently hogs it (or no
// one does); writing our pid takes ownership.
1127 if ( hog_pid != getpid() ) {
1129 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1130 if ( result != noErr ) {
1131 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1132 errorText_ = errorStream_.str();
1138 // Check and if necessary, change the sample rate for the device.
1139 Float64 nominalRate;
1140 dataSize = sizeof( Float64 );
1141 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1142 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1143 if ( result != noErr ) {
1144 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1145 errorText_ = errorStream_.str();
1149 // Only change the sample rate if off by more than 1 Hz.
1150 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1152 // Set a property listener for the sample rate change: rateListener
// writes the new rate into reportedRate, which we poll below.
1153 Float64 reportedRate = 0.0;
1154 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1155 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1156 if ( result != noErr ) {
1157 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1158 errorText_ = errorStream_.str();
1162 nominalRate = (Float64) sampleRate;
1163 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1164 if ( result != noErr ) {
1165 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1166 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1167 errorText_ = errorStream_.str();
1171 // Now wait until the reported nominal rate is what we just set,
// polling for up to 5 seconds.
// NOTE(review): upstream sleeps 5 ms per iteration (usleep( 5000 ))
// inside this loop; that line appears lost in this extract — without it
// this is a hot spin. Confirm against the canonical source.
1172 UInt32 microCounter = 0;
1173 while ( reportedRate != nominalRate ) {
1174 microCounter += 5000;
1175 if ( microCounter > 5000000 ) break;
1179 // Remove the property listener.
1180 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1182 if ( microCounter > 5000000 ) {
1183 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1184 errorText_ = errorStream_.str();
1189 // Now set the stream format for all streams. Also, check the
1190 // physical format of the device and change that if necessary.
1191 AudioStreamBasicDescription description;
1192 dataSize = sizeof( AudioStreamBasicDescription );
1193 property.mSelector = kAudioStreamPropertyVirtualFormat;
1194 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1195 if ( result != noErr ) {
1196 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1197 errorText_ = errorStream_.str();
1201 // Set the sample rate and data format id. However, only make the
1202 // change if the sample rate is not within 1.0 of the desired
1203 // rate and the format is not linear pcm.
1204 bool updateFormat = false;
1205 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1206 description.mSampleRate = (Float64) sampleRate;
1207 updateFormat = true;
1210 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1211 description.mFormatID = kAudioFormatLinearPCM;
1212 updateFormat = true;
1215 if ( updateFormat ) {
1216 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1217 if ( result != noErr ) {
1218 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1219 errorText_ = errorStream_.str();
1224 // Now check the physical format.
1225 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1226 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1227 if ( result != noErr ) {
1228 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1229 errorText_ = errorStream_.str();
1233 //std::cout << "Current physical stream format:" << std::endl;
1234 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1235 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1236 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1237 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;
// Only touch the physical format if it isn't already linear PCM with at
// least 16 bits per channel.
1239 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1240 description.mFormatID = kAudioFormatLinearPCM;
1241 //description.mSampleRate = (Float64) sampleRate;
1242 AudioStreamBasicDescription testDescription = description;
1245 // We'll try higher bit rates first and then work our way down.
// Each candidate is (bits-per-channel, format flags).
// NOTE(review): the pairs pushed below are pair<Float32, UInt32>, but the
// vector's element type is pair<UInt32, UInt32> — the fractional entries
// 24.2 / 24.4 (used upstream, where the vector is pair<Float32, UInt32>,
// to distinguish the two unpacked 24-bit layouts) truncate to 24 here via
// a narrowing Float32->UInt32 conversion. The flags still differ, so the
// three 24-bit attempts remain distinct, but the declaration should match.
// NOTE(review): a 'UInt32 formatFlags;' declaration appears lost in this
// extract — formatFlags is assigned below without a visible declaration.
1246 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1247 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1248 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1249 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1250 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1251 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
1252 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1253 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1254 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1255 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1256 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1257 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1260 bool setPhysicalFormat = false;
1261 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1262 testDescription = description;
1263 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1264 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): '~' below is BITWISE not — the masked value can never be
// 0xFFFFFFFF, so this condition is true for EVERY 24-bit candidate,
// including the packed one. Logical '!' was almost certainly intended;
// as written, packed 24-bit gets mBytesPerFrame = 4 * channels instead
// of 3 * channels. Known upstream defect — fix with '!( ... )'.
1265 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1266 testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
1268 testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1269 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1270 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1271 if ( result == noErr ) {
1272 setPhysicalFormat = true;
1273 //std::cout << "Updated physical stream format:" << std::endl;
1274 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1275 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1276 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1277 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
1282 if ( !setPhysicalFormat ) {
1283 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1284 errorText_ = errorStream_.str();
1287 } // done setting virtual/physical formats.
1289 // Get the stream / device latency (in frames); failure is non-fatal.
1291 dataSize = sizeof( UInt32 );
1292 property.mSelector = kAudioDevicePropertyLatency;
1293 if ( AudioObjectHasProperty( id, &property ) == true ) {
1294 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1295 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1297 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1298 errorText_ = errorStream_.str();
1299 error( RtAudioError::WARNING );
1303 // Byte-swapping: According to AudioHardware.h, the stream data will
1304 // always be presented in native-endian format, so we should never
1305 // need to byte swap.
1306 stream_.doByteSwap[mode] = false;
1308 // From the CoreAudio documentation, PCM data must be supplied as 32-bit floats.
1310 stream_.userFormat = format;
1311 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
// Record the device-side channel count: for a single stream use the
// stream's own channel count; for multiple streams only the requested
// channels are handled per stream.
1313 if ( streamCount == 1 )
1314 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1315 else // multiple streams
1316 stream_.nDeviceChannels[mode] = channels;
1317 stream_.nUserChannels[mode] = channels;
1318 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
1319 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1320 else stream_.userInterleaved = true;
1321 stream_.deviceInterleaved[mode] = true;
1322 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1324 // Set flags for buffer conversion (format, channel-count or
// interleaving mismatch between the user and device sides).
1325 stream_.doConvertBuffer[mode] = false;
1326 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1327 stream_.doConvertBuffer[mode] = true;
1328 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1329 stream_.doConvertBuffer[mode] = true;
1330 if ( streamCount == 1 ) {
1331 if ( stream_.nUserChannels[mode] > 1 &&
1332 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1333 stream_.doConvertBuffer[mode] = true;
1335 else if ( monoMode && stream_.userInterleaved )
1336 stream_.doConvertBuffer[mode] = true;
1338 // Allocate our CoreHandle structure for the stream (shared by both
// directions of a duplex stream — only allocated on the first call).
1339 CoreHandle *handle = 0;
1340 if ( stream_.apiHandle == 0 ) {
1342 handle = new CoreHandle;
1344 catch ( std::bad_alloc& ) {
1345 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1349 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1350 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1353 stream_.apiHandle = (void *) handle;
1356 handle = (CoreHandle *) stream_.apiHandle;
1357 handle->iStream[mode] = firstStream;
1358 handle->nStreams[mode] = streamCount;
1359 handle->id[mode] = id;
1361 // Allocate necessary internal buffers.
1362 unsigned long bufferBytes;
1363 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1364 //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
// NOTE(review): the memset below runs BEFORE the NULL check — if malloc
// fails, a null pointer is dereferenced. Check for NULL first (or restore
// the commented-out calloc, which zeroes and needs no memset).
1365 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1366 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1367 if ( stream_.userBuffer[mode] == NULL ) {
1368 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1372 // If possible, we will make use of the CoreAudio stream buffers as
1373 // "device buffers".  However, we can't do this if using multiple
// streams — then an internal "device" staging buffer is required.
1375 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
// For duplex streams, reuse the existing (output-side) device buffer when
// it is already large enough.
1377 bool makeBuffer = true;
1378 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1379 if ( mode == INPUT ) {
1380 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1381 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1382 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1387 bufferBytes *= *bufferSize;
1388 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1389 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1390 if ( stream_.deviceBuffer == NULL ) {
1391 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1397 stream_.sampleRate = sampleRate;
1398 stream_.device[mode] = device;
1399 stream_.state = STREAM_STOPPED;
1400 stream_.callbackInfo.object = (void *) this;
1402 // Setup the buffer conversion information structure.
1403 if ( stream_.doConvertBuffer[mode] ) {
1404 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1405 else setConvertInfo( mode, channelOffset );
// Same physical device for both directions: promote to DUPLEX and reuse
// the already-registered IOProc rather than registering a second one.
1408 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1409 // Only one callback procedure per device.
1410 stream_.mode = DUPLEX;
1412 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1413 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1415 // deprecated in favor of AudioDeviceCreateIOProcID()
1416 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1418 if ( result != noErr ) {
1419 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1420 errorText_ = errorStream_.str();
1423 if ( stream_.mode == OUTPUT && mode == INPUT )
1424 stream_.mode = DUPLEX;
1426 stream_.mode = mode;
1429 // Setup the device property listener for over/underload (xrun) reporting.
1430 property.mSelector = kAudioDeviceProcessorOverload;
1431 property.mScope = kAudioObjectPropertyScopeGlobal;
1432 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// ---- error-exit cleanup: release everything allocated above ----
// (the 'error:' label / return FAILURE lines are among those lost in this
// extract; the statements below are the cleanup path, not fall-through.)
1438 pthread_cond_destroy( &handle->condition );
1440 stream_.apiHandle = 0;
1443 for ( int i=0; i<2; i++ ) {
1444 if ( stream_.userBuffer[i] ) {
1445 free( stream_.userBuffer[i] );
1446 stream_.userBuffer[i] = 0;
1450 if ( stream_.deviceBuffer ) {
1451 free( stream_.deviceBuffer );
1452 stream_.deviceBuffer = 0;
1455 stream_.state = STREAM_CLOSED;
// Tear down the open stream: remove the xrun property listeners, stop the
// device(s) if still running, destroy the IOProc(s), free the user/device
// buffers and the CoreHandle, and mark the stream closed.
// NOTE(review): this extract has lost blank lines and several closing-brace
// lines; comments annotate only the surviving lines.
1459 void RtApiCore :: closeStream( void )
1461 if ( stream_.state == STREAM_CLOSED ) {
1462 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1463 error( RtAudioError::WARNING );
1467 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output-side (handle->id[0]) teardown.
1468 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1470 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1471 kAudioObjectPropertyScopeGlobal,
1472 kAudioObjectPropertyElementMaster };
1474 property.mSelector = kAudioDeviceProcessorOverload;
1475 property.mScope = kAudioObjectPropertyScopeGlobal;
1476 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1477 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1478 error( RtAudioError::WARNING );
1481 if ( stream_.state == STREAM_RUNNING )
1482 AudioDeviceStop( handle->id[0], callbackHandler );
1483 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1484 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1486 // deprecated in favor of AudioDeviceDestroyIOProcID()
1487 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input-side (handle->id[1]) teardown — skipped when duplex on a single
// physical device, since that direction shares the output IOProc.
1491 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1493 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1494 kAudioObjectPropertyScopeGlobal,
1495 kAudioObjectPropertyElementMaster };
1497 property.mSelector = kAudioDeviceProcessorOverload;
1498 property.mScope = kAudioObjectPropertyScopeGlobal;
1499 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1500 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1501 error( RtAudioError::WARNING );
1504 if ( stream_.state == STREAM_RUNNING )
1505 AudioDeviceStop( handle->id[1], callbackHandler );
1506 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1507 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1509 // deprecated in favor of AudioDeviceDestroyIOProcID()
1510 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release per-direction user buffers and the shared device buffer.
1514 for ( int i=0; i<2; i++ ) {
1515 if ( stream_.userBuffer[i] ) {
1516 free( stream_.userBuffer[i] );
1517 stream_.userBuffer[i] = 0;
1521 if ( stream_.deviceBuffer ) {
1522 free( stream_.deviceBuffer );
1523 stream_.deviceBuffer = 0;
1526 // Destroy pthread condition variable.
1527 pthread_cond_destroy( &handle->condition );
// NOTE(review): the canonical source deletes the CoreHandle here
// ("delete handle;") before clearing apiHandle; that line appears lost in
// this extract — without it the handle leaks on every close. Confirm.
1529 stream_.apiHandle = 0;
1531 stream_.mode = UNINITIALIZED;
1532 stream_.state = STREAM_CLOSED;
// Start the stream: kick off the CoreAudio IOProc on the output device and,
// for input or split-device duplex, on the input device as well; then reset
// the drain state and mark the stream running.
// NOTE(review): closing braces / early "return;" lines are missing from
// this extract; comments annotate only the surviving lines.
1535 void RtApiCore :: startStream( void )
1538 if ( stream_.state == STREAM_RUNNING ) {
1539 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1540 error( RtAudioError::WARNING );
// Timestamp the start so getStreamTime() can interpolate between callbacks.
1544 #if defined( HAVE_GETTIMEOFDAY )
1545 gettimeofday( &stream_.lastTickTimestamp, NULL );
1548 OSStatus result = noErr;
1549 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1550 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1552 result = AudioDeviceStart( handle->id[0], callbackHandler );
1553 if ( result != noErr ) {
1554 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1555 errorText_ = errorStream_.str();
// Input device is started separately only when it is a different physical
// device (single-device duplex shares the output IOProc).
1560 if ( stream_.mode == INPUT ||
1561 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1563 result = AudioDeviceStart( handle->id[1], callbackHandler );
1564 if ( result != noErr ) {
1565 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1566 errorText_ = errorStream_.str();
1571 handle->drainCounter = 0;
1572 handle->internalDrain = false;
1573 stream_.state = STREAM_RUNNING;
// Shared exit: report a SYSTEM_ERROR only if one of the starts failed.
1576 if ( result == noErr ) return;
1577 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: for output, first let the callback drain
// (write zeros) and wait to be signaled by callbackEvent(), then stop the
// device(s).
// NOTE(review): closing braces / early "return;" lines are missing from
// this extract; comments annotate only the surviving lines.
1580 void RtApiCore :: stopStream( void )
1583 if ( stream_.state == STREAM_STOPPED ) {
1584 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1585 error( RtAudioError::WARNING );
1589 OSStatus result = noErr;
1590 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1591 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means stop was requested externally (not from the
// callback); arm the drain and block until callbackEvent() signals that
// the final (zero-filled) buffers have been played.
// NOTE(review): pthread_cond_wait requires stream_.mutex to be locked by
// this thread — the lock/unlock lines are not visible in this extract;
// confirm against the canonical source.
1593 if ( handle->drainCounter == 0 ) {
1594 handle->drainCounter = 2;
1595 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1598 result = AudioDeviceStop( handle->id[0], callbackHandler );
1599 if ( result != noErr ) {
1600 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1601 errorText_ = errorStream_.str();
// Input device is stopped separately only when it differs from the output.
1606 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1608 result = AudioDeviceStop( handle->id[1], callbackHandler );
1609 if ( result != noErr ) {
1610 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1611 errorText_ = errorStream_.str();
1616 stream_.state = STREAM_STOPPED;
// Shared exit: report a SYSTEM_ERROR only if one of the stops failed.
1619 if ( result == noErr ) return;
1620 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream immediately: arming drainCounter = 2 without waiting
// makes the callback write zeros and trigger the stop path right away
// (contrast stopStream(), which blocks until the drain completes).
1623 void RtApiCore :: abortStream( void )
1626 if ( stream_.state == STREAM_STOPPED ) {
1627 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1628 error( RtAudioError::WARNING );
1632 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1633 handle->drainCounter = 2;
1638 // This function will be called by a spawned thread when the user
1639 // callback function signals that the stream should be stopped or
1640 // aborted. It is better to handle it this way because the
1641 // callbackEvent() function probably should return before the AudioDeviceStop()
1642 // function is called.
// Thread entry point spawned by callbackEvent() to call stopStream() from
// outside the CoreAudio callback thread (see the explanatory comment above).
// ptr is the stream's CallbackInfo; its 'object' member is the RtApiCore
// instance to stop.
1643 static void *coreStopStream( void *ptr )
1645 CallbackInfo *info = (CallbackInfo *) ptr;
1646 RtApiCore *object = (RtApiCore *) info->object;
1648 object->stopStream();
1649 pthread_exit( NULL );
// CoreAudio IOProc body, invoked (via callbackHandler) once per buffer for
// each registered device.  Runs the user callback, then moves data between
// the user/device buffers and the CoreAudio stream buffers, handling four
// layouts per direction: single stream (with or without conversion),
// multiple mono streams, and multiple interleaved multi-channel streams.
// Also drives the drain/stop handshake with stopStream()/abortStream().
// NOTE(review): this extract has lost blank lines, closing braces and the
// "return SUCCESS;" exits; comments annotate only the surviving lines.
1652 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1653 const AudioBufferList *inBufferList,
1654 const AudioBufferList *outBufferList )
1656 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1657 if ( stream_.state == STREAM_CLOSED ) {
1658 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1659 error( RtAudioError::WARNING );
1663 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1664 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1666 // Check if we were draining the stream and signal is finished.
// drainCounter > 3 means the final zero-filled buffers have been played:
// either spawn a thread to call stopStream() (internal drain) or wake the
// thread blocked in stopStream() (external).
1667 if ( handle->drainCounter > 3 ) {
1668 ThreadHandle threadId;
1670 stream_.state = STREAM_STOPPING;
1671 if ( handle->internalDrain == true )
1672 pthread_create( &threadId, NULL, coreStopStream, info );
1673 else // external call to stopStream()
1674 pthread_cond_signal( &handle->condition );
1678 AudioDeviceID outputDevice = handle->id[0];
1680 // Invoke user callback to get fresh output data UNLESS we are
1681 // draining stream or duplex mode AND the input/output devices are
1682 // different AND this function is called for the input device.
1683 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1684 RtAudioCallback callback = (RtAudioCallback) info->callback;
1685 double streamTime = getStreamTime();
1686 RtAudioStreamStatus status = 0;
// Report and clear any xruns recorded by the xrunListener since last time.
1687 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1688 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1689 handle->xrun[0] = false;
1691 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1692 status |= RTAUDIO_INPUT_OVERFLOW;
1693 handle->xrun[1] = false;
// Callback return: 0 = continue, 1 = drain then stop, 2 = abort now.
1696 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1697 stream_.bufferSize, streamTime, status, info->userData );
1698 if ( cbReturnValue == 2 ) {
1699 stream_.state = STREAM_STOPPING;
1700 handle->drainCounter = 2;
1704 else if ( cbReturnValue == 1 ) {
1705 handle->drainCounter = 1;
1706 handle->internalDrain = true;
// ---------------- output side ----------------
1710 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1712 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1714 if ( handle->nStreams[0] == 1 ) {
1715 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1717 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1719 else { // fill multiple streams with zeros
1720 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1721 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1723 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1727 else if ( handle->nStreams[0] == 1 ) {
1728 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1729 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1730 stream_.userBuffer[0], stream_.convertInfo[0] );
1732 else { // copy from user buffer
1733 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1734 stream_.userBuffer[0],
1735 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1738 else { // fill multiple streams
// Source is the user buffer, or the internal device buffer after format
// conversion.  Samples are Float32 on the device side (set in probeDeviceOpen).
1739 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1740 if ( stream_.doConvertBuffer[0] ) {
1741 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1742 inBuffer = (Float32 *) stream_.deviceBuffer;
1745 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
// One single-channel CoreAudio stream per user channel; the source holds
// non-interleaved channel blocks of stream_.bufferSize frames each.
1746 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1747 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1748 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1749 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1752 else { // fill multiple multi-channel streams with interleaved data
1753 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1756 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1757 UInt32 inChannels = stream_.nUserChannels[0];
1758 if ( stream_.doConvertBuffer[0] ) {
1759 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1760 inChannels = stream_.nDeviceChannels[0];
// inOffset: distance between successive samples of one channel in the source.
1763 if ( inInterleaved ) inOffset = 1;
1764 else inOffset = stream_.bufferSize;
1766 channelsLeft = inChannels;
1767 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1769 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1770 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1773 // Account for possible channel offset in first stream
1774 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1775 streamChannels -= stream_.channelOffset[0];
1776 outJump = stream_.channelOffset[0];
1780 // Account for possible unfilled channels at end of the last stream
1781 if ( streamChannels > channelsLeft ) {
1782 outJump = streamChannels - channelsLeft;
1783 streamChannels = channelsLeft;
1786 // Determine input buffer offsets and skips
1787 if ( inInterleaved ) {
1788 inJump = inChannels;
1789 in += inChannels - channelsLeft;
1793 in += (inChannels - channelsLeft) * inOffset;
// Copy frame by frame, channel by channel, into this stream's buffer.
1796 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1797 for ( unsigned int j=0; j<streamChannels; j++ ) {
1798 *out++ = in[j*inOffset];
1803 channelsLeft -= streamChannels;
1809 // Don't bother draining input
1810 if ( handle->drainCounter ) {
1811 handle->drainCounter++;
// ---------------- input side ----------------
1815 AudioDeviceID inputDevice;
1816 inputDevice = handle->id[1];
1817 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1819 if ( handle->nStreams[1] == 1 ) {
1820 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1821 convertBuffer( stream_.userBuffer[1],
1822 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1823 stream_.convertInfo[1] );
1825 else { // copy to user buffer
1826 memcpy( stream_.userBuffer[1],
1827 inBufferList->mBuffers[handle->iStream[1]].mData,
1828 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1831 else { // read from multiple streams
// Destination is the user buffer, or the internal device buffer that will
// be format-converted afterwards (see line 1897 below).
1832 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1833 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1835 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1836 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1837 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1838 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1839 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1842 else { // read from multiple multi-channel streams
// Mirror image of the output de-interleave loop above.
1843 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1846 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1847 UInt32 outChannels = stream_.nUserChannels[1];
1848 if ( stream_.doConvertBuffer[1] ) {
1849 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1850 outChannels = stream_.nDeviceChannels[1];
1853 if ( outInterleaved ) outOffset = 1;
1854 else outOffset = stream_.bufferSize;
1856 channelsLeft = outChannels;
1857 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1859 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1860 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1863 // Account for possible channel offset in first stream
1864 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1865 streamChannels -= stream_.channelOffset[1];
1866 inJump = stream_.channelOffset[1];
1870 // Account for possible unread channels at end of the last stream
1871 if ( streamChannels > channelsLeft ) {
1872 inJump = streamChannels - channelsLeft;
1873 streamChannels = channelsLeft;
1876 // Determine output buffer offsets and skips
1877 if ( outInterleaved ) {
1878 outJump = outChannels;
1879 out += outChannels - channelsLeft;
1883 out += (outChannels - channelsLeft) * outOffset;
1886 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1887 for ( unsigned int j=0; j<streamChannels; j++ ) {
1888 out[j*outOffset] = *in++;
1893 channelsLeft -= streamChannels;
1897 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1898 convertBuffer( stream_.userBuffer[1],
1899 stream_.deviceBuffer,
1900 stream_.convertInfo[1] );
1906 //MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream's frame-based clock by one buffer.
1908 RtApi::tickStreamTime();
1912 const char* RtApiCore :: getErrorCode( OSStatus code )
1916 case kAudioHardwareNotRunningError:
1917 return "kAudioHardwareNotRunningError";
1919 case kAudioHardwareUnspecifiedError:
1920 return "kAudioHardwareUnspecifiedError";
1922 case kAudioHardwareUnknownPropertyError:
1923 return "kAudioHardwareUnknownPropertyError";
1925 case kAudioHardwareBadPropertySizeError:
1926 return "kAudioHardwareBadPropertySizeError";
1928 case kAudioHardwareIllegalOperationError:
1929 return "kAudioHardwareIllegalOperationError";
1931 case kAudioHardwareBadObjectError:
1932 return "kAudioHardwareBadObjectError";
1934 case kAudioHardwareBadDeviceError:
1935 return "kAudioHardwareBadDeviceError";
1937 case kAudioHardwareBadStreamError:
1938 return "kAudioHardwareBadStreamError";
1940 case kAudioHardwareUnsupportedOperationError:
1941 return "kAudioHardwareUnsupportedOperationError";
1943 case kAudioDeviceUnsupportedFormatError:
1944 return "kAudioDeviceUnsupportedFormatError";
1946 case kAudioDevicePermissionsError:
1947 return "kAudioDevicePermissionsError";
1950 return "CoreAudio unknown error";
1954 //******************** End of __MACOSX_CORE__ *********************//
1957 #if defined(__UNIX_JACK__)
1959 // JACK is a low-latency audio server, originally written for the
1960 // GNU/Linux operating system and now also ported to OS-X. It can
1961 // connect a number of different applications to an audio device, as
1962 // well as allowing them to share audio between themselves.
1964 // When using JACK with RtAudio, "devices" refer to JACK clients that
1965 // have ports connected to the server. The JACK server is typically
1966 // started in a terminal as follows:
1968 //    jackd -d alsa -d hw:0
1970 // or through an interface program such as qjackctl. Many of the
1971 // parameters normally set for a stream are fixed by the JACK server
1972 // and can be specified when the JACK server is started. In
1975 //    jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1977 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1978 // frames, and number of buffers = 4. Once the server is running, it
1979 // is not possible to override these values. If the values are not
1980 // specified in the command-line, the JACK server uses default values.
1982 // The JACK server does not have to be running when an instance of
1983 // RtApiJack is created, though the function getDeviceCount() will
1984 // report 0 devices found until JACK has been started. When no
1985 // devices are available (i.e., the JACK server is not running), a
1986 // stream cannot be opened.
1988 #include <jack/jack.h>
1992 // A structure to hold various information related to the Jack API
// implementation. (Fields of struct JackHandle; the struct header,
// xrun flags and constructor declaration lines are outside this view.)
// Connection handle to the JACK server for this stream.
1995 jack_client_t *client;
// Registered JACK ports per direction: index 0 = playback ("outport"),
// index 1 = capture ("inport") — see port registration in probeDeviceOpen.
1996 jack_port_t **ports[2];
// Client-name prefix of the target JACK "device" per direction; used by
// startStream() when auto-connecting ports.
1997 std::string deviceName[2];
// Signaled by the process callback when an output drain has completed;
// stopStream() waits on this.
1999 pthread_cond_t condition;
2000 int drainCounter; // Tracks callback counts when draining
2001 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Constructor initializer list/body: zero the client and drain state,
// then null both port arrays and clear both xrun flags.
2004 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
2007 #if !defined(__RTAUDIO_DEBUG__)
// No-op JACK error handler: installed (in the RtApiJack constructor,
// when __RTAUDIO_DEBUG__ is not defined) to suppress JACK's default
// error printing. Fix: dropped the stray ';' after the function body,
// which formed an extra empty statement (-Wextra-semi warning).
static void jackSilentError( const char * ) {}
2011 RtApiJack :: RtApiJack()
2012 :shouldAutoconnect_(true) {
2013 // Nothing to do here.
2014 #if !defined(__RTAUDIO_DEBUG__)
2015 // Turn off Jack's internal error reporting.
2016 jack_set_error_function( &jackSilentError );
2020 RtApiJack :: ~RtApiJack()
2022 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices": distinct client-name prefixes (the text before
// the first colon) among all registered JACK audio port names. Returns
// 0 when no JACK server can be reached.
// NOTE(review): several structural lines (local declarations, loop
// open/close, free/return) are elided from this view of the file.
2025 unsigned int RtApiJack :: getDeviceCount( void )
2027 // See if we can become a jack client.
2028 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2029 jack_status_t *status = NULL;
// Temporary probe client; closed again before returning.
2030 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2031 if ( client == 0 ) return 0;
2034 std::string port, previousPort;
2035 unsigned int nChannels = 0, nDevices = 0;
// NULL name pattern == all ports of the default audio type.
2036 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2038 // Parse the port names up to the first colon (:).
2041 port = (char *) ports[ nChannels ];
2042 iColon = port.find(":");
2043 if ( iColon != std::string::npos ) {
// Keep the colon so comparisons are against the full prefix.
2044 port = port.substr( 0, iColon + 1 );
// A new prefix means a new "device" (client) — relies on JACK grouping
// a client's ports contiguously in the returned list.
2045 if ( port != previousPort ) {
2047 previousPort = port;
2050 } while ( ports[++nChannels] );
2054 jack_client_close( client );
// Probe one JACK "device" (client-name prefix) and fill a DeviceInfo:
// name, channel counts, the server's (single, fixed) sample rate and
// the native FLOAT32 format. Warnings are issued (and a default-
// constructed/partial info returned) on connection or probe failures.
// NOTE(review): braces/return/free lines are elided from this view.
2058 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2060 RtAudio::DeviceInfo info;
2061 info.probed = false;
2063 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2064 jack_status_t *status = NULL;
// Temporary probe client, closed before every return path below.
2065 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2066 if ( client == 0 ) {
2067 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2068 error( RtAudioError::WARNING );
2073 std::string port, previousPort;
2074 unsigned int nPorts = 0, nDevices = 0;
2075 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2077 // Parse the port names up to the first colon (:).
2080 port = (char *) ports[ nPorts ];
2081 iColon = port.find(":");
2082 if ( iColon != std::string::npos ) {
// Here the colon is excluded — info.name is the bare client name.
2083 port = port.substr( 0, iColon );
2084 if ( port != previousPort ) {
// The device index counts distinct client-name prefixes, matching
// the enumeration order used by getDeviceCount().
2085 if ( nDevices == device ) info.name = port;
2087 previousPort = port;
2090 } while ( ports[++nPorts] );
2094 if ( device >= nDevices ) {
2095 jack_client_close( client );
2096 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2097 error( RtAudioError::INVALID_USE );
2101 // Get the current jack server sample rate.
2102 info.sampleRates.clear();
// JACK fixes the rate server-wide, so exactly one rate is reported.
2104 info.preferredSampleRate = jack_get_sample_rate( client );
2105 info.sampleRates.push_back( info.preferredSampleRate );
2107 // Count the available ports containing the client name as device
2108 // channels. Jack "input ports" equal RtAudio output channels.
2109 unsigned int nChannels = 0;
2110 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2112 while ( ports[ nChannels ] ) nChannels++;
2114 info.outputChannels = nChannels;
2117 // Jack "output ports" equal RtAudio input channels.
2119 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2121 while ( ports[ nChannels ] ) nChannels++;
2123 info.inputChannels = nChannels;
2126 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2127 jack_client_close(client);
2128 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2129 error( RtAudioError::WARNING );
2133 // If device opens for both playback and capture, we determine the channels.
2134 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2135 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2137 // Jack always uses 32-bit floats.
2138 info.nativeFormats = RTAUDIO_FLOAT32;
2140 // Jack doesn't provide default devices so we'll use the first available one.
2141 if ( device == 0 && info.outputChannels > 0 )
2142 info.isDefaultOutput = true;
2143 if ( device == 0 && info.inputChannels > 0 )
2144 info.isDefaultInput = true;
2146 jack_client_close(client);
2151 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2153 CallbackInfo *info = (CallbackInfo *) infoPointer;
2155 RtApiJack *object = (RtApiJack *) info->object;
2156 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2161 // This function will be called by a spawned thread when the Jack
2162 // server signals that it is shutting down. It is necessary to handle
2163 // it this way because the jackShutdown() function must return before
2164 // the jack_deactivate() function (in closeStream()) will return.
2165 static void *jackCloseStream( void *ptr )
2167 CallbackInfo *info = (CallbackInfo *) ptr;
2168 RtApiJack *object = (RtApiJack *) info->object;
2170 object->closeStream();
2172 pthread_exit( NULL );
2174 static void jackShutdown( void *infoPointer )
2176 CallbackInfo *info = (CallbackInfo *) infoPointer;
2177 RtApiJack *object = (RtApiJack *) info->object;
2179 // Check current stream state. If stopped, then we'll assume this
2180 // was called as a result of a call to RtApiJack::stopStream (the
2181 // deactivation of a client handle causes this function to be called).
2182 // If not, we'll assume the Jack server is shutting down or some
2183 // other problem occurred and we should close the stream.
2184 if ( object->isStreamRunning() == false ) return;
2186 ThreadHandle threadId;
2187 pthread_create( &threadId, NULL, jackCloseStream, info );
2188 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2191 static int jackXrun( void *infoPointer )
2193 JackHandle *handle = *((JackHandle **) infoPointer);
2195 if ( handle->ports[0] ) handle->xrun[0] = true;
2196 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open one direction (OUTPUT or INPUT) of a JACK stream: connect to the
// server (first pass only), resolve the device name, validate channel
// count and sample rate, allocate user/device buffers and the
// JackHandle, register ports and install the JACK callbacks. Returns
// true on success; on failure issues a warning/error and falls through
// to the cleanup code at the end (the "error:" path in the full file).
// NOTE(review): numerous structural lines (braces, else branches,
// goto/return statements, labels) are elided from this view.
2201 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2202 unsigned int firstChannel, unsigned int sampleRate,
2203 RtAudioFormat format, unsigned int *bufferSize,
2204 RtAudio::StreamOptions *options )
2206 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2208 // Look for jack server and try to become a client (only do once per stream).
2209 jack_client_t *client = 0;
2210 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2211 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2212 jack_status_t *status = NULL;
// Honor a user-supplied client name when one was given in options.
2213 if ( options && !options->streamName.empty() )
2214 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2216 client = jack_client_open( "RtApiJack", jackoptions, status );
2217 if ( client == 0 ) {
2218 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2219 error( RtAudioError::WARNING );
2224 // The handle must have been created on an earlier pass.
2225 client = handle->client;
2229 std::string port, previousPort, deviceName;
2230 unsigned int nPorts = 0, nDevices = 0;
2231 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2233 // Parse the port names up to the first colon (:).
2236 port = (char *) ports[ nPorts ];
2237 iColon = port.find(":");
2238 if ( iColon != std::string::npos ) {
2239 port = port.substr( 0, iColon );
2240 if ( port != previousPort ) {
// Same prefix-based device enumeration as getDeviceCount/getDeviceInfo.
2241 if ( nDevices == device ) deviceName = port;
2243 previousPort = port;
2246 } while ( ports[++nPorts] );
2250 if ( device >= nDevices ) {
2251 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
// JACK port direction is mirrored: our OUTPUT connects to JACK inputs.
2255 unsigned long flag = JackPortIsInput;
2256 if ( mode == INPUT ) flag = JackPortIsOutput;
// Channel-count validation is skipped when auto-connection is disabled.
2258 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2259 // Count the available ports containing the client name as device
2260 // channels. Jack "input ports" equal RtAudio output channels.
2261 unsigned int nChannels = 0;
2262 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2264 while ( ports[ nChannels ] ) nChannels++;
2267 // Compare the jack ports for specified client to the requested number of channels.
2268 if ( nChannels < (channels + firstChannel) ) {
2269 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2270 errorText_ = errorStream_.str();
2275 // Check the jack server sample rate.
2276 unsigned int jackRate = jack_get_sample_rate( client );
// The server rate is fixed; a mismatch is a hard failure.
2277 if ( sampleRate != jackRate ) {
2278 jack_client_close( client );
2279 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2280 errorText_ = errorStream_.str();
2283 stream_.sampleRate = jackRate;
2285 // Get the latency of the JACK port.
2286 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2287 if ( ports[ firstChannel ] ) {
2289 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2290 // the range (usually the min and max are equal)
2291 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2292 // get the latency range
2293 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2294 // be optimistic, use the min!
2295 stream_.latency[mode] = latrange.min;
2296 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2300 // The jack server always uses 32-bit floating-point data.
2301 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2302 stream_.userFormat = format;
2304 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2305 else stream_.userInterleaved = true;
2307 // Jack always uses non-interleaved buffers.
2308 stream_.deviceInterleaved[mode] = false;
2310 // Jack always provides host byte-ordered data.
2311 stream_.doByteSwap[mode] = false;
2313 // Get the buffer size. The buffer size and number of buffers
2314 // (periods) is set when the jack server is started.
2315 stream_.bufferSize = (int) jack_get_buffer_size( client );
2316 *bufferSize = stream_.bufferSize;
2318 stream_.nDeviceChannels[mode] = channels;
2319 stream_.nUserChannels[mode] = channels;
2321 // Set flags for buffer conversion.
2322 stream_.doConvertBuffer[mode] = false;
2323 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2324 stream_.doConvertBuffer[mode] = true;
// Interleaving conversion only matters with more than one channel.
2325 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2326 stream_.nUserChannels[mode] > 1 )
2327 stream_.doConvertBuffer[mode] = true;
2329 // Allocate our JackHandle structure for the stream.
2330 if ( handle == 0 ) {
2332 handle = new JackHandle;
2334 catch ( std::bad_alloc& ) {
2335 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2339 if ( pthread_cond_init(&handle->condition, NULL) ) {
2340 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2343 stream_.apiHandle = (void *) handle;
2344 handle->client = client;
2346 handle->deviceName[mode] = deviceName;
2348 // Allocate necessary internal buffers.
2349 unsigned long bufferBytes;
2350 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2351 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2352 if ( stream_.userBuffer[mode] == NULL ) {
2353 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2357 if ( stream_.doConvertBuffer[mode] ) {
2359 bool makeBuffer = true;
2360 if ( mode == OUTPUT )
2361 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2362 else { // mode == INPUT
2363 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
// In duplex mode, reuse the existing device buffer if it is big enough.
2364 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2365 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2366 if ( bufferBytes < bytesOut ) makeBuffer = false;
2371 bufferBytes *= *bufferSize;
2372 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2373 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2374 if ( stream_.deviceBuffer == NULL ) {
2375 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2381 // Allocate memory for the Jack ports (channels) identifiers.
2382 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2383 if ( handle->ports[mode] == NULL ) {
2384 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2388 stream_.device[mode] = device;
2389 stream_.channelOffset[mode] = firstChannel;
2390 stream_.state = STREAM_STOPPED;
2391 stream_.callbackInfo.object = (void *) this;
2393 if ( stream_.mode == OUTPUT && mode == INPUT )
2394 // We had already set up the stream for output.
2395 stream_.mode = DUPLEX;
2397 stream_.mode = mode;
// Install process/xrun/shutdown callbacks (first pass only in the full file).
2398 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2399 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2400 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2403 // Register our ports.
2405 if ( mode == OUTPUT ) {
2406 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2407 snprintf( label, 64, "outport %d", i );
2408 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2409 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2413 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2414 snprintf( label, 64, "inport %d", i );
2415 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2416 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2420 // Setup the buffer conversion information structure. We don't use
2421 // buffers to do channel offsets, so we override that parameter
2423 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2425 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// ---- error/cleanup path: release everything allocated above ----
2431 pthread_cond_destroy( &handle->condition );
2432 jack_client_close( handle->client );
2434 if ( handle->ports[0] ) free( handle->ports[0] );
2435 if ( handle->ports[1] ) free( handle->ports[1] );
2438 stream_.apiHandle = 0;
2441 for ( int i=0; i<2; i++ ) {
2442 if ( stream_.userBuffer[i] ) {
2443 free( stream_.userBuffer[i] );
2444 stream_.userBuffer[i] = 0;
2448 if ( stream_.deviceBuffer ) {
2449 free( stream_.deviceBuffer );
2450 stream_.deviceBuffer = 0;
// Close the stream: deactivate and close the JACK client, destroy the
// handle (ports, condition variable) and free all internal buffers,
// then reset stream mode/state. Warns (instead of throwing) when no
// stream is open. NOTE(review): brace/return lines elided in this view.
2456 void RtApiJack :: closeStream( void )
2458 if ( stream_.state == STREAM_CLOSED ) {
2459 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2460 error( RtAudioError::WARNING );
2464 JackHandle *handle = (JackHandle *) stream_.apiHandle;
// A running stream must be deactivated before the client is closed.
2467 if ( stream_.state == STREAM_RUNNING )
2468 jack_deactivate( handle->client );
2470 jack_client_close( handle->client );
2474 if ( handle->ports[0] ) free( handle->ports[0] );
2475 if ( handle->ports[1] ) free( handle->ports[1] );
2476 pthread_cond_destroy( &handle->condition );
2478 stream_.apiHandle = 0;
// Release per-direction user buffers and the shared device buffer.
2481 for ( int i=0; i<2; i++ ) {
2482 if ( stream_.userBuffer[i] ) {
2483 free( stream_.userBuffer[i] );
2484 stream_.userBuffer[i] = 0;
2488 if ( stream_.deviceBuffer ) {
2489 free( stream_.deviceBuffer );
2490 stream_.deviceBuffer = 0;
2493 stream_.mode = UNINITIALIZED;
2494 stream_.state = STREAM_CLOSED;
// Activate the JACK client and (unless auto-connection is disabled)
// wire our registered ports to the device's ports, honoring the
// channel offset chosen at open time. On any failure, control reaches
// the error handling at the bottom (SYSTEM_ERROR unless result == 0).
// NOTE(review): brace/goto/unlock lines elided in this view.
2497 void RtApiJack :: startStream( void )
2500 if ( stream_.state == STREAM_RUNNING ) {
2501 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2502 error( RtAudioError::WARNING );
// Record the stream start time for stream-time bookkeeping.
2506 #if defined( HAVE_GETTIMEOFDAY )
2507 gettimeofday( &stream_.lastTickTimestamp, NULL );
2510 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2511 int result = jack_activate( handle->client );
2513 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2519 // Get the list of available ports.
2520 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
// Our playback ports connect to the device's JACK *input* ports.
2522 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2523 if ( ports == NULL) {
2524 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2528 // Now make the port connections. Since RtAudio wasn't designed to
2529 // allow the user to select particular channels of a device, we'll
2530 // just open the first "nChannels" ports with offset.
2531 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2533 if ( ports[ stream_.channelOffset[0] + i ] )
2534 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2537 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2544 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
// Our capture ports connect from the device's JACK *output* ports.
2546 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2547 if ( ports == NULL) {
2548 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2552 // Now make the port connections. See note above.
2553 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2555 if ( ports[ stream_.channelOffset[1] + i ] )
2556 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2559 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Reset drain bookkeeping so the process callback starts clean.
2566 handle->drainCounter = 0;
2567 handle->internalDrain = false;
2568 stream_.state = STREAM_RUNNING;
2571 if ( result == 0 ) return;
2572 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: for output modes, start a two-cycle
// drain (drainCounter = 2) and block on the condition variable until
// the process callback signals that output has drained, then
// deactivate the JACK client. NOTE(review): brace lines elided here;
// the blocking wait presumes stream_.mutex is held per pthread
// semantics — confirm against the full file.
2575 void RtApiJack :: stopStream( void )
2578 if ( stream_.state == STREAM_STOPPED ) {
2579 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2580 error( RtAudioError::WARNING );
2584 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2585 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Only initiate a drain if one is not already in progress.
2587 if ( handle->drainCounter == 0 ) {
2588 handle->drainCounter = 2;
2589 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2593 jack_deactivate( handle->client );
2594 stream_.state = STREAM_STOPPED;
// Abort the stream: set drainCounter past the "write zeros" threshold
// so the callback shuts the stream down quickly without draining
// pending output (the full file then delegates to stopStream()).
// NOTE(review): brace/return lines elided in this view.
2597 void RtApiJack :: abortStream( void )
2600 if ( stream_.state == STREAM_STOPPED ) {
2601 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2602 error( RtAudioError::WARNING );
2606 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2607 handle->drainCounter = 2;
2612 // This function will be called by a spawned thread when the user
2613 // callback function signals that the stream should be stopped or
2614 // aborted. It is necessary to handle it this way because the
2615 // callbackEvent() function must return before the jack_deactivate()
2616 // function will return.
2617 static void *jackStopStream( void *ptr )
2619 CallbackInfo *info = (CallbackInfo *) ptr;
2620 RtApiJack *object = (RtApiJack *) info->object;
2622 object->stopStream();
2623 pthread_exit( NULL );
// Per-period processing for the JACK stream: handle drain completion,
// invoke the user callback (collecting xrun status), move audio
// between the JACK port buffers and the user/device buffers (with
// format conversion when enabled), and advance the stream time.
// NOTE(review): brace/else/return lines elided in this view.
2626 bool RtApiJack :: callbackEvent( unsigned long nframes )
2628 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2629 if ( stream_.state == STREAM_CLOSED ) {
// NOTE(review): message says "RtApiCore" but this is RtApiJack —
// copy/paste defect in the error text (cannot be fixed in a doc-only edit).
2630 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2631 error( RtAudioError::WARNING );
2634 if ( stream_.bufferSize != nframes ) {
// NOTE(review): same "RtApiCore" naming defect as above.
2635 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2636 error( RtAudioError::WARNING );
2640 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2641 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2643 // Check if we were draining the stream and signal is finished.
2644 if ( handle->drainCounter > 3 ) {
2645 ThreadHandle threadId;
2647 stream_.state = STREAM_STOPPING;
// Internal drain (callback-initiated): stop from a spawned thread.
// External drain: wake the stopStream() caller blocked on the condition.
2648 if ( handle->internalDrain == true )
2649 pthread_create( &threadId, NULL, jackStopStream, info );
2651 pthread_cond_signal( &handle->condition );
2655 // Invoke user callback first, to get fresh output data.
2656 if ( handle->drainCounter == 0 ) {
2657 RtAudioCallback callback = (RtAudioCallback) info->callback;
2658 double streamTime = getStreamTime();
2659 RtAudioStreamStatus status = 0;
// Report and clear any xruns flagged by jackXrun() for active directions.
2660 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2661 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2662 handle->xrun[0] = false;
2664 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2665 status |= RTAUDIO_INPUT_OVERFLOW;
2666 handle->xrun[1] = false;
2668 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2669 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 == abort immediately; 1 == stop after draining output.
2670 if ( cbReturnValue == 2 ) {
2671 stream_.state = STREAM_STOPPING;
2672 handle->drainCounter = 2;
2674 pthread_create( &id, NULL, jackStopStream, info );
2677 else if ( cbReturnValue == 1 ) {
2678 handle->drainCounter = 1;
2679 handle->internalDrain = true;
2683 jack_default_audio_sample_t *jackbuffer;
2684 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2685 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2687 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2689 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2690 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2691 memset( jackbuffer, 0, bufferBytes );
2695 else if ( stream_.doConvertBuffer[0] ) {
// Convert user format/interleaving into the device buffer, then split
// the (non-interleaved) result one channel per JACK port.
2697 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2699 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2700 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2701 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2704 else { // no buffer conversion
2705 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2706 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2707 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2712 // Don't bother draining input
2713 if ( handle->drainCounter ) {
2714 handle->drainCounter++;
2718 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2720 if ( stream_.doConvertBuffer[1] ) {
// Gather each JACK capture port into the device buffer, then convert
// to the user's format/interleaving.
2721 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2722 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2723 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2725 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2727 else { // no buffer conversion
2728 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2729 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2730 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2736 RtApi::tickStreamTime();
2739 //******************** End of __UNIX_JACK__ *********************//
2742 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2744 // The ASIO API is designed around a callback scheme, so this
2745 // implementation is similar to that used for OS-X CoreAudio and Linux
2746 // Jack. The primary constraint with ASIO is that it only allows
2747 // access to a single driver at a time. Thus, it is not possible to
2748 // have more than one simultaneous RtAudio stream.
2750 // This implementation also requires a number of external ASIO files
2751 // and a few global variables. The ASIO callback scheme does not
2752 // allow for the passing of user data, so we must create a global
2753 // pointer to our callbackInfo structure.
2755 // On unix systems, we make use of a pthread condition variable.
2756 // Since there is no equivalent in Windows, I hacked something based
2757 // on information found in
2758 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2760 #include "asiosys.h"
2762 #include "iasiothiscallresolver.h"
2763 #include "asiodrivers.h"
// File-scope ASIO state. The ASIO callback scheme passes no user data,
// so these must be globals; ASIO also permits only one active driver,
// so one RtAudio ASIO stream can exist at a time.
2766 static AsioDrivers drivers;
2767 static ASIOCallbacks asioCallbacks;
2768 static ASIODriverInfo driverInfo;
2769 static CallbackInfo *asioCallbackInfo;
// Set by the ASIO message callback on over/underruns — TODO confirm
// (the setter is outside this view).
2770 static bool asioXRun;
// Fields of struct AsioHandle (struct header outside this view).
2773 int drainCounter; // Tracks callback counts when draining
2774 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Per-channel ASIO buffer descriptors, allocated at stream open.
2775 ASIOBufferInfo *bufferInfos;
// Constructor initializer: zero drain state and buffer-info pointer.
2779 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2782 // Function declarations (definitions at end of section)
2783 static const char* getAsioErrorString( ASIOError result );
2784 static void sampleRateChanged( ASIOSampleRate sRate );
2785 static long asioMessages( long selector, long value, void* message, double* opt );
2787 RtApiAsio :: RtApiAsio()
2789 // ASIO cannot run on a multi-threaded appartment. You can call
2790 // CoInitialize beforehand, but it must be for appartment threading
2791 // (in which case, CoInitilialize will return S_FALSE here).
2792 coInitialized_ = false;
2793 HRESULT hr = CoInitialize( NULL );
2795 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2796 error( RtAudioError::WARNING );
2798 coInitialized_ = true;
2800 drivers.removeCurrentDriver();
2801 driverInfo.asioVersion = 2;
2803 // See note in DirectSound implementation about GetDesktopWindow().
2804 driverInfo.sysRef = GetForegroundWindow();
2807 RtApiAsio :: ~RtApiAsio()
2809 if ( stream_.state != STREAM_CLOSED ) closeStream();
2810 if ( coInitialized_ ) CoUninitialize();
2813 unsigned int RtApiAsio :: getDeviceCount( void )
2815 return (unsigned int) drivers.asioGetNumDev();
// Probe the capabilities of the ASIO driver at index 'device': channel
// counts, supported sample rates (from the shared SAMPLE_RATES table), the
// native data format of channel 0, and default-device flags.  Failures are
// reported as warnings via error(); info.probed stays false in that case.
RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Validate the requested index against the installed-driver count.
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );

  if ( device >= nDevices ) {
    errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
  if ( stream_.state != STREAM_CLOSED ) {
    if ( device >= devices_.size() ) {
      errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
      error( RtAudioError::WARNING );

    return devices_[ device ];

  // Look up the driver name, then load and initialize it for probing.
  char driverName[32];
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  info.name = driverName;

  if ( !drivers.loadDriver( driverName ) ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  result = ASIOInit( &driverInfo );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Determine the device channel information.
  long inputChannels, outputChannels;
  result = ASIOGetChannels( &inputChannels, &outputChannels );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();  // unload the driver before bailing out
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  info.outputChannels = outputChannels;
  info.inputChannels = inputChannels;
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Determine the supported sample rates.
  info.sampleRates.clear();
  for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
    result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
    if ( result == ASE_OK ) {
      info.sampleRates.push_back( SAMPLE_RATES[i] );

      // Prefer the highest supported rate that does not exceed 48 kHz.
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[i];

  // Determine supported data types ... just check first channel and assume rest are the same.
  ASIOChannelInfo channelInfo;
  channelInfo.channel = 0;
  channelInfo.isInput = true;
  if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
  result = ASIOGetChannelInfo( &channelInfo );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Map the ASIO sample type to the closest RtAudio format; MSB (big-endian)
  // variants are handled by byte swapping at stream-open time (see
  // probeDeviceOpen below).
  info.nativeFormats = 0;
  if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
    info.nativeFormats |= RTAUDIO_SINT16;
  else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
    info.nativeFormats |= RTAUDIO_SINT32;
  else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
    info.nativeFormats |= RTAUDIO_FLOAT64;
  else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
    info.nativeFormats |= RTAUDIO_SINT24;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  // Unload the driver now that probing is complete.
  drivers.removeCurrentDriver();
2937 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2939 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2940 object->callbackEvent( index );
2943 void RtApiAsio :: saveDeviceInfo( void )
2947 unsigned int nDevices = getDeviceCount();
2948 devices_.resize( nDevices );
2949 for ( unsigned int i=0; i<nDevices; i++ )
2950 devices_[i] = getDeviceInfo( i );
2953 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2954 unsigned int firstChannel, unsigned int sampleRate,
2955 RtAudioFormat format, unsigned int *bufferSize,
2956 RtAudio::StreamOptions *options )
2957 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2959 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2961 // For ASIO, a duplex stream MUST use the same driver.
2962 if ( isDuplexInput && stream_.device[0] != device ) {
2963 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2967 char driverName[32];
2968 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2969 if ( result != ASE_OK ) {
2970 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2971 errorText_ = errorStream_.str();
2975 // Only load the driver once for duplex stream.
2976 if ( !isDuplexInput ) {
2977 // The getDeviceInfo() function will not work when a stream is open
2978 // because ASIO does not allow multiple devices to run at the same
2979 // time. Thus, we'll probe the system before opening a stream and
2980 // save the results for use by getDeviceInfo().
2981 this->saveDeviceInfo();
2983 if ( !drivers.loadDriver( driverName ) ) {
2984 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2985 errorText_ = errorStream_.str();
2989 result = ASIOInit( &driverInfo );
2990 if ( result != ASE_OK ) {
2991 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2992 errorText_ = errorStream_.str();
2997 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2998 bool buffersAllocated = false;
2999 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3000 unsigned int nChannels;
3003 // Check the device channel count.
3004 long inputChannels, outputChannels;
3005 result = ASIOGetChannels( &inputChannels, &outputChannels );
3006 if ( result != ASE_OK ) {
3007 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3008 errorText_ = errorStream_.str();
3012 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3013 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3014 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3015 errorText_ = errorStream_.str();
3018 stream_.nDeviceChannels[mode] = channels;
3019 stream_.nUserChannels[mode] = channels;
3020 stream_.channelOffset[mode] = firstChannel;
3022 // Verify the sample rate is supported.
3023 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3024 if ( result != ASE_OK ) {
3025 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3026 errorText_ = errorStream_.str();
3030 // Get the current sample rate
3031 ASIOSampleRate currentRate;
3032 result = ASIOGetSampleRate( ¤tRate );
3033 if ( result != ASE_OK ) {
3034 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3035 errorText_ = errorStream_.str();
3039 // Set the sample rate only if necessary
3040 if ( currentRate != sampleRate ) {
3041 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3042 if ( result != ASE_OK ) {
3043 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3044 errorText_ = errorStream_.str();
3049 // Determine the driver data type.
3050 ASIOChannelInfo channelInfo;
3051 channelInfo.channel = 0;
3052 if ( mode == OUTPUT ) channelInfo.isInput = false;
3053 else channelInfo.isInput = true;
3054 result = ASIOGetChannelInfo( &channelInfo );
3055 if ( result != ASE_OK ) {
3056 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3057 errorText_ = errorStream_.str();
3061 // Assuming WINDOWS host is always little-endian.
3062 stream_.doByteSwap[mode] = false;
3063 stream_.userFormat = format;
3064 stream_.deviceFormat[mode] = 0;
3065 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3066 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3067 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3069 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3070 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3071 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3073 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3074 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3075 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3077 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3078 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3079 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3081 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3082 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3083 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3086 if ( stream_.deviceFormat[mode] == 0 ) {
3087 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3088 errorText_ = errorStream_.str();
3092 // Set the buffer size. For a duplex stream, this will end up
3093 // setting the buffer size based on the input constraints, which
3095 long minSize, maxSize, preferSize, granularity;
3096 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3097 if ( result != ASE_OK ) {
3098 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3099 errorText_ = errorStream_.str();
3103 if ( isDuplexInput ) {
3104 // When this is the duplex input (output was opened before), then we have to use the same
3105 // buffersize as the output, because it might use the preferred buffer size, which most
3106 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3107 // So instead of throwing an error, make them equal. The caller uses the reference
3108 // to the "bufferSize" param as usual to set up processing buffers.
3110 *bufferSize = stream_.bufferSize;
3113 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3114 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3115 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3116 else if ( granularity == -1 ) {
3117 // Make sure bufferSize is a power of two.
3118 int log2_of_min_size = 0;
3119 int log2_of_max_size = 0;
3121 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3122 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3123 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3126 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3127 int min_delta_num = log2_of_min_size;
3129 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3130 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3131 if (current_delta < min_delta) {
3132 min_delta = current_delta;
3137 *bufferSize = ( (unsigned int)1 << min_delta_num );
3138 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3139 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3141 else if ( granularity != 0 ) {
3142 // Set to an even multiple of granularity, rounding up.
3143 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3148 // we don't use it anymore, see above!
3149 // Just left it here for the case...
3150 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3151 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3156 stream_.bufferSize = *bufferSize;
3157 stream_.nBuffers = 2;
3159 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3160 else stream_.userInterleaved = true;
3162 // ASIO always uses non-interleaved buffers.
3163 stream_.deviceInterleaved[mode] = false;
3165 // Allocate, if necessary, our AsioHandle structure for the stream.
3166 if ( handle == 0 ) {
3168 handle = new AsioHandle;
3170 catch ( std::bad_alloc& ) {
3171 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3174 handle->bufferInfos = 0;
3176 // Create a manual-reset event.
3177 handle->condition = CreateEvent( NULL, // no security
3178 TRUE, // manual-reset
3179 FALSE, // non-signaled initially
3181 stream_.apiHandle = (void *) handle;
3184 // Create the ASIO internal buffers. Since RtAudio sets up input
3185 // and output separately, we'll have to dispose of previously
3186 // created output buffers for a duplex stream.
3187 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3188 ASIODisposeBuffers();
3189 if ( handle->bufferInfos ) free( handle->bufferInfos );
3192 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3194 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3195 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3196 if ( handle->bufferInfos == NULL ) {
3197 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3198 errorText_ = errorStream_.str();
3202 ASIOBufferInfo *infos;
3203 infos = handle->bufferInfos;
3204 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3205 infos->isInput = ASIOFalse;
3206 infos->channelNum = i + stream_.channelOffset[0];
3207 infos->buffers[0] = infos->buffers[1] = 0;
3209 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3210 infos->isInput = ASIOTrue;
3211 infos->channelNum = i + stream_.channelOffset[1];
3212 infos->buffers[0] = infos->buffers[1] = 0;
3215 // prepare for callbacks
3216 stream_.sampleRate = sampleRate;
3217 stream_.device[mode] = device;
3218 stream_.mode = isDuplexInput ? DUPLEX : mode;
3220 // store this class instance before registering callbacks, that are going to use it
3221 asioCallbackInfo = &stream_.callbackInfo;
3222 stream_.callbackInfo.object = (void *) this;
3224 // Set up the ASIO callback structure and create the ASIO data buffers.
3225 asioCallbacks.bufferSwitch = &bufferSwitch;
3226 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3227 asioCallbacks.asioMessage = &asioMessages;
3228 asioCallbacks.bufferSwitchTimeInfo = NULL;
3229 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3230 if ( result != ASE_OK ) {
3231 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3232 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3233 // In that case, let's be naïve and try that instead.
3234 *bufferSize = preferSize;
3235 stream_.bufferSize = *bufferSize;
3236 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3239 if ( result != ASE_OK ) {
3240 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3241 errorText_ = errorStream_.str();
3244 buffersAllocated = true;
3245 stream_.state = STREAM_STOPPED;
3247 // Set flags for buffer conversion.
3248 stream_.doConvertBuffer[mode] = false;
3249 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3250 stream_.doConvertBuffer[mode] = true;
3251 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3252 stream_.nUserChannels[mode] > 1 )
3253 stream_.doConvertBuffer[mode] = true;
3255 // Allocate necessary internal buffers
3256 unsigned long bufferBytes;
3257 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3258 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3259 if ( stream_.userBuffer[mode] == NULL ) {
3260 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3264 if ( stream_.doConvertBuffer[mode] ) {
3266 bool makeBuffer = true;
3267 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3268 if ( isDuplexInput && stream_.deviceBuffer ) {
3269 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3270 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3274 bufferBytes *= *bufferSize;
3275 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3276 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3277 if ( stream_.deviceBuffer == NULL ) {
3278 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3284 // Determine device latencies
3285 long inputLatency, outputLatency;
3286 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3287 if ( result != ASE_OK ) {
3288 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3289 errorText_ = errorStream_.str();
3290 error( RtAudioError::WARNING); // warn but don't fail
3293 stream_.latency[0] = outputLatency;
3294 stream_.latency[1] = inputLatency;
3297 // Setup the buffer conversion information structure. We don't use
3298 // buffers to do channel offsets, so we override that parameter
3300 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3305 if ( !isDuplexInput ) {
3306 // the cleanup for error in the duplex input, is done by RtApi::openStream
3307 // So we clean up for single channel only
3309 if ( buffersAllocated )
3310 ASIODisposeBuffers();
3312 drivers.removeCurrentDriver();
3315 CloseHandle( handle->condition );
3316 if ( handle->bufferInfos )
3317 free( handle->bufferInfos );
3320 stream_.apiHandle = 0;
3324 if ( stream_.userBuffer[mode] ) {
3325 free( stream_.userBuffer[mode] );
3326 stream_.userBuffer[mode] = 0;
3329 if ( stream_.deviceBuffer ) {
3330 free( stream_.deviceBuffer );
3331 stream_.deviceBuffer = 0;
3336 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Close the currently open stream: stop the hardware if running, dispose
// the driver's buffers, unload the driver, and free the per-stream handle
// and conversion buffers.  Leaves the stream in the STREAM_CLOSED state.
void RtApiAsio :: closeStream()
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );

  if ( stream_.state == STREAM_RUNNING ) {
    stream_.state = STREAM_STOPPED;

  ASIODisposeBuffers();
  drivers.removeCurrentDriver();

  // Release the Win32 event and the per-channel buffer info array.
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  CloseHandle( handle->condition );
  if ( handle->bufferInfos )
    free( handle->bufferInfos );

  stream_.apiHandle = 0;

  // Free the user-side buffers for both directions (0 = output, 1 = input).
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  // Free the shared device (format-conversion) buffer.
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
// Cleared on each successful startStream(); its other readers/writers are
// outside this excerpt.
bool stopThreadCalled = false;
// Start the open stream: record a start timestamp (when available), call
// ASIOStart(), and reset the drain/stop bookkeeping on success.  Raises a
// SYSTEM_ERROR through error() if the driver refuses to start.
void RtApiAsio :: startStream()
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiAsio::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );

#if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  ASIOError result = ASIOStart();
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
    errorText_ = errorStream_.str();

  // Reset per-run callback state before marking the stream running.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  ResetEvent( handle->condition );
  stream_.state = STREAM_RUNNING;

  stopThreadCalled = false;

  if ( result == ASE_OK ) return;
  error( RtAudioError::SYSTEM_ERROR );
// Stop the stream.  For output/duplex streams the pending output is drained
// first: drainCounter is raised and this thread blocks on handle->condition
// until callbackEvent() signals that the drain has completed.
void RtApiAsio :: stopStream()
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      WaitForSingleObject( handle->condition, INFINITE );  // block until signaled

  stream_.state = STREAM_STOPPED;

  ASIOError result = ASIOStop();
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
    errorText_ = errorStream_.str();

  if ( result == ASE_OK ) return;
  error( RtAudioError::SYSTEM_ERROR );
// Abort the stream.  As explained below, draining is deliberately skipped,
// so aborting currently behaves the same as stopping.
void RtApiAsio :: abortStream()
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  // The following lines were commented-out because some behavior was
  // noted where the device buffers need to be zeroed to avoid
  // continuing sound, even when the device buffers are completely
  // disposed.  So now, calling abort is the same as calling stop.
  // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  // handle->drainCounter = 2;
3461 // This function will be called by a spawned thread when the user
3462 // callback function signals that the stream should be stopped or
3463 // aborted. It is necessary to handle it this way because the
3464 // callbackEvent() function must return before the ASIOStop()
3465 // function will return.
static unsigned __stdcall asioStopStream( void *ptr )
  // Thread entry point: recover the RtApiAsio instance from the callback
  // info and call stopStream() from outside the ASIO callback context.
  CallbackInfo *info = (CallbackInfo *) ptr;
  RtApiAsio *object = (RtApiAsio *) info->object;

  object->stopStream();
// Core ASIO buffer-switch handler.  Runs the user callback, converts and/or
// byte-swaps between the user buffers and the driver's non-interleaved
// per-channel buffers, and manages drain/stop signaling.  'bufferIndex'
// selects which half of the ASIO double buffer to fill/read this cycle.
bool RtApiAsio :: callbackEvent( long bufferIndex )
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal if finished.
  if ( handle->drainCounter > 3 ) {

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == false )
      SetEvent( handle->condition );  // wake the thread blocked in stopStream()
    else { // spawn a thread to stop the stream
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );

  // Invoke user callback to get fresh output data UNLESS we are
  // draining the stream.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    if ( stream_.mode != INPUT && asioXRun == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
    if ( stream_.mode != OUTPUT && asioXRun == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    // Return value 2 => abort immediately; 1 => drain then stop internally.
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;

  unsigned int nChannels, bufferBytes, i, j;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );

    else if ( stream_.doConvertBuffer[0] ) {

      // Convert user-format data into the device buffer, then de-interleave
      // it into the driver's per-channel output buffers.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[0],
                        stream_.deviceFormat[0] );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );

      // No conversion needed: copy straight from the user buffer.
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.userBuffer[0],
                        stream_.bufferSize * stream_.nUserChannels[0],
                        stream_.userFormat );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );

    // Don't bother draining input
    if ( handle->drainCounter ) {
      handle->drainCounter++;

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

    if (stream_.doConvertBuffer[1]) {

      // Always interleave ASIO input data.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue )
          memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                  handle->bufferInfos[i].buffers[bufferIndex],

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[1],
                        stream_.deviceFormat[1] );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

      // No conversion needed: copy driver channels straight into the user buffer.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
          memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                  handle->bufferInfos[i].buffers[bufferIndex],

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.userBuffer[1],
                        stream_.bufferSize * stream_.nUserChannels[1],
                        stream_.userFormat );

  // The following call was suggested by Malte Clasen.  While the API
  // documentation indicates it should not be required, some device
  // drivers apparently do not function correctly without it.

  RtApi::tickStreamTime();
// ASIO driver callback: the driver reports a sample-rate change.  RtAudio's
// buffers and conversions were negotiated for the original rate, so the
// stream is stopped (the user must reopen/restart it at the new rate).
static void sampleRateChanged( ASIOSampleRate sRate )
  // The ASIO documentation says that this usually only happens during
  // external sync.  Audio processing is not stopped by the driver,
  // actual sample rate might not have even changed, maybe only the
  // sample rate status of an AES/EBU or S/PDIF digital input at the
  // audio device.

  RtApi *object = (RtApi *) asioCallbackInfo->object;
    // A failure while stopping is only reported, never rethrown from the callback.
    object->stopStream();
  catch ( RtAudioError &exception ) {
    std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;

  std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO driver message callback.  The driver queries host capabilities and
// reports events (reset/resync requests, latency changes, version queries)
// through this single dispatch function; the meaning of the returned long
// depends on 'selector' (the return-value bookkeeping lines are not visible
// in this excerpt).
static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
  switch( selector ) {
  case kAsioSelectorSupported:
    if ( value == kAsioResetRequest
         || value == kAsioEngineVersion
         || value == kAsioResyncRequest
         || value == kAsioLatenciesChanged
         // The following three were added for ASIO 2.0, you don't
         // necessarily have to support them.
         || value == kAsioSupportsTimeInfo
         || value == kAsioSupportsTimeCode
         || value == kAsioSupportsInputMonitor)
  case kAsioResetRequest:
    // Defer the task and perform the reset of the driver during the
    // next "safe" situation.  You cannot reset the driver right now,
    // as this code is called from the driver.  Reset the driver is
    // done by completely destruct is. I.e. ASIOStop(),
    // ASIODisposeBuffers(), Destruction Afterwards you initialize the
    // driver again.
    std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
  case kAsioResyncRequest:
    // This informs the application that the driver encountered some
    // non-fatal data loss.  It is used for synchronization purposes
    // of different media.  Added mainly to work around the Win16Mutex
    // problems in Windows 95/98 with the Windows Multimedia system,
    // which could lose data because the Mutex was held too long by
    // another thread.  However a driver can issue it in other
    // situations, too.
    // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
  case kAsioLatenciesChanged:
    // This will inform the host application that the drivers were
    // latencies changed.  Beware, it this does not mean that the
    // buffer sizes have changed!  You might need to update internal
    // delay data.
    std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
  case kAsioEngineVersion:
    // Return the supported ASIO version of the host application.  If
    // a host application does not implement this selector, ASIO 1.0
    // is assumed by the driver.
  case kAsioSupportsTimeInfo:
    // Informs the driver whether the
    // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
    // For compatibility with ASIO 1.0 drivers the host application
    // should always support the "old" bufferSwitch method, too.
  case kAsioSupportsTimeCode:
    // Informs the driver whether application is interested in time
    // code info.  If an application does not need to know about time
    // code, the driver has less work to do.
3720 static const char* getAsioErrorString( ASIOError result )
3728 static const Messages m[] =
3730 { ASE_NotPresent, "Hardware input or output is not present or available." },
3731 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3732 { ASE_InvalidParameter, "Invalid input parameter." },
3733 { ASE_InvalidMode, "Invalid mode." },
3734 { ASE_SPNotAdvancing, "Sample position not advancing." },
3735 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3736 { ASE_NoMemory, "Not enough memory to complete the request." }
3739 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3740 if ( m[i].value == result ) return m[i].message;
3742 return "Unknown error.";
3745 //******************** End of __WINDOWS_ASIO__ *********************//
3749 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3751 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3752 // - Introduces support for the Windows WASAPI API
3753 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3754 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3755 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3762 #include <mferror.h>
3764 #include <mftransform.h>
3765 #include <wmcodecdsp.h>
3767 #include <audioclient.h>
3769 #include <mmdeviceapi.h>
3770 #include <functiondiscoverykeys_devpkey.h>
3772 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3773 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3776 #ifndef MFSTARTUP_NOSOCKET
3777 #define MFSTARTUP_NOSOCKET 0x1
3781 #pragma comment( lib, "ksuser" )
3782 #pragma comment( lib, "mfplat.lib" )
3783 #pragma comment( lib, "mfuuid.lib" )
3784 #pragma comment( lib, "wmcodecdspuuid" )
3787 //=============================================================================
3789 #define SAFE_RELEASE( objectPtr )\
3792 objectPtr->Release();\
3796 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3798 //-----------------------------------------------------------------------------
3800 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3801 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3802 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3803 // provide intermediate storage for read / write synchronization.
3817 // sets the length of the internal ring buffer
// (Re)allocate the internal ring buffer to hold bufferSize elements of
// formatBytes bytes each (zero-filled via calloc) and record the length.
// NOTE(review): this listing omits intervening original lines (3819-3820,
// 3822) which presumably free any previously allocated buffer_ and reset
// the indices -- confirm against the full source.
3818 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3821 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
// bufferSize_ is stored in elements (samples), not bytes.
3823 bufferSize_ = bufferSize;
3828 // attempt to push a buffer into the ring buffer at the current "in" index
// Copies bufferSize samples of the given RtAudioFormat from `buffer` into
// the ring at inIndex_, wrapping at the end of the ring.  Returns false
// (writing nothing) when input is invalid or the write would overtake the
// unread "out" index.
// NOTE(review): the listing has gaps here (e.g. the switch header, the
// RTAUDIO_SINT8 case label, per-case break statements and closing braces
// around original lines 3834-3889 are not visible).
3829 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3831 if ( !buffer || // incoming buffer is NULL
3832 bufferSize == 0 || // incoming buffer has no data
3833 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "out" index so the overlap test below also works when the
// incoming write region crosses the ring's wrap-around point.
3838 unsigned int relOutIndex = outIndex_;
3839 unsigned int inIndexEnd = inIndex_ + bufferSize;
3840 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3841 relOutIndex += bufferSize_;
3844 // the "IN" index CAN BEGIN at the "OUT" index
3845 // the "IN" index CANNOT END at the "OUT" index
3846 if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
3847 return false; // not enough space between "in" index and "out" index
3850 // copy buffer from external to internal
// fromZeroSize = element count that wraps past the ring's end and lands at
// offset 0; fromInSize = element count written starting at inIndex_.
3851 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3852 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3853 int fromInSize = bufferSize - fromZeroSize;
// Per-format copy pair: indices are in samples, so the typed pointer casts
// scale the byte offset by the sample width for each format.
3858 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3859 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3861 case RTAUDIO_SINT16:
3862 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3863 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3865 case RTAUDIO_SINT24:
3866 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3867 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3869 case RTAUDIO_SINT32:
3870 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3871 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3873 case RTAUDIO_FLOAT32:
3874 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3875 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3877 case RTAUDIO_FLOAT64:
3878 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3879 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3883 // update "in" index
// Advance and wrap the write position for the next push.
3884 inIndex_ += bufferSize;
3885 inIndex_ %= bufferSize_;
3890 // attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror of pushBuffer: copies bufferSize samples of the given format out
// of the ring at outIndex_ into `buffer`, wrapping at the ring's end.
// Returns false (reading nothing) when input is invalid or not enough data
// has been pushed yet ("out" may not pass "in").
// NOTE(review): the listing has gaps here (switch header, RTAUDIO_SINT8
// case, break statements and closing braces around original lines
// 3896-3951 are not visible).
3891 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3893 if ( !buffer || // incoming buffer is NULL
3894 bufferSize == 0 || // incoming buffer has no data
3895 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "in" index so the overlap test below also works when the
// read region crosses the ring's wrap-around point.
3900 unsigned int relInIndex = inIndex_;
3901 unsigned int outIndexEnd = outIndex_ + bufferSize;
3902 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3903 relInIndex += bufferSize_;
3906 // the "OUT" index CANNOT BEGIN at the "IN" index
3907 // the "OUT" index CAN END at the "IN" index
3908 if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3909 return false; // not enough space between "out" index and "in" index
3912 // copy buffer from internal to external
// fromZeroSize = element count read from offset 0 after wrapping;
// fromOutSize = element count read starting at outIndex_.
3913 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3914 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3915 int fromOutSize = bufferSize - fromZeroSize;
// Per-format copy pair, sample-indexed via typed pointer casts.
3920 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3921 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3923 case RTAUDIO_SINT16:
3924 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3925 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3927 case RTAUDIO_SINT24:
3928 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3929 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3931 case RTAUDIO_SINT32:
3932 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3933 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3935 case RTAUDIO_FLOAT32:
3936 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3937 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3939 case RTAUDIO_FLOAT64:
3940 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3941 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3945 // update "out" index
// Advance and wrap the read position for the next pull.
3946 outIndex_ += bufferSize;
3947 outIndex_ %= bufferSize_;
// Ring length in elements/samples (see setBufferSize -- TODO confirm the
// element vs. byte unit against the full source).
3954 unsigned int bufferSize_;
// Next write position (advanced by pushBuffer, modulo bufferSize_).
3955 unsigned int inIndex_;
// Next read position (advanced by pullBuffer, modulo bufferSize_).
3956 unsigned int outIndex_;
3959 //-----------------------------------------------------------------------------
3961 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3962 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3963 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
// Thin wrapper around the Media Foundation Audio Resampler DSP
// (CLSID_CResamplerMediaObject) used to convert between the hardware mix
// rate and the user-requested rate during the stream callback loop.
// NOTE(review): the listing is gapped throughout this class (closing
// braces, #endif lines, access specifiers and some statements around the
// numbered lines are not visible); comments describe only what is shown.
3964 class WasapiResampler
// Configure and start the resampler MFT for the given PCM/float layout.
// Sample width and channel count must match the stream's device format.
3967   WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
3968                    unsigned int inSampleRate, unsigned int outSampleRate )
3969     : _bytesPerSample( bitsPerSample / 8 )
3970     , _channelCount( channelCount )
// Output/input rate ratio; used to size the output buffer in Convert().
3971     , _sampleRatio( ( float ) outSampleRate / inSampleRate )
3972     , _transformUnk( NULL )
3973     , _transform( NULL )
3974     , _mediaType( NULL )
3975     , _inputMediaType( NULL )
3976     , _outputMediaType( NULL )
3978     #ifdef __IWMResamplerProps_FWD_DEFINED__
3979     , _resamplerProps( NULL )
3982     // 1. Initialization
3984     MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
3986     // 2. Create Resampler Transform Object
3988     CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
3989                       IID_IUnknown, ( void** ) &_transformUnk );
3991     _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
3993     #ifdef __IWMResamplerProps_FWD_DEFINED__
3994     _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
3995     _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
3998     // 3. Specify input / output format
// Build one media type describing the input, then copy it for input and
// output and override only the rate-dependent attributes on the output.
4000     MFCreateMediaType( &_mediaType );
4001     _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4002     _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4003     _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4004     _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4005     _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4006     _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4007     _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4008     _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
4010     MFCreateMediaType( &_inputMediaType );
4011     _mediaType->CopyAllItems( _inputMediaType );
4013     _transform->SetInputType( 0, _inputMediaType, 0 );
4015     MFCreateMediaType( &_outputMediaType );
4016     _mediaType->CopyAllItems( _outputMediaType );
4018     _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4019     _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4021     _transform->SetOutputType( 0, _outputMediaType, 0 );
4023     // 4. Send stream start messages to Resampler
4025     _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4026     _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4027     _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
// Destructor (signature not visible in this listing): stops the MFT and
// releases every COM object acquired in the constructor.
4032     // 8. Send stream stop messages to Resampler
4034     _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4035     _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
4041     SAFE_RELEASE( _transformUnk );
4042     SAFE_RELEASE( _transform );
4043     SAFE_RELEASE( _mediaType );
4044     SAFE_RELEASE( _inputMediaType );
4045     SAFE_RELEASE( _outputMediaType );
4047     #ifdef __IWMResamplerProps_FWD_DEFINED__
4048     SAFE_RELEASE( _resamplerProps );
// Resample inSampleCount frames from inBuffer into outBuffer; the number
// of frames actually produced is returned through outSampleCount.  When
// the MFT has buffered data but cannot emit output yet, outSampleCount
// is presumably left at 0 by a gap line not visible here -- confirm.
4052   void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
4054     unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4055     if ( _sampleRatio == 1 )
4057       // no sample rate conversion required
4058       memcpy( outBuffer, inBuffer, inputBufferSize );
4059       outSampleCount = inSampleCount;
// Worst-case output size: scaled input plus one extra frame of slack for
// rounding inside the resampler.
4063     unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4065     IMFMediaBuffer* rInBuffer;
4066     IMFSample* rInSample;
4067     BYTE* rInByteBuffer = NULL;
4069     // 5. Create Sample object from input data
4071     MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4073     rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4074     memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4075     rInBuffer->Unlock();
4076     rInByteBuffer = NULL;
4078     rInBuffer->SetCurrentLength( inputBufferSize );
4080     MFCreateSample( &rInSample );
4081     rInSample->AddBuffer( rInBuffer );
4083     // 6. Pass input data to Resampler
4085     _transform->ProcessInput( 0, rInSample, 0 );
// The sample keeps its own reference to the media buffer; our local
// references can be released immediately after ProcessInput.
4087     SAFE_RELEASE( rInBuffer );
4088     SAFE_RELEASE( rInSample );
4090     // 7. Perform sample rate conversion
4092     IMFMediaBuffer* rOutBuffer = NULL;
4093     BYTE* rOutByteBuffer = NULL;
4095     MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4097     DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4099     // 7.1 Create Sample object for output data
4101     memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4102     MFCreateSample( &( rOutDataBuffer.pSample ) );
4103     MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4104     rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4105     rOutDataBuffer.dwStreamID = 0;
4106     rOutDataBuffer.dwStatus = 0;
4107     rOutDataBuffer.pEvents = NULL;
4109     // 7.2 Get output data from Resampler
// MF_E_TRANSFORM_NEED_MORE_INPUT is the expected "nothing to emit yet"
// result, not an error; release the output objects and return early.
4111     if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4114       SAFE_RELEASE( rOutBuffer );
4115       SAFE_RELEASE( rOutDataBuffer.pSample );
4119     // 7.3 Write output data to outBuffer
// Re-fetch the (possibly replaced) buffer as one contiguous block and
// query how many bytes the MFT actually produced.
4121     SAFE_RELEASE( rOutBuffer );
4122     rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4123     rOutBuffer->GetCurrentLength( &rBytes );
4125     rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4126     memcpy( outBuffer, rOutByteBuffer, rBytes );
4127     rOutBuffer->Unlock();
4128     rOutByteBuffer = NULL;
// Convert produced byte count back to frames for the caller.
4130     outSampleCount = rBytes / _bytesPerSample / _channelCount;
4131     SAFE_RELEASE( rOutBuffer );
4132     SAFE_RELEASE( rOutDataBuffer.pSample );
4136   unsigned int _bytesPerSample;
4137   unsigned int _channelCount;
4140   IUnknown* _transformUnk;
4141   IMFTransform* _transform;
4142   IMFMediaType* _mediaType;
4143   IMFMediaType* _inputMediaType;
4144   IMFMediaType* _outputMediaType;
4146   #ifdef __IWMResamplerProps_FWD_DEFINED__
4147   IWMResamplerProps* _resamplerProps;
4151 //-----------------------------------------------------------------------------
4153 // A structure to hold various information related to the WASAPI implementation.
// Per-stream WASAPI state stashed in stream_.apiHandle: the two audio
// clients (capture/render), their service interfaces, and the event
// handles used for event-driven buffer notifications.  Released by
// RtApiWasapi::closeStream().
4156   IAudioClient* captureAudioClient;
4157   IAudioClient* renderAudioClient;
4158   IAudioCaptureClient* captureClient;
4159   IAudioRenderClient* renderClient;
4160   HANDLE captureEvent;
// All members start null so closeStream can release unconditionally.
4164     : captureAudioClient( NULL ),
4165       renderAudioClient( NULL ),
4166       captureClient( NULL ),
4167       renderClient( NULL ),
4168       captureEvent( NULL ),
4169       renderEvent( NULL ) {}
4172 //=============================================================================
// Initialize COM for this thread and create the MMDevice enumerator used
// by all device queries.  On pre-Vista Windows the enumerator class does
// not exist; the failure is deliberately tolerated (deviceEnumerator_
// stays NULL and getDeviceCount() reports no devices).
4174 RtApiWasapi::RtApiWasapi()
4175   : coInitialized_( false ), deviceEnumerator_( NULL )
4177   // WASAPI can run either apartment or multi-threaded
4178   HRESULT hr = CoInitialize( NULL );
// Remember whether we own a CoInitialize so ~RtApiWasapi can balance it.
4179   if ( !FAILED( hr ) )
4180     coInitialized_ = true;
4182   // Instantiate device enumerator
4183   hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4184                          CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4185                          ( void** ) &deviceEnumerator_ );
4187   // If this runs on an old Windows, it will fail. Ignore and proceed.
4189   deviceEnumerator_ = NULL;
4192 //-----------------------------------------------------------------------------
// Close any open stream, release the device enumerator, and balance the
// constructor's CoInitialize.
// NOTE(review): the listing omits the statement bodies (presumably
// closeStream() after line 4196 and CoUninitialize() after 4202) -- confirm.
4194 RtApiWasapi::~RtApiWasapi()
4196   if ( stream_.state != STREAM_CLOSED )
4199   SAFE_RELEASE( deviceEnumerator_ );
4201   // If this object previously called CoInitialize()
4202   if ( coInitialized_ )
4206 //=============================================================================
// Return the number of active WASAPI endpoints: capture devices plus
// render devices.  Returns via error() on any enumeration failure.
// NOTE(review): the listing omits the goto/Exit label lines that route
// each FAILED branch to the shared cleanup below -- comments reflect only
// what is visible.
4208 unsigned int RtApiWasapi::getDeviceCount( void )
4210   unsigned int captureDeviceCount = 0;
4211   unsigned int renderDeviceCount = 0;
4213   IMMDeviceCollection* captureDevices = NULL;
4214   IMMDeviceCollection* renderDevices = NULL;
// Enumerator creation can fail on old Windows (see constructor).
4216   if ( !deviceEnumerator_ )
4219   // Count capture devices
4221   HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4222   if ( FAILED( hr ) ) {
4223     errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4227   hr = captureDevices->GetCount( &captureDeviceCount );
4228   if ( FAILED( hr ) ) {
4229     errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4233   // Count render devices
4234   hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4235   if ( FAILED( hr ) ) {
4236     errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4240   hr = renderDevices->GetCount( &renderDeviceCount );
4241   if ( FAILED( hr ) ) {
4242     errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4247   // release all references
4248   SAFE_RELEASE( captureDevices );
4249   SAFE_RELEASE( renderDevices );
// On success report the combined device count; otherwise surface the
// recorded error text through the RtAudio error mechanism.
4251   if ( errorText_.empty() )
4252     return captureDeviceCount + renderDeviceCount;
4254   error( RtAudioError::DRIVER_ERROR );
4258 //-----------------------------------------------------------------------------
// Probe one device and fill an RtAudio::DeviceInfo: name, default-device
// flags, channel counts, supported sample rates and native formats.
// Device indices cover render devices first ([0, renderDeviceCount)) and
// capture devices after them -- the same ordering used elsewhere in this
// API class.
// NOTE(review): the listing omits the goto/Exit plumbing, else lines and
// several closing braces between the numbered lines; comments describe
// only what is visible.
4260 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4262   RtAudio::DeviceInfo info;
4263   unsigned int captureDeviceCount = 0;
4264   unsigned int renderDeviceCount = 0;
4265   std::string defaultDeviceName;
4266   bool isCaptureDevice = false;
4268   PROPVARIANT deviceNameProp;
4269   PROPVARIANT defaultDeviceNameProp;
4271   IMMDeviceCollection* captureDevices = NULL;
4272   IMMDeviceCollection* renderDevices = NULL;
4273   IMMDevice* devicePtr = NULL;
4274   IMMDevice* defaultDevicePtr = NULL;
4275   IAudioClient* audioClient = NULL;
4276   IPropertyStore* devicePropStore = NULL;
4277   IPropertyStore* defaultDevicePropStore = NULL;
4279   WAVEFORMATEX* deviceFormat = NULL;
4280   WAVEFORMATEX* closestMatchFormat = NULL;
// Pessimistic default; presumably set true on full success in a line not
// visible in this listing -- confirm.
4283   info.probed = false;
4285   // Count capture devices
4287   RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4288   HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4289   if ( FAILED( hr ) ) {
4290     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4294   hr = captureDevices->GetCount( &captureDeviceCount );
4295   if ( FAILED( hr ) ) {
4296     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4300   // Count render devices
4301   hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4302   if ( FAILED( hr ) ) {
4303     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4307   hr = renderDevices->GetCount( &renderDeviceCount );
4308   if ( FAILED( hr ) ) {
4309     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4313   // validate device index
4314   if ( device >= captureDeviceCount + renderDeviceCount ) {
4315     errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4316     errorType = RtAudioError::INVALID_USE;
4320   // determine whether index falls within capture or render devices
4321   if ( device >= renderDeviceCount ) {
4322     hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4323     if ( FAILED( hr ) ) {
4324       errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4327     isCaptureDevice = true;
4330     hr = renderDevices->Item( device, &devicePtr );
4331     if ( FAILED( hr ) ) {
4332       errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4335     isCaptureDevice = false;
4338   // get default device name
4339   if ( isCaptureDevice ) {
4340     hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4341     if ( FAILED( hr ) ) {
4342       errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4347     hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4348     if ( FAILED( hr ) ) {
4349       errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
// Read the default endpoint's friendly name so the probed device can be
// compared against it by name below.
4354   hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4355   if ( FAILED( hr ) ) {
4356     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4359   PropVariantInit( &defaultDeviceNameProp );
4361   hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4362   if ( FAILED( hr ) ) {
4363     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4367   defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4370   hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4371   if ( FAILED( hr ) ) {
4372     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4376   PropVariantInit( &deviceNameProp );
4378   hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4379   if ( FAILED( hr ) ) {
4380     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4384   info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default-device detection is by friendly-name comparison, so two devices
// with identical names would both report as default.
4387   if ( isCaptureDevice ) {
4388     info.isDefaultInput = info.name == defaultDeviceName;
4389     info.isDefaultOutput = false;
4392     info.isDefaultInput = false;
4393     info.isDefaultOutput = info.name == defaultDeviceName;
// Activate an IAudioClient only to query the shared-mode mix format.
4397   hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4398   if ( FAILED( hr ) ) {
4399     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4403   hr = audioClient->GetMixFormat( &deviceFormat );
4404   if ( FAILED( hr ) ) {
4405     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4409   if ( isCaptureDevice ) {
4410     info.inputChannels = deviceFormat->nChannels;
4411     info.outputChannels = 0;
4412     info.duplexChannels = 0;
4415     info.inputChannels = 0;
4416     info.outputChannels = deviceFormat->nChannels;
4417     info.duplexChannels = 0;
4421   info.sampleRates.clear();
4423   // allow support for all sample rates as we have a built-in sample rate converter
4424   for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4425     info.sampleRates.push_back( SAMPLE_RATES[i] );
4427   info.preferredSampleRate = deviceFormat->nSamplesPerSec;
// Map the mix format's tag / extensible subformat and bit depth onto the
// RtAudio native-format bitmask.
4430   info.nativeFormats = 0;
4432   if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4433        ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4434          ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4436     if ( deviceFormat->wBitsPerSample == 32 ) {
4437       info.nativeFormats |= RTAUDIO_FLOAT32;
4439     else if ( deviceFormat->wBitsPerSample == 64 ) {
4440       info.nativeFormats |= RTAUDIO_FLOAT64;
4443   else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4444             ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4445               ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4447     if ( deviceFormat->wBitsPerSample == 8 ) {
4448       info.nativeFormats |= RTAUDIO_SINT8;
4450     else if ( deviceFormat->wBitsPerSample == 16 ) {
4451       info.nativeFormats |= RTAUDIO_SINT16;
4453     else if ( deviceFormat->wBitsPerSample == 24 ) {
4454       info.nativeFormats |= RTAUDIO_SINT24;
4456     else if ( deviceFormat->wBitsPerSample == 32 ) {
4457       info.nativeFormats |= RTAUDIO_SINT32;
4465   // release all references
4466   PropVariantClear( &deviceNameProp );
4467   PropVariantClear( &defaultDeviceNameProp );
4469   SAFE_RELEASE( captureDevices );
4470   SAFE_RELEASE( renderDevices );
4471   SAFE_RELEASE( devicePtr );
4472   SAFE_RELEASE( defaultDevicePtr );
4473   SAFE_RELEASE( audioClient );
4474   SAFE_RELEASE( devicePropStore );
4475   SAFE_RELEASE( defaultDevicePropStore );
// GetMixFormat allocates via CoTaskMemAlloc, so free with CoTaskMemFree.
4477   CoTaskMemFree( deviceFormat );
4478   CoTaskMemFree( closestMatchFormat );
4480   if ( !errorText_.empty() )
4485 //-----------------------------------------------------------------------------
// Linear scan for the device whose probed info reports isDefaultOutput.
// NOTE(review): O(n) getDeviceInfo probes per iteration; the return
// statements (matched index / fallback 0) are in lines omitted from this
// listing -- confirm.
4487 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4489   for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4490     if ( getDeviceInfo( i ).isDefaultOutput ) {
4498 //-----------------------------------------------------------------------------
// Linear scan for the device whose probed info reports isDefaultInput.
// NOTE(review): the return statements are in lines omitted from this
// listing -- confirm against the full source.
4500 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4502   for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4503     if ( getDeviceInfo( i ).isDefaultInput ) {
4511 //-----------------------------------------------------------------------------
// Stop a running stream (if needed), release all per-stream WASAPI COM
// objects and event handles, free the user/device buffers, and mark the
// stream closed.  Warns (rather than throws) when no stream is open.
4513 void RtApiWasapi::closeStream( void )
4515   if ( stream_.state == STREAM_CLOSED ) {
4516     errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4517     error( RtAudioError::WARNING );
// Ensure the callback thread is stopped before tearing down its state.
4521   if ( stream_.state != STREAM_STOPPED )
4524   // clean up stream memory
4525   SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4526   SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4528   SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4529   SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
// Event HANDLEs are Win32 objects, not COM: close rather than release.
4531   if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4532     CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4534   if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4535     CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4537   delete ( WasapiHandle* ) stream_.apiHandle;
4538   stream_.apiHandle = NULL;
// Free the per-direction user buffers (index 0/1 = output/input).
4540   for ( int i = 0; i < 2; i++ ) {
4541     if ( stream_.userBuffer[i] ) {
4542       free( stream_.userBuffer[i] );
4543       stream_.userBuffer[i] = 0;
4547   if ( stream_.deviceBuffer ) {
4548     free( stream_.deviceBuffer );
4549     stream_.deviceBuffer = 0;
4552   // update stream state
4553   stream_.state = STREAM_CLOSED;
4556 //-----------------------------------------------------------------------------
// Mark the stream running and spawn the WASAPI processing thread
// (wasapiThread via the runWasapiThread trampoline).  The thread is
// created suspended so its priority can be set before it first runs.
4558 void RtApiWasapi::startStream( void )
4562   if ( stream_.state == STREAM_RUNNING ) {
4563     errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4564     error( RtAudioError::WARNING );
// Record the stream start time for getStreamTime bookkeeping.
4568   #if defined( HAVE_GETTIMEOFDAY )
4569   gettimeofday( &stream_.lastTickTimestamp, NULL );
4572   // update stream state
// State must be RUNNING before the thread starts, since the thread's loop
// keys off stream_.state.
4573   stream_.state = STREAM_RUNNING;
4575   // create WASAPI stream thread
4576   stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4578   if ( !stream_.callbackInfo.thread ) {
4579     errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4580     error( RtAudioError::THREAD_ERROR );
4583   SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4584   ResumeThread( ( void* ) stream_.callbackInfo.thread );
4588 //-----------------------------------------------------------------------------
// Request a graceful stop: flag STREAM_STOPPING, busy-wait until the
// callback thread observes it and sets STREAM_STOPPED, let the final
// buffer drain, then close the thread handle.
4590 void RtApiWasapi::stopStream( void )
4594   if ( stream_.state == STREAM_STOPPED ) {
4595     errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4596     error( RtAudioError::WARNING );
4600   // inform stream thread by setting stream state to STREAM_STOPPING
4601   stream_.state = STREAM_STOPPING;
4603   // wait until stream thread is stopped
// Spin until wasapiThread acknowledges the stop (loop body, presumably a
// Sleep, is in a line omitted from this listing).
4604   while( stream_.state != STREAM_STOPPED ) {
4608   // Wait for the last buffer to play before stopping.
// One buffer's duration in milliseconds at the current sample rate.
4609   Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4611   // close thread handle
4612   if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4613     errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4614     error( RtAudioError::THREAD_ERROR );
4618   stream_.callbackInfo.thread = (ThreadHandle) NULL;
4621 //-----------------------------------------------------------------------------
// Immediate stop: identical to stopStream except it does not wait for the
// last buffer to drain before closing the thread handle.
4623 void RtApiWasapi::abortStream( void )
4627   if ( stream_.state == STREAM_STOPPED ) {
4628     errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4629     error( RtAudioError::WARNING );
4633   // inform stream thread by setting stream state to STREAM_STOPPING
4634   stream_.state = STREAM_STOPPING;
4636   // wait until stream thread is stopped
// Spin until wasapiThread acknowledges the stop (loop body omitted from
// this listing).
4637   while ( stream_.state != STREAM_STOPPED ) {
4641   // close thread handle
4642   if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4643     errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4644     error( RtAudioError::THREAD_ERROR );
4648   stream_.callbackInfo.thread = (ThreadHandle) NULL;
4651 //-----------------------------------------------------------------------------
// Open one direction (mode) of a stream on `device`: resolve the endpoint,
// activate its IAudioClient, record device format/latency, then fill in
// the RtApi stream_ structure (mode, rates, channels, conversion flags,
// user buffer).  Returns SUCCESS/FAILURE; on failure the caller-visible
// error path closes the stream.
// Device index convention: render devices occupy [0, renderDeviceCount),
// capture devices follow.  A render-device index opened as INPUT is
// treated as loopback capture.
// NOTE(review): the listing omits the goto/Exit plumbing, some closing
// braces and statements between numbered lines; comments describe only
// what is visible.
4653 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4654                                    unsigned int firstChannel, unsigned int sampleRate,
4655                                    RtAudioFormat format, unsigned int* bufferSize,
4656                                    RtAudio::StreamOptions* options )
4658   bool methodResult = FAILURE;
4659   unsigned int captureDeviceCount = 0;
4660   unsigned int renderDeviceCount = 0;
4662   IMMDeviceCollection* captureDevices = NULL;
4663   IMMDeviceCollection* renderDevices = NULL;
4664   IMMDevice* devicePtr = NULL;
4665   WAVEFORMATEX* deviceFormat = NULL;
4666   unsigned int bufferBytes;
4667   stream_.state = STREAM_STOPPED;
4669   // create API Handle if not already created
// One WasapiHandle is shared by both directions of a duplex stream.
4670   if ( !stream_.apiHandle )
4671     stream_.apiHandle = ( void* ) new WasapiHandle();
4673   // Count capture devices
4675   RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4676   HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4677   if ( FAILED( hr ) ) {
4678     errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4682   hr = captureDevices->GetCount( &captureDeviceCount );
4683   if ( FAILED( hr ) ) {
4684     errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4688   // Count render devices
4689   hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4690   if ( FAILED( hr ) ) {
4691     errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4695   hr = renderDevices->GetCount( &renderDeviceCount );
4696   if ( FAILED( hr ) ) {
4697     errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4701   // validate device index
4702   if ( device >= captureDeviceCount + renderDeviceCount ) {
4703     errorType = RtAudioError::INVALID_USE;
4704     errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4708   // if device index falls within capture devices
4709   if ( device >= renderDeviceCount ) {
// A true capture endpoint can only be opened for INPUT.
4710     if ( mode != INPUT ) {
4711       errorType = RtAudioError::INVALID_USE;
4712       errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4716     // retrieve captureAudioClient from devicePtr
4717     IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4719     hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4720     if ( FAILED( hr ) ) {
4721       errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4725     hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4726                               NULL, ( void** ) &captureAudioClient );
4727     if ( FAILED( hr ) ) {
4728       errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4732     hr = captureAudioClient->GetMixFormat( &deviceFormat );
4733     if ( FAILED( hr ) ) {
4734       errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4738     stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4739     captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4742   // if device index falls within render devices and is configured for loopback
// Loopback capture: record what a render endpoint is playing.  The render
// side must be opened first so the endpoint is active.
4743   if ( device < renderDeviceCount && mode == INPUT )
4745     // if renderAudioClient is not initialised, initialise it now
4746     IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4747     if ( !renderAudioClient )
// Recursive call opens the OUTPUT side of the same device first.
4749       probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4752     // retrieve captureAudioClient from devicePtr
// Note: the capture-side client for loopback is activated on the RENDER
// endpoint, hence the render-flavored error messages below.
4753     IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4755     hr = renderDevices->Item( device, &devicePtr );
4756     if ( FAILED( hr ) ) {
4757       errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4761     hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4762                               NULL, ( void** ) &captureAudioClient );
4763     if ( FAILED( hr ) ) {
4764       errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4768     hr = captureAudioClient->GetMixFormat( &deviceFormat );
4769     if ( FAILED( hr ) ) {
4770       errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4774     stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4775     captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4778   // if device index falls within render devices and is configured for output
4779   if ( device < renderDeviceCount && mode == OUTPUT )
4781     // if renderAudioClient is already initialised, don't initialise it again
// Already set up (e.g. by the loopback path above): report success.
4782     IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4783     if ( renderAudioClient )
4785       methodResult = SUCCESS;
4789     hr = renderDevices->Item( device, &devicePtr );
4790     if ( FAILED( hr ) ) {
4791       errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4795     hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4796                               NULL, ( void** ) &renderAudioClient );
4797     if ( FAILED( hr ) ) {
4798       errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4802     hr = renderAudioClient->GetMixFormat( &deviceFormat );
4803     if ( FAILED( hr ) ) {
4804       errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4808     stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4809     renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// Opening the second direction of an already-open stream upgrades the
// stream mode to DUPLEX.
4813   if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4814        ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4815     stream_.mode = DUPLEX;
4818     stream_.mode = mode;
// Populate the generic RtApi stream bookkeeping for this direction.
4821   stream_.device[mode] = device;
4822   stream_.doByteSwap[mode] = false;
4823   stream_.sampleRate = sampleRate;
4824   stream_.bufferSize = *bufferSize;
4825   stream_.nBuffers = 1;
4826   stream_.nUserChannels[mode] = channels;
4827   stream_.channelOffset[mode] = firstChannel;
4828   stream_.userFormat = format;
4829   stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4831   if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4832     stream_.userInterleaved = false;
4834     stream_.userInterleaved = true;
4835   stream_.deviceInterleaved[mode] = true;
4837   // Set flags for buffer conversion.
// Conversion is needed when the user's format, channel count or
// interleaving differs from the device side.
4838   stream_.doConvertBuffer[mode] = false;
4839   if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4840        stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4841        stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4842     stream_.doConvertBuffer[mode] = true;
4843   else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4844             stream_.nUserChannels[mode] > 1 )
4845     stream_.doConvertBuffer[mode] = true;
4847   if ( stream_.doConvertBuffer[mode] )
4848     setConvertInfo( mode, 0 );
4850   // Allocate necessary internal buffers
4851   bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4853   stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4854   if ( !stream_.userBuffer[mode] ) {
4855     errorType = RtAudioError::MEMORY_ERROR;
4856     errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
// Thread priority 15 corresponds to elevated (near time-critical)
// scheduling for the callback thread.
4860   if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4861     stream_.callbackInfo.priority = 15;
4863     stream_.callbackInfo.priority = 0;
4865   ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4866   ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4868   methodResult = SUCCESS;
// Shared cleanup: release enumeration COM objects and the mix format.
4872   SAFE_RELEASE( captureDevices );
4873   SAFE_RELEASE( renderDevices );
4874   SAFE_RELEASE( devicePtr );
4875   CoTaskMemFree( deviceFormat );
4877   // if method failed, close the stream
4878   if ( methodResult == FAILURE )
4881   if ( !errorText_.empty() )
4883   return methodResult;
4886 //=============================================================================
// Win32 thread entry trampoline: CreateThread passes the RtApiWasapi
// instance as the void* argument; forward to the member stream thread.
// NOTE(review): enclosing braces / return statement are not visible in
// this chunk of the file.
4888 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4891 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
// Win32 thread entry trampoline used by wasapiThread() when the user
// callback returns 1: requests a drain-and-stop via stopStream().
4896 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4899 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
// Win32 thread entry trampoline used by wasapiThread() when the user
// callback returns 2: requests an immediate abort via abortStream().
4904 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4907 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4912 //-----------------------------------------------------------------------------
// Core WASAPI streaming thread.
//
// Runs on its own thread (spawned via runWasapiThread). Per iteration of
// the process loop it: (1) pulls captured frames out of the intermediate
// captureBuffer and resamples/converts them into the user input buffer,
// (2) invokes the user callback, (3) resamples/converts the user output
// buffer and pushes it into the intermediate renderBuffer, then (4) moves
// data between the WASAPI capture/render endpoint buffers and those
// intermediate ring buffers, blocking on the endpoint events as needed.
// Loops until stream_.state becomes STREAM_STOPPING; on exit frees the
// thread-local resources and sets stream_.state to STREAM_STOPPED.
// On any failure it records errorText/errorType and bails out (the
// cleanup/error-reporting tail is only partially visible in this chunk).
4914 void RtApiWasapi::wasapiThread()
4916 // as this is a new thread, we must CoInitialize it
4917 CoInitialize( NULL );
// Local aliases into the shared WasapiHandle stored on the stream.
4921 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4922 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4923 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4924 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4925 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4926 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4928 WAVEFORMATEX* captureFormat = NULL;
4929 WAVEFORMATEX* renderFormat = NULL;
4930 float captureSrRatio = 0.0f;
4931 float renderSrRatio = 0.0f;
4932 WasapiBuffer captureBuffer;
4933 WasapiBuffer renderBuffer;
4934 WasapiResampler* captureResampler = NULL;
4935 WasapiResampler* renderResampler = NULL;
4937 // declare local stream variables
4938 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4939 BYTE* streamBuffer = NULL;
4940 unsigned long captureFlags = 0;
4941 unsigned int bufferFrameCount = 0;
4942 unsigned int numFramesPadding = 0;
4943 unsigned int convBufferSize = 0;
// NOTE(review): loopback is inferred from input/output device indices
// being equal; presumably this is only meaningful for capture-only
// streams opened on a render device — confirm against the open logic.
4944 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4945 bool callbackPushed = true;
4946 bool callbackPulled = false;
4947 bool callbackStopped = false;
4948 int callbackResult = 0;
4950 // convBuffer is used to store converted buffers between WASAPI and the user
4951 char* convBuffer = NULL;
4952 unsigned int convBuffSize = 0;
4953 unsigned int deviceBuffSize = 0;
4955 std::string errorText;
4956 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4958 // Attempt to assign "Pro Audio" characteristic to thread
// NOTE(review): neither the LoadLibrary result nor the GetProcAddress
// result is visibly NULL-checked before the call below — a guard may
// exist on lines dropped from this chunk; verify against the full file.
4959 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4961 DWORD taskIndex = 0;
4962 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
4963 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4964 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4965 FreeLibrary( AvrtDll );
4968 // start capture stream if applicable
4969 if ( captureAudioClient ) {
4970 hr = captureAudioClient->GetMixFormat( &captureFormat );
4971 if ( FAILED( hr ) ) {
4972 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4976 // init captureResampler
// Resampler converts from the device mix rate to the user stream rate.
4977 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
4978 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
4979 captureFormat->nSamplesPerSec, stream_.sampleRate );
4981 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
// captureClient is NULL on first entry; a restarted stream reuses the
// client already stored in the WasapiHandle and skips initialization.
4983 if ( !captureClient ) {
4984 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4985 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4990 if ( FAILED( hr ) ) {
4991 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4995 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4996 ( void** ) &captureClient );
4997 if ( FAILED( hr ) ) {
4998 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5002 // don't configure captureEvent if in loopback mode
5005 // configure captureEvent to trigger on every available capture buffer
5003 if ( !loopbackEnabled )
5006 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5007 if ( !captureEvent ) {
5008 errorType = RtAudioError::SYSTEM_ERROR;
5009 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5013 hr = captureAudioClient->SetEventHandle( captureEvent );
5014 if ( FAILED( hr ) ) {
5015 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Persist the created objects so a later restart can reuse them.
5019 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5022 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5024 // reset the capture stream
5025 hr = captureAudioClient->Reset();
5026 if ( FAILED( hr ) ) {
5027 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5031 // start the capture stream
5032 hr = captureAudioClient->Start();
5033 if ( FAILED( hr ) ) {
5034 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5039 unsigned int inBufferSize = 0;
5040 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5041 if ( FAILED( hr ) ) {
5042 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5046 // scale outBufferSize according to stream->user sample rate ratio
5047 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5048 inBufferSize *= stream_.nDeviceChannels[INPUT];
5050 // set captureBuffer size
// Ring buffer must hold one endpoint buffer plus one user buffer worth
// of samples so producer and consumer never collide.
5051 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5054 // start render stream if applicable
5055 if ( renderAudioClient ) {
5056 hr = renderAudioClient->GetMixFormat( &renderFormat );
5057 if ( FAILED( hr ) ) {
5058 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5062 // init renderResampler
// Resampler converts from the user stream rate to the device mix rate.
5063 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5064 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5065 stream_.sampleRate, renderFormat->nSamplesPerSec );
5067 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5069 if ( !renderClient ) {
5070 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5071 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5076 if ( FAILED( hr ) ) {
5077 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5081 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5082 ( void** ) &renderClient );
5083 if ( FAILED( hr ) ) {
5084 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5088 // configure renderEvent to trigger on every available render buffer
5089 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5090 if ( !renderEvent ) {
5091 errorType = RtAudioError::SYSTEM_ERROR;
5092 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5096 hr = renderAudioClient->SetEventHandle( renderEvent );
5097 if ( FAILED( hr ) ) {
5098 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
// Persist the created objects so a later restart can reuse them.
5102 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5103 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5105 // reset the render stream
5106 hr = renderAudioClient->Reset();
5107 if ( FAILED( hr ) ) {
5108 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5112 // start the render stream
5113 hr = renderAudioClient->Start();
5114 if ( FAILED( hr ) ) {
5115 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5120 unsigned int outBufferSize = 0;
5121 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5122 if ( FAILED( hr ) ) {
5123 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5127 // scale inBufferSize according to user->stream sample rate ratio
5128 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5129 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5131 // set renderBuffer size
5132 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5135 // malloc buffer memory
// Size the conversion/device scratch buffers for the worst case of the
// open directions (DUPLEX takes the max of the two).
5136 if ( stream_.mode == INPUT )
5138 using namespace std; // for ceilf
5139 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5140 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5142 else if ( stream_.mode == OUTPUT )
5144 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5145 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5147 else if ( stream_.mode == DUPLEX )
5149 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5150 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5151 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5152 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5155 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5156 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5157 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5158 if ( !convBuffer || !stream_.deviceBuffer ) {
5159 errorType = RtAudioError::MEMORY_ERROR;
5160 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5164 // stream process loop
5165 while ( stream_.state != STREAM_STOPPING ) {
5166 if ( !callbackPulled ) {
5169 // 1. Pull callback buffer from inputBuffer
5170 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5171 // Convert callback buffer to user format
5173 if ( captureAudioClient )
// NOTE(review): samplesToPull is declared int but assigned from an
// unsigned cast; later it is set to 1 per-iteration — the mismatch looks
// benign but confirm against the canonical source.
5175 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5176 if ( captureSrRatio != 1 )
5178 // account for remainders
// Accumulate resampled frames until a full user buffer is available.
5183 while ( convBufferSize < stream_.bufferSize )
5185 // Pull callback buffer from inputBuffer
5186 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5187 samplesToPull * stream_.nDeviceChannels[INPUT],
5188 stream_.deviceFormat[INPUT] );
5190 if ( !callbackPulled )
5195 // Convert callback buffer to user sample rate
5196 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5197 unsigned int convSamples = 0;
5199 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5204 convBufferSize += convSamples;
5205 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5208 if ( callbackPulled )
5210 if ( stream_.doConvertBuffer[INPUT] ) {
5211 // Convert callback buffer to user format
5212 convertBuffer( stream_.userBuffer[INPUT],
5213 stream_.deviceBuffer,
5214 stream_.convertInfo[INPUT] );
5217 // no further conversion, simple copy deviceBuffer to userBuffer
5218 memcpy( stream_.userBuffer[INPUT],
5219 stream_.deviceBuffer,
5220 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5225 // if there is no capture stream, set callbackPulled flag
5226 callbackPulled = true;
5231 // 1. Execute user callback method
5232 // 2. Handle return value from callback
5234 // if callback has not requested the stream to stop
5235 if ( callbackPulled && !callbackStopped ) {
5236 // Execute user callback method
// Input overflow is reported when WASAPI flagged a data discontinuity
// on the captured buffer.
5237 callbackResult = callback( stream_.userBuffer[OUTPUT],
5238 stream_.userBuffer[INPUT],
5241 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5242 stream_.callbackInfo.userData );
5245 RtApi::tickStreamTime();
5247 // Handle return value from callback
// 1 => drain and stop; 2 => abort immediately. Either way the actual
// stop runs on a separate helper thread because stopStream()/
// abortStream() join this thread and must not be called from it.
5248 if ( callbackResult == 1 ) {
5249 // instantiate a thread to stop this thread
5250 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5251 if ( !threadHandle ) {
5252 errorType = RtAudioError::THREAD_ERROR;
5253 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5256 else if ( !CloseHandle( threadHandle ) ) {
5257 errorType = RtAudioError::THREAD_ERROR;
5258 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5262 callbackStopped = true;
5264 else if ( callbackResult == 2 ) {
5265 // instantiate a thread to stop this thread
5266 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5267 if ( !threadHandle ) {
5268 errorType = RtAudioError::THREAD_ERROR;
5269 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5272 else if ( !CloseHandle( threadHandle ) ) {
5273 errorType = RtAudioError::THREAD_ERROR;
5274 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5278 callbackStopped = true;
5285 // 1. Convert callback buffer to stream format
5286 // 2. Convert callback buffer to stream sample rate and channel count
5287 // 3. Push callback buffer into outputBuffer
5289 if ( renderAudioClient && callbackPulled )
5291 // if the last call to renderBuffer.PushBuffer() was successful
5292 if ( callbackPushed || convBufferSize == 0 )
5294 if ( stream_.doConvertBuffer[OUTPUT] )
5296 // Convert callback buffer to stream format
5297 convertBuffer( stream_.deviceBuffer,
5298 stream_.userBuffer[OUTPUT],
5299 stream_.convertInfo[OUTPUT] );
5303 // no further conversion, simple copy userBuffer to deviceBuffer
5304 memcpy( stream_.deviceBuffer,
5305 stream_.userBuffer[OUTPUT],
5306 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5309 // Convert callback buffer to stream sample rate
5310 renderResampler->Convert( convBuffer,
5311 stream_.deviceBuffer,
5316 // Push callback buffer into outputBuffer
5317 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5318 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5319 stream_.deviceFormat[OUTPUT] );
5322 // if there is no render stream, set callbackPushed flag
5323 callbackPushed = true;
5328 // 1. Get capture buffer from stream
5329 // 2. Push capture buffer into inputBuffer
5330 // 3. If 2. was successful: Release capture buffer
5332 if ( captureAudioClient ) {
5333 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
// In loopback mode the capture endpoint has no event; block on the
// render event instead.
5334 if ( !callbackPulled ) {
5335 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5338 // Get capture buffer from stream
5339 hr = captureClient->GetBuffer( &streamBuffer,
5341 &captureFlags, NULL, NULL );
5342 if ( FAILED( hr ) ) {
5343 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5347 if ( bufferFrameCount != 0 ) {
5348 // Push capture buffer into inputBuffer
5349 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5350 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5351 stream_.deviceFormat[INPUT] ) )
5353 // Release capture buffer
5354 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5355 if ( FAILED( hr ) ) {
5356 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5362 // Inform WASAPI that capture was unsuccessful
// ReleaseBuffer( 0 ) releases zero frames, i.e. the data stays queued.
5363 hr = captureClient->ReleaseBuffer( 0 );
5364 if ( FAILED( hr ) ) {
5365 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5372 // Inform WASAPI that capture was unsuccessful
5373 hr = captureClient->ReleaseBuffer( 0 );
5374 if ( FAILED( hr ) ) {
5375 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5383 // 1. Get render buffer from stream
5384 // 2. Pull next buffer from outputBuffer
5385 // 3. If 2. was successful: Fill render buffer with next buffer
5386 // Release render buffer
5388 if ( renderAudioClient ) {
5389 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5390 if ( callbackPulled && !callbackPushed ) {
5391 WaitForSingleObject( renderEvent, INFINITE );
5394 // Get render buffer from stream
5395 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5396 if ( FAILED( hr ) ) {
5397 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5401 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5402 if ( FAILED( hr ) ) {
5403 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable space = total endpoint buffer minus frames still queued.
5407 bufferFrameCount -= numFramesPadding;
5409 if ( bufferFrameCount != 0 ) {
5410 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5411 if ( FAILED( hr ) ) {
5412 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5416 // Pull next buffer from outputBuffer
5417 // Fill render buffer with next buffer
5418 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5419 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5420 stream_.deviceFormat[OUTPUT] ) )
5422 // Release render buffer
5423 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5424 if ( FAILED( hr ) ) {
5425 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5431 // Inform WASAPI that render was unsuccessful
5432 hr = renderClient->ReleaseBuffer( 0, 0 );
5433 if ( FAILED( hr ) ) {
5434 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5441 // Inform WASAPI that render was unsuccessful
5442 hr = renderClient->ReleaseBuffer( 0, 0 );
5443 if ( FAILED( hr ) ) {
5444 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5450 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5451 if ( callbackPushed ) {
5452 // unsetting the callbackPulled flag lets the stream know that
5453 // the audio device is ready for another callback output buffer.
5454 callbackPulled = false;
// Exit path: free thread-local resources. The endpoint clients/events
// themselves live in the WasapiHandle and are released elsewhere.
5461 CoTaskMemFree( captureFormat );
5462 CoTaskMemFree( renderFormat );
5464 free ( convBuffer );
5465 delete renderResampler;
5466 delete captureResampler;
5470 // update stream state
5471 stream_.state = STREAM_STOPPED;
5473 if ( !errorText.empty() )
5475 errorText_ = errorText;
5480 //******************** End of __WINDOWS_WASAPI__ *********************//
5484 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5486 // Modified by Robin Davies, October 2005
5487 // - Improvements to DirectX pointer chasing.
5488 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5489 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5490 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5491 // Changed device query structure for RtAudio 4.0.7, January 2010
5493 #include <windows.h>
5494 #include <process.h>
5495 #include <mmsystem.h>
5499 #include <algorithm>
5501 #if defined(__MINGW32__)
5502 // missing from latest mingw winapi
5503 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5504 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5505 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5506 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5509 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5511 #ifdef _MSC_VER // if Microsoft Visual C++
5512 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
// Returns nonzero when 'pointer' lies in the half-open circular interval
// [earlierPointer, laterPointer) on a DirectSound ring buffer of
// 'bufferSize' bytes. Wrap-around is handled by shifting wrapped values
// up by bufferSize before the linear comparison.
5515 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5517 if ( pointer > bufferSize ) pointer -= bufferSize;
5518 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5519 if ( pointer < earlierPointer ) pointer += bufferSize;
5520 return pointer >= earlierPointer && pointer < laterPointer;
5523 // A structure to hold various information related to the DirectSound
5524 // API implementation.
// NOTE(review): the struct header and several member declarations fall on
// lines not visible in this chunk; index convention for the [2] arrays is
// presumably 0 = playback, 1 = capture — confirm against the full file.
5526 unsigned int drainCounter; // Tracks callback counts when draining
5527 bool internalDrain; // Indicates if stop is initiated from callback or not.
5531 UINT bufferPointer[2];
5532 DWORD dsBufferSize[2]; // size in bytes of each DirectSound buffer
5533 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Constructor: zero all counters, pointers and flags.
5537 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5540 // Declarations for utility functions, callbacks, and structures
5541 // specific to the DirectSound implementation.
// Enumeration callback passed to DirectSound(Capture)Enumerate; records
// each device into the DsProbeData it receives (remaining parameters are
// on lines not visible in this chunk).
5542 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5543 LPCTSTR description,
// Maps a DirectSound HRESULT to a human-readable message.
5547 static const char* getErrorString( int code );
// Thread routine driving the DirectSound callback loop (_beginthreadex).
5549 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice constructor fragment: starts as not-found with no valid ids.
5558 : found(false) { validId[0] = false; validId[1] = false; }
// Argument bundle for deviceQueryCallback: which direction is being
// probed and where to record discovered devices.
5561 struct DsProbeData {
5563 std::vector<struct DsDevice>* dsDevices;
// Constructor: initialize COM for this thread. coInitialized_ remembers
// whether we own the CoInitialize so the destructor can balance it.
5566 RtApiDs :: RtApiDs()
5568 // Dsound will run both-threaded. If CoInitialize fails, then just
5569 // accept whatever the mainline chose for a threading model.
5570 coInitialized_ = false;
5571 HRESULT hr = CoInitialize( NULL );
5572 if ( !FAILED( hr ) ) coInitialized_ = true;
// Destructor: close any open stream, then undo our CoInitialize if it
// succeeded in the constructor.
5575 RtApiDs :: ~RtApiDs()
5577 if ( stream_.state != STREAM_CLOSED ) closeStream();
5578 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5581 // The DirectSound default output is always the first device.
// (Body falls on lines not visible in this chunk.)
5582 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5587 // The DirectSound default input is always the first input device,
5588 // which is the first capture device enumerated.
// (Body falls on lines not visible in this chunk.)
5589 unsigned int RtApiDs :: getDefaultInputDevice( void )
// Re-enumerates DirectSound render and capture devices into the shared
// dsDevices vector, prunes entries that have disappeared since the last
// query, and returns the resulting device count. Enumeration failures
// are reported as warnings and do not abort the count.
5594 unsigned int RtApiDs :: getDeviceCount( void )
5596 // Set query flag for previously found devices to false, so that we
5597 // can check for any devices that have disappeared.
5598 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5599 dsDevices[i].found = false;
5601 // Query DirectSound devices.
5602 struct DsProbeData probeInfo;
5603 probeInfo.isInput = false;
5604 probeInfo.dsDevices = &dsDevices;
5605 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5606 if ( FAILED( result ) ) {
5607 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5608 errorText_ = errorStream_.str();
5609 error( RtAudioError::WARNING );
5612 // Query DirectSoundCapture devices.
5613 probeInfo.isInput = true;
5614 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5615 if ( FAILED( result ) ) {
5616 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5617 errorText_ = errorStream_.str();
5618 error( RtAudioError::WARNING );
5621 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// Note: no i++ visible here — erase() advances the scan implicitly; the
// increment for the keep-branch falls on a line not visible in this chunk.
5622 for ( unsigned int i=0; i<dsDevices.size(); ) {
5623 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5627 return static_cast<unsigned int>(dsDevices.size());
// Probes one enumerated DirectSound device and fills an
// RtAudio::DeviceInfo: output capabilities via DirectSoundCreate/GetCaps,
// then input capabilities via DirectSoundCaptureCreate/GetCaps. Invalid
// device indices and probe failures are reported through error() and an
// info struct with probed == false is returned (return statements fall on
// lines not visible in this chunk).
5630 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5632 RtAudio::DeviceInfo info;
5633 info.probed = false;
5635 if ( dsDevices.size() == 0 ) {
5636 // Force a query of all devices
5638 if ( dsDevices.size() == 0 ) {
5639 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5640 error( RtAudioError::INVALID_USE );
5645 if ( device >= dsDevices.size() ) {
5646 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5647 error( RtAudioError::INVALID_USE );
// validId[0] == playback id; skip straight to the capture probe when the
// device has no playback side. (The probeInput label itself falls on a
// line not visible in this chunk.)
5652 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5654 LPDIRECTSOUND output;
5656 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5657 if ( FAILED( result ) ) {
5658 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5659 errorText_ = errorStream_.str();
5660 error( RtAudioError::WARNING );
5664 outCaps.dwSize = sizeof( outCaps );
5665 result = output->GetCaps( &outCaps );
5666 if ( FAILED( result ) ) {
5668 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5669 errorText_ = errorStream_.str();
5670 error( RtAudioError::WARNING );
5674 // Get output channel information.
5675 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5677 // Get sample rate information.
// Keep every supported rate in the device's secondary-rate range; prefer
// the highest supported rate that does not exceed 48 kHz.
5678 info.sampleRates.clear();
5679 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5680 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5681 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5682 info.sampleRates.push_back( SAMPLE_RATES[k] );
5684 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5685 info.preferredSampleRate = SAMPLE_RATES[k];
5689 // Get format information.
5690 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5691 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5695 if ( getDefaultOutputDevice() == device )
5696 info.isDefaultOutput = true;
// No capture side on this device: finalize with the name and return
// early (return on a line not visible in this chunk).
5698 if ( dsDevices[ device ].validId[1] == false ) {
5699 info.name = dsDevices[ device ].name;
5706 LPDIRECTSOUNDCAPTURE input;
5707 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5708 if ( FAILED( result ) ) {
5709 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5710 errorText_ = errorStream_.str();
5711 error( RtAudioError::WARNING );
5716 inCaps.dwSize = sizeof( inCaps );
5717 result = input->GetCaps( &inCaps );
5718 if ( FAILED( result ) ) {
5720 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5721 errorText_ = errorStream_.str();
5722 error( RtAudioError::WARNING );
5726 // Get input channel information.
5727 info.inputChannels = inCaps.dwChannels;
5729 // Get sample rate and format information.
// WAVE_FORMAT_* capability bits encode rate x channels x width; derive
// both the native formats and the supported capture rates from them.
5730 std::vector<unsigned int> rates;
5731 if ( inCaps.dwChannels >= 2 ) {
5732 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5733 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5734 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5735 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5736 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5737 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5738 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5739 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5741 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5742 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5743 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5744 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5745 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5747 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5748 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5749 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5750 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5751 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5754 else if ( inCaps.dwChannels == 1 ) {
5755 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5756 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5757 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5758 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5759 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5760 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5761 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5762 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5764 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5765 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5766 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5767 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5768 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5770 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5771 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5772 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5773 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5774 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5777 else info.inputChannels = 0; // technically, this would be an error
5781 if ( info.inputChannels == 0 ) return info;
5783 // Copy the supported rates to the info structure but avoid duplication.
5785 for ( unsigned int i=0; i<rates.size(); i++ ) {
5787 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5788 if ( rates[i] == info.sampleRates[j] ) {
5793 if ( found == false ) info.sampleRates.push_back( rates[i] );
5795 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5797 // If device opens for both playback and capture, we determine the channels.
5798 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5799 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5801 if ( device == 0 ) info.isDefaultInput = true;
5803 // Copy name and return.
5804 info.name = dsDevices[ device ].name;
// Open and configure a DirectSound playback or capture device for one
// direction of a stream (called once per mode; a duplex stream calls this
// twice).  Validates the device/channel request, creates the DirectSound
// (or DirectSoundCapture) object and its secondary buffer, allocates the
// internal user/device conversion buffers, fills in the DsHandle, and
// starts the callback thread on first use.
//
//   device       : index into dsDevices (validated against its size below)
//   mode         : OUTPUT or INPUT
//   channels     : user channel count (DirectSound limit: 2 total per device)
//   firstChannel : channel offset within the device
//   sampleRate   : requested rate in Hz (copied into the wave format)
//   format       : user sample format; device format chosen as 8- or 16-bit
//   bufferSize   : in/out, frames per callback buffer (clamped to >= 32)
//   options      : optional flags / numberOfBuffers
//
// NOTE(review): this listing is elided — several `return FAILURE;` lines,
// closing braces and the failure-cleanup label are not shown here.
5809 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5810 unsigned int firstChannel, unsigned int sampleRate,
5811 RtAudioFormat format, unsigned int *bufferSize,
5812 RtAudio::StreamOptions *options )
// DirectSound supports at most two channels per device, including the offset.
5814 if ( channels + firstChannel > 2 ) {
5815 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5819 size_t nDevices = dsDevices.size();
5820 if ( nDevices == 0 ) {
5821 // This should not happen because a check is made before this function is called.
5822 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5826 if ( device >= nDevices ) {
5827 // This should not happen because a check is made before this function is called.
5828 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
// validId[0] marks output capability, validId[1] marks input capability.
5832 if ( mode == OUTPUT ) {
5833 if ( dsDevices[ device ].validId[0] == false ) {
5834 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5835 errorText_ = errorStream_.str();
5839 else { // mode == INPUT
5840 if ( dsDevices[ device ].validId[1] == false ) {
5841 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5842 errorText_ = errorStream_.str();
5847 // According to a note in PortAudio, using GetDesktopWindow()
5848 // instead of GetForegroundWindow() is supposed to avoid problems
5849 // that occur when the application's window is not the foreground
5850 // window. Also, if the application window closes before the
5851 // DirectSound buffer, DirectSound can crash. In the past, I had
5852 // problems when using GetDesktopWindow() but it seems fine now
5853 // (January 2010). I'll leave it commented here.
5854 // HWND hWnd = GetForegroundWindow();
5855 HWND hWnd = GetDesktopWindow();
5857 // Check the numberOfBuffers parameter and limit the lowest value to
5858 // two. This is a judgement call and a value of two is probably too
5859 // low for capture, but it should work for playback.
5861 if ( options ) nBuffers = options->numberOfBuffers;
5862 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5863 if ( nBuffers < 2 ) nBuffers = 3;
5865 // Check the lower range of the user-specified buffer size and set
5866 // (arbitrarily) to a lower bound of 32.
5867 if ( *bufferSize < 32 ) *bufferSize = 32;
5869 // Create the wave format structure. The data format setting will
5870 // be determined later.
5871 WAVEFORMATEX waveFormat;
5872 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5873 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5874 waveFormat.nChannels = channels + firstChannel;
5875 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5877 // Determine the device buffer size. By default, we'll use the value
5878 // defined above (32K), but we will grow it to make allowances for
5879 // very large software buffer sizes.
5880 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5881 DWORD dsPointerLeadTime = 0;
// ohandle/bhandle hold the DS object and its buffer as opaque pointers
// until they are stored into the DsHandle at the end of this function.
5883 void *ohandle = 0, *bhandle = 0;
// ---------------- Playback (OUTPUT) setup ----------------
5885 if ( mode == OUTPUT ) {
5887 LPDIRECTSOUND output;
5888 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5889 if ( FAILED( result ) ) {
5890 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5891 errorText_ = errorStream_.str();
5896 outCaps.dwSize = sizeof( outCaps );
5897 result = output->GetCaps( &outCaps );
5898 if ( FAILED( result ) ) {
5900 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5901 errorText_ = errorStream_.str();
5905 // Check channel information.
5906 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5907 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5908 errorText_ = errorStream_.str();
5912 // Check format information. Use 16-bit format unless not
5913 // supported or user requests 8-bit.
5914 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5915 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5916 waveFormat.wBitsPerSample = 16;
5917 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5920 waveFormat.wBitsPerSample = 8;
5921 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5923 stream_.userFormat = format;
5925 // Update wave format structure and buffer information.
5926 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5927 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
// Lead time in bytes: how far ahead of the safe-write pointer we write.
5928 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5930 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5931 while ( dsPointerLeadTime * 2U > dsBufferSize )
5934 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5935 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5936 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5937 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5938 if ( FAILED( result ) ) {
5940 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5941 errorText_ = errorStream_.str();
5945 // Even though we will write to the secondary buffer, we need to
5946 // access the primary buffer to set the correct output format
5947 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5948 // buffer description.
5949 DSBUFFERDESC bufferDescription;
5950 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5951 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5952 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5954 // Obtain the primary buffer
5955 LPDIRECTSOUNDBUFFER buffer;
5956 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5957 if ( FAILED( result ) ) {
5959 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5960 errorText_ = errorStream_.str();
5964 // Set the primary DS buffer sound format.
5965 result = buffer->SetFormat( &waveFormat );
5966 if ( FAILED( result ) ) {
5968 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5969 errorText_ = errorStream_.str();
5973 // Setup the secondary DS buffer description.
5974 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5975 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5976 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5977 DSBCAPS_GLOBALFOCUS |
5978 DSBCAPS_GETCURRENTPOSITION2 |
5979 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5980 bufferDescription.dwBufferBytes = dsBufferSize;
5981 bufferDescription.lpwfxFormat = &waveFormat;
5983 // Try to create the secondary DS buffer. If that doesn't work,
5984 // try to use software mixing. Otherwise, there's a problem.
5985 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5986 if ( FAILED( result ) ) {
// Hardware mixing failed; retry identically but with software mixing.
5987 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5988 DSBCAPS_GLOBALFOCUS |
5989 DSBCAPS_GETCURRENTPOSITION2 |
5990 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5991 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5992 if ( FAILED( result ) ) {
5994 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5995 errorText_ = errorStream_.str();
6000 // Get the buffer size ... might be different from what we specified.
6002 dsbcaps.dwSize = sizeof( DSBCAPS );
6003 result = buffer->GetCaps( &dsbcaps );
6004 if ( FAILED( result ) ) {
6007 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6008 errorText_ = errorStream_.str();
6012 dsBufferSize = dsbcaps.dwBufferBytes;
6014 // Lock the DS buffer
6017 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6018 if ( FAILED( result ) ) {
6021 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6022 errorText_ = errorStream_.str();
6026 // Zero the DS buffer
6027 ZeroMemory( audioPtr, dataLen );
6029 // Unlock the DS buffer
6030 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6031 if ( FAILED( result ) ) {
6034 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6035 errorText_ = errorStream_.str();
// Stash the DS object and secondary buffer for the DsHandle below.
6039 ohandle = (void *) output;
6040 bhandle = (void *) buffer;
// ---------------- Capture (INPUT) setup ----------------
6043 if ( mode == INPUT ) {
6045 LPDIRECTSOUNDCAPTURE input;
6046 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6047 if ( FAILED( result ) ) {
6048 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6049 errorText_ = errorStream_.str();
6054 inCaps.dwSize = sizeof( inCaps );
6055 result = input->GetCaps( &inCaps );
6056 if ( FAILED( result ) ) {
6058 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6059 errorText_ = errorStream_.str();
6063 // Check channel information.
6064 if ( inCaps.dwChannels < channels + firstChannel ) {
6065 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6069 // Check format information. Use 16-bit format unless user
// requests 8-bit AND the device advertises an 8-bit capture format.
6071 DWORD deviceFormats;
6072 if ( channels + firstChannel == 2 ) {
// Stereo 8-bit capture formats at 11/22/44.1/96 kHz.
6073 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6074 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6075 waveFormat.wBitsPerSample = 8;
6076 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6078 else { // assume 16-bit is supported
6079 waveFormat.wBitsPerSample = 16;
6080 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6083 else { // channel == 1
// Mono 8-bit capture formats at 11/22/44.1/96 kHz.
6084 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6085 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6086 waveFormat.wBitsPerSample = 8;
6087 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6089 else { // assume 16-bit is supported
6090 waveFormat.wBitsPerSample = 16;
6091 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6094 stream_.userFormat = format;
6096 // Update wave format structure and buffer information.
6097 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6098 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6099 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6101 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6102 while ( dsPointerLeadTime * 2U > dsBufferSize )
6105 // Setup the secondary DS buffer description.
6106 DSCBUFFERDESC bufferDescription;
6107 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6108 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6109 bufferDescription.dwFlags = 0;
6110 bufferDescription.dwReserved = 0;
6111 bufferDescription.dwBufferBytes = dsBufferSize;
6112 bufferDescription.lpwfxFormat = &waveFormat;
6114 // Create the capture buffer.
6115 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6116 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6117 if ( FAILED( result ) ) {
6119 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6120 errorText_ = errorStream_.str();
6124 // Get the buffer size ... might be different from what we specified.
6126 dscbcaps.dwSize = sizeof( DSCBCAPS );
6127 result = buffer->GetCaps( &dscbcaps );
6128 if ( FAILED( result ) ) {
6131 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6132 errorText_ = errorStream_.str();
6136 dsBufferSize = dscbcaps.dwBufferBytes;
6138 // NOTE: We could have a problem here if this is a duplex stream
6139 // and the play and capture hardware buffer sizes are different
6140 // (I'm actually not sure if that is a problem or not).
6141 // Currently, we are not verifying that.
6143 // Lock the capture buffer
6146 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6147 if ( FAILED( result ) ) {
6150 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6151 errorText_ = errorStream_.str();
// Zero the capture buffer so we don't read stale data.
6156 ZeroMemory( audioPtr, dataLen );
6158 // Unlock the buffer
6159 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6160 if ( FAILED( result ) ) {
6163 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6164 errorText_ = errorStream_.str();
6168 ohandle = (void *) input;
6169 bhandle = (void *) buffer;
6172 // Set various stream parameters
6173 DsHandle *handle = 0;
6174 stream_.nDeviceChannels[mode] = channels + firstChannel;
6175 stream_.nUserChannels[mode] = channels;
6176 stream_.bufferSize = *bufferSize;
6177 stream_.channelOffset[mode] = firstChannel;
// DirectSound buffers are always interleaved.
6178 stream_.deviceInterleaved[mode] = true;
6179 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6180 else stream_.userInterleaved = true;
6182 // Set flag for buffer conversion
// Conversion is needed if channel counts, sample formats, or
// interleaving differ between the user and device sides.
6183 stream_.doConvertBuffer[mode] = false;
6184 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6185 stream_.doConvertBuffer[mode] = true;
6186 if (stream_.userFormat != stream_.deviceFormat[mode])
6187 stream_.doConvertBuffer[mode] = true;
6188 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6189 stream_.nUserChannels[mode] > 1 )
6190 stream_.doConvertBuffer[mode] = true;
6192 // Allocate necessary internal buffers
6193 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6194 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6195 if ( stream_.userBuffer[mode] == NULL ) {
6196 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6200 if ( stream_.doConvertBuffer[mode] ) {
6202 bool makeBuffer = true;
6203 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6204 if ( mode == INPUT ) {
// For duplex streams, reuse the output device buffer if it is
// already large enough for the input side.
6205 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6206 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6207 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6212 bufferBytes *= *bufferSize;
6213 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6214 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6215 if ( stream_.deviceBuffer == NULL ) {
6216 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6222 // Allocate our DsHandle structures for the stream.
// apiHandle == 0 means this is the first mode opened for this stream.
6223 if ( stream_.apiHandle == 0 ) {
6225 handle = new DsHandle;
6227 catch ( std::bad_alloc& ) {
// NOTE(review): message says "AsioHandle" but this allocates a
// DsHandle — looks like a copy/paste slip from the ASIO backend.
6228 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6232 // Create a manual-reset event.
// Used by the callback thread to signal drain completion (see
// stopStream / callbackEvent).
6233 handle->condition = CreateEvent( NULL, // no security
6234 TRUE, // manual-reset
6235 FALSE, // non-signaled initially
6237 stream_.apiHandle = (void *) handle;
6240 handle = (DsHandle *) stream_.apiHandle;
6241 handle->id[mode] = ohandle;
6242 handle->buffer[mode] = bhandle;
6243 handle->dsBufferSize[mode] = dsBufferSize;
6244 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6246 stream_.device[mode] = device;
6247 stream_.state = STREAM_STOPPED;
6248 if ( stream_.mode == OUTPUT && mode == INPUT )
6249 // We had already set up an output stream.
6250 stream_.mode = DUPLEX;
6252 stream_.mode = mode;
6253 stream_.nBuffers = nBuffers;
6254 stream_.sampleRate = sampleRate;
6256 // Setup the buffer conversion information structure.
6257 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6259 // Setup the callback thread.
// Only started once; a duplex stream's second open reuses the thread.
6260 if ( stream_.callbackInfo.isRunning == false ) {
6262 stream_.callbackInfo.isRunning = true;
6263 stream_.callbackInfo.object = (void *) this;
6264 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6265 &stream_.callbackInfo, 0, &threadId );
6266 if ( stream_.callbackInfo.thread == 0 ) {
6267 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6271 // Boost DS thread priority
6272 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// ---------------- Failure cleanup ----------------
// NOTE(review): the label/braces reaching this section are elided in
// this listing; the code below releases any DirectSound objects and
// internal buffers created above, then marks the stream closed.
6278 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6279 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6280 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6281 if ( buffer ) buffer->Release();
6284 if ( handle->buffer[1] ) {
6285 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6286 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6287 if ( buffer ) buffer->Release();
6290 CloseHandle( handle->condition );
6292 stream_.apiHandle = 0;
6295 for ( int i=0; i<2; i++ ) {
6296 if ( stream_.userBuffer[i] ) {
6297 free( stream_.userBuffer[i] );
6298 stream_.userBuffer[i] = 0;
6302 if ( stream_.deviceBuffer ) {
6303 free( stream_.deviceBuffer );
6304 stream_.deviceBuffer = 0;
6307 stream_.state = STREAM_CLOSED;
// Close the open stream: stop the callback thread, release the
// DirectSound objects and buffers held in the DsHandle, free internal
// user/device buffers, and reset the stream state to CLOSED.
// Emits a WARNING (not an error) if no stream is open.
6311 void RtApiDs :: closeStream()
6313 if ( stream_.state == STREAM_CLOSED ) {
6314 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6315 error( RtAudioError::WARNING );
6319 // Stop the callback thread.
// Clearing isRunning makes the callback loop exit; then join and close
// the thread handle.
6320 stream_.callbackInfo.isRunning = false;
6321 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6322 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6324 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release playback-side DS objects (buffer Release calls are elided in
// this listing).
6326 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6327 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6328 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Release capture-side DS objects.
6335 if ( handle->buffer[1] ) {
6336 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6337 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Destroy the drain-signal event created in probeDeviceOpen.
6344 CloseHandle( handle->condition );
6346 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
6349 for ( int i=0; i<2; i++ ) {
6350 if ( stream_.userBuffer[i] ) {
6351 free( stream_.userBuffer[i] );
6352 stream_.userBuffer[i] = 0;
6356 if ( stream_.deviceBuffer ) {
6357 free( stream_.deviceBuffer );
6358 stream_.deviceBuffer = 0;
6361 stream_.mode = UNINITIALIZED;
6362 stream_.state = STREAM_CLOSED;
// Start the stream: begin looping playback on the output buffer and/or
// capture on the input buffer, reset the drain bookkeeping, and mark the
// stream RUNNING.  Emits a WARNING if already running; raises a
// SYSTEM_ERROR if any DirectSound call fails.
6365 void RtApiDs :: startStream()
6368 if ( stream_.state == STREAM_RUNNING ) {
6369 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6370 error( RtAudioError::WARNING );
6374 #if defined( HAVE_GETTIMEOFDAY )
// Record the start time for stream-time computation.
6375 gettimeofday( &stream_.lastTickTimestamp, NULL );
6378 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6380 // Increase scheduler frequency on lesser windows (a side-effect of
6381 // increasing timer accuracy). On greater windows (Win2K or later),
6382 // this is already in effect.
6383 timeBeginPeriod( 1 );
// buffersRolling / duplexPrerollBytes are consumed by callbackEvent to
// synchronize the playback and capture pointers at startup.
6385 buffersRolling = false;
6386 duplexPrerollBytes = 0;
6388 if ( stream_.mode == DUPLEX ) {
6389 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6390 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start looping playback on the output secondary buffer.
6394 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6396 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6397 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6398 if ( FAILED( result ) ) {
6399 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6400 errorText_ = errorStream_.str();
// Start looping capture on the input buffer.
6405 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6407 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6408 result = buffer->Start( DSCBSTART_LOOPING );
6409 if ( FAILED( result ) ) {
6410 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6411 errorText_ = errorStream_.str();
// Reset drain state and the manual-reset event used by stopStream.
6416 handle->drainCounter = 0;
6417 handle->internalDrain = false;
6418 ResetEvent( handle->condition );
6419 stream_.state = STREAM_RUNNING;
6422 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: for output, wait (via the handle's
// condition event) for the callback thread to drain, then stop both
// DirectSound buffers, zero them so a restart doesn't replay stale data,
// and reset the buffer pointers.  Emits a WARNING if already stopped;
// raises SYSTEM_ERROR on DirectSound failures.
6425 void RtApiDs :: stopStream()
6428 if ( stream_.state == STREAM_STOPPED ) {
6429 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6430 error( RtAudioError::WARNING );
6437 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6438 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain in progress: request one (value 2)
// and block until the callback thread signals completion.
6439 if ( handle->drainCounter == 0 ) {
6440 handle->drainCounter = 2;
6441 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6444 stream_.state = STREAM_STOPPED;
6446 MUTEX_LOCK( &stream_.mutex );
6448 // Stop the buffer and clear memory
6449 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6450 result = buffer->Stop();
6451 if ( FAILED( result ) ) {
6452 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6453 errorText_ = errorStream_.str();
6457 // Lock the buffer and clear it so that if we start to play again,
6458 // we won't have old data playing.
6459 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6460 if ( FAILED( result ) ) {
6461 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6462 errorText_ = errorStream_.str();
6466 // Zero the DS buffer
6467 ZeroMemory( audioPtr, dataLen );
6469 // Unlock the DS buffer
6470 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6471 if ( FAILED( result ) ) {
6472 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6473 errorText_ = errorStream_.str();
6477 // If we start playing again, we must begin at beginning of buffer.
6478 handle->bufferPointer[0] = 0;
6481 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6482 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6486 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output section
// above, so only lock here for a pure-input stream.
6488 if ( stream_.mode != DUPLEX )
6489 MUTEX_LOCK( &stream_.mutex );
6491 result = buffer->Stop();
6492 if ( FAILED( result ) ) {
6493 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6494 errorText_ = errorStream_.str();
6498 // Lock the buffer and clear it so that if we start to play again,
6499 // we won't have old data playing.
6500 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6501 if ( FAILED( result ) ) {
6502 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6503 errorText_ = errorStream_.str();
6507 // Zero the DS buffer
6508 ZeroMemory( audioPtr, dataLen );
6510 // Unlock the DS buffer
6511 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6512 if ( FAILED( result ) ) {
6513 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6514 errorText_ = errorStream_.str();
6518 // If we start recording again, we must begin at beginning of buffer.
6519 handle->bufferPointer[1] = 0;
6523 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6524 MUTEX_UNLOCK( &stream_.mutex );
6526 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort the stream immediately (no graceful drain of pending output).
// Setting drainCounter to 2 makes the callback thread write silence
// instead of user data (see the drainCounter > 1 branch in
// callbackEvent).  Emits a WARNING if already stopped.
6529 void RtApiDs :: abortStream()
6532 if ( stream_.state == STREAM_STOPPED ) {
6533 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6534 error( RtAudioError::WARNING );
6538 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6539 handle->drainCounter = 2;
6544 void RtApiDs :: callbackEvent()
6546 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6547 Sleep( 50 ); // sleep 50 milliseconds
6551 if ( stream_.state == STREAM_CLOSED ) {
6552 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6553 error( RtAudioError::WARNING );
6557 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6558 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6560 // Check if we were draining the stream and signal is finished.
6561 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6563 stream_.state = STREAM_STOPPING;
6564 if ( handle->internalDrain == false )
6565 SetEvent( handle->condition );
6571 // Invoke user callback to get fresh output data UNLESS we are
6573 if ( handle->drainCounter == 0 ) {
6574 RtAudioCallback callback = (RtAudioCallback) info->callback;
6575 double streamTime = getStreamTime();
6576 RtAudioStreamStatus status = 0;
6577 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6578 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6579 handle->xrun[0] = false;
6581 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6582 status |= RTAUDIO_INPUT_OVERFLOW;
6583 handle->xrun[1] = false;
6585 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6586 stream_.bufferSize, streamTime, status, info->userData );
6587 if ( cbReturnValue == 2 ) {
6588 stream_.state = STREAM_STOPPING;
6589 handle->drainCounter = 2;
6593 else if ( cbReturnValue == 1 ) {
6594 handle->drainCounter = 1;
6595 handle->internalDrain = true;
6600 DWORD currentWritePointer, safeWritePointer;
6601 DWORD currentReadPointer, safeReadPointer;
6602 UINT nextWritePointer;
6604 LPVOID buffer1 = NULL;
6605 LPVOID buffer2 = NULL;
6606 DWORD bufferSize1 = 0;
6607 DWORD bufferSize2 = 0;
6612 MUTEX_LOCK( &stream_.mutex );
6613 if ( stream_.state == STREAM_STOPPED ) {
6614 MUTEX_UNLOCK( &stream_.mutex );
6618 if ( buffersRolling == false ) {
6619 if ( stream_.mode == DUPLEX ) {
6620 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6622 // It takes a while for the devices to get rolling. As a result,
6623 // there's no guarantee that the capture and write device pointers
6624 // will move in lockstep. Wait here for both devices to start
6625 // rolling, and then set our buffer pointers accordingly.
6626 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6627 // bytes later than the write buffer.
6629 // Stub: a serious risk of having a pre-emptive scheduling round
6630 // take place between the two GetCurrentPosition calls... but I'm
6631 // really not sure how to solve the problem. Temporarily boost to
6632 // Realtime priority, maybe; but I'm not sure what priority the
6633 // DirectSound service threads run at. We *should* be roughly
6634 // within a ms or so of correct.
6636 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6637 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6639 DWORD startSafeWritePointer, startSafeReadPointer;
6641 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6642 if ( FAILED( result ) ) {
6643 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6644 errorText_ = errorStream_.str();
6645 MUTEX_UNLOCK( &stream_.mutex );
6646 error( RtAudioError::SYSTEM_ERROR );
6649 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6650 if ( FAILED( result ) ) {
6651 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6652 errorText_ = errorStream_.str();
6653 MUTEX_UNLOCK( &stream_.mutex );
6654 error( RtAudioError::SYSTEM_ERROR );
6658 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6659 if ( FAILED( result ) ) {
6660 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6661 errorText_ = errorStream_.str();
6662 MUTEX_UNLOCK( &stream_.mutex );
6663 error( RtAudioError::SYSTEM_ERROR );
6666 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6667 if ( FAILED( result ) ) {
6668 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6669 errorText_ = errorStream_.str();
6670 MUTEX_UNLOCK( &stream_.mutex );
6671 error( RtAudioError::SYSTEM_ERROR );
6674 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6678 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6680 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6681 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6682 handle->bufferPointer[1] = safeReadPointer;
6684 else if ( stream_.mode == OUTPUT ) {
6686 // Set the proper nextWritePosition after initial startup.
6687 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6688 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6689 if ( FAILED( result ) ) {
6690 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6691 errorText_ = errorStream_.str();
6692 MUTEX_UNLOCK( &stream_.mutex );
6693 error( RtAudioError::SYSTEM_ERROR );
6696 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6697 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6700 buffersRolling = true;
6703 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6705 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6707 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6708 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6709 bufferBytes *= formatBytes( stream_.userFormat );
6710 memset( stream_.userBuffer[0], 0, bufferBytes );
6713 // Setup parameters and do buffer conversion if necessary.
6714 if ( stream_.doConvertBuffer[0] ) {
6715 buffer = stream_.deviceBuffer;
6716 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6717 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6718 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6721 buffer = stream_.userBuffer[0];
6722 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6723 bufferBytes *= formatBytes( stream_.userFormat );
6726 // No byte swapping necessary in DirectSound implementation.
6728 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6729 // unsigned. So, we need to convert our signed 8-bit data here to
6731 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6732 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6734 DWORD dsBufferSize = handle->dsBufferSize[0];
6735 nextWritePointer = handle->bufferPointer[0];
6737 DWORD endWrite, leadPointer;
6739 // Find out where the read and "safe write" pointers are.
6740 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6741 if ( FAILED( result ) ) {
6742 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6743 errorText_ = errorStream_.str();
6744 MUTEX_UNLOCK( &stream_.mutex );
6745 error( RtAudioError::SYSTEM_ERROR );
6749 // We will copy our output buffer into the region between
6750 // safeWritePointer and leadPointer. If leadPointer is not
6751 // beyond the next endWrite position, wait until it is.
6752 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6753 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6754 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6755 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6756 endWrite = nextWritePointer + bufferBytes;
6758 // Check whether the entire write region is behind the play pointer.
6759 if ( leadPointer >= endWrite ) break;
6761 // If we are here, then we must wait until the leadPointer advances
6762 // beyond the end of our next write region. We use the
6763 // Sleep() function to suspend operation until that happens.
6764 double millis = ( endWrite - leadPointer ) * 1000.0;
6765 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6766 if ( millis < 1.0 ) millis = 1.0;
6767 Sleep( (DWORD) millis );
6770 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6771 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6772 // We've strayed into the forbidden zone ... resync the read pointer.
6773 handle->xrun[0] = true;
6774 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6775 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6776 handle->bufferPointer[0] = nextWritePointer;
6777 endWrite = nextWritePointer + bufferBytes;
6780 // Lock free space in the buffer
6781 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6782 &bufferSize1, &buffer2, &bufferSize2, 0 );
6783 if ( FAILED( result ) ) {
6784 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6785 errorText_ = errorStream_.str();
6786 MUTEX_UNLOCK( &stream_.mutex );
6787 error( RtAudioError::SYSTEM_ERROR );
6791 // Copy our buffer into the DS buffer
6792 CopyMemory( buffer1, buffer, bufferSize1 );
6793 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6795 // Update our buffer offset and unlock sound buffer
6796 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6797 if ( FAILED( result ) ) {
6798 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6799 errorText_ = errorStream_.str();
6800 MUTEX_UNLOCK( &stream_.mutex );
6801 error( RtAudioError::SYSTEM_ERROR );
6804 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6805 handle->bufferPointer[0] = nextWritePointer;
6808 // Don't bother draining input
6809 if ( handle->drainCounter ) {
6810 handle->drainCounter++;
6814 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6816 // Setup parameters.
6817 if ( stream_.doConvertBuffer[1] ) {
6818 buffer = stream_.deviceBuffer;
6819 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6820 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6823 buffer = stream_.userBuffer[1];
6824 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6825 bufferBytes *= formatBytes( stream_.userFormat );
6828 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6829 long nextReadPointer = handle->bufferPointer[1];
6830 DWORD dsBufferSize = handle->dsBufferSize[1];
6832 // Find out where the write and "safe read" pointers are.
6833 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6834 if ( FAILED( result ) ) {
6835 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6836 errorText_ = errorStream_.str();
6837 MUTEX_UNLOCK( &stream_.mutex );
6838 error( RtAudioError::SYSTEM_ERROR );
6842 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6843 DWORD endRead = nextReadPointer + bufferBytes;
6845 // Handling depends on whether we are INPUT or DUPLEX.
6846 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6847 // then a wait here will drag the write pointers into the forbidden zone.
6849 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6850 // it's in a safe position. This causes dropouts, but it seems to be the only
6851 // practical way to sync up the read and write pointers reliably, given the
6852 // the very complex relationship between phase and increment of the read and write
6855 // In order to minimize audible dropouts in DUPLEX mode, we will
6856 // provide a pre-roll period of 0.5 seconds in which we return
6857 // zeros from the read buffer while the pointers sync up.
6859 if ( stream_.mode == DUPLEX ) {
6860 if ( safeReadPointer < endRead ) {
6861 if ( duplexPrerollBytes <= 0 ) {
6862 // Pre-roll time over. Be more agressive.
6863 int adjustment = endRead-safeReadPointer;
6865 handle->xrun[1] = true;
6867 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6868 // and perform fine adjustments later.
6869 // - small adjustments: back off by twice as much.
6870 if ( adjustment >= 2*bufferBytes )
6871 nextReadPointer = safeReadPointer-2*bufferBytes;
6873 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6875 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6879 // In pre=roll time. Just do it.
6880 nextReadPointer = safeReadPointer - bufferBytes;
6881 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6883 endRead = nextReadPointer + bufferBytes;
6886 else { // mode == INPUT
6887 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6888 // See comments for playback.
6889 double millis = (endRead - safeReadPointer) * 1000.0;
6890 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6891 if ( millis < 1.0 ) millis = 1.0;
6892 Sleep( (DWORD) millis );
6894 // Wake up and find out where we are now.
6895 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6896 if ( FAILED( result ) ) {
6897 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6898 errorText_ = errorStream_.str();
6899 MUTEX_UNLOCK( &stream_.mutex );
6900 error( RtAudioError::SYSTEM_ERROR );
6904 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6908 // Lock free space in the buffer
6909 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6910 &bufferSize1, &buffer2, &bufferSize2, 0 );
6911 if ( FAILED( result ) ) {
6912 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6913 errorText_ = errorStream_.str();
6914 MUTEX_UNLOCK( &stream_.mutex );
6915 error( RtAudioError::SYSTEM_ERROR );
6919 if ( duplexPrerollBytes <= 0 ) {
6920 // Copy our buffer into the DS buffer
6921 CopyMemory( buffer, buffer1, bufferSize1 );
6922 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6925 memset( buffer, 0, bufferSize1 );
6926 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6927 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6930 // Update our buffer offset and unlock sound buffer
6931 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6932 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6933 if ( FAILED( result ) ) {
6934 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6935 errorText_ = errorStream_.str();
6936 MUTEX_UNLOCK( &stream_.mutex );
6937 error( RtAudioError::SYSTEM_ERROR );
6940 handle->bufferPointer[1] = nextReadPointer;
6942 // No byte swapping necessary in DirectSound implementation.
6944 // If necessary, convert 8-bit data from unsigned to signed.
6945 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6946 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6948 // Do buffer conversion if necessary.
6949 if ( stream_.doConvertBuffer[1] )
6950 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6954 MUTEX_UNLOCK( &stream_.mutex );
6955 RtApi::tickStreamTime();
6958 // Definitions for utility functions and callbacks
6959 // specific to the DirectSound implementation.
6961 static unsigned __stdcall callbackHandler( void *ptr )
6963 CallbackInfo *info = (CallbackInfo *) ptr;
6964 RtApiDs *object = (RtApiDs *) info->object;
6965 bool* isRunning = &info->isRunning;
6967 while ( *isRunning == true ) {
6968 object->callbackEvent();
6975 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6976 LPCTSTR description,
6980 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6981 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6984 bool validDevice = false;
6985 if ( probeInfo.isInput == true ) {
6987 LPDIRECTSOUNDCAPTURE object;
6989 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6990 if ( hr != DS_OK ) return TRUE;
6992 caps.dwSize = sizeof(caps);
6993 hr = object->GetCaps( &caps );
6994 if ( hr == DS_OK ) {
6995 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7002 LPDIRECTSOUND object;
7003 hr = DirectSoundCreate( lpguid, &object, NULL );
7004 if ( hr != DS_OK ) return TRUE;
7006 caps.dwSize = sizeof(caps);
7007 hr = object->GetCaps( &caps );
7008 if ( hr == DS_OK ) {
7009 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7015 // If good device, then save its name and guid.
7016 std::string name = convertCharPointerToStdString( description );
7017 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7018 if ( lpguid == NULL )
7019 name = "Default Device";
7020 if ( validDevice ) {
7021 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7022 if ( dsDevices[i].name == name ) {
7023 dsDevices[i].found = true;
7024 if ( probeInfo.isInput ) {
7025 dsDevices[i].id[1] = lpguid;
7026 dsDevices[i].validId[1] = true;
7029 dsDevices[i].id[0] = lpguid;
7030 dsDevices[i].validId[0] = true;
7038 device.found = true;
7039 if ( probeInfo.isInput ) {
7040 device.id[1] = lpguid;
7041 device.validId[1] = true;
7044 device.id[0] = lpguid;
7045 device.validId[0] = true;
7047 dsDevices.push_back( device );
7053 static const char* getErrorString( int code )
7057 case DSERR_ALLOCATED:
7058 return "Already allocated";
7060 case DSERR_CONTROLUNAVAIL:
7061 return "Control unavailable";
7063 case DSERR_INVALIDPARAM:
7064 return "Invalid parameter";
7066 case DSERR_INVALIDCALL:
7067 return "Invalid call";
7070 return "Generic error";
7072 case DSERR_PRIOLEVELNEEDED:
7073 return "Priority level needed";
7075 case DSERR_OUTOFMEMORY:
7076 return "Out of memory";
7078 case DSERR_BADFORMAT:
7079 return "The sample rate or the channel format is not supported";
7081 case DSERR_UNSUPPORTED:
7082 return "Not supported";
7084 case DSERR_NODRIVER:
7087 case DSERR_ALREADYINITIALIZED:
7088 return "Already initialized";
7090 case DSERR_NOAGGREGATION:
7091 return "No aggregation";
7093 case DSERR_BUFFERLOST:
7094 return "Buffer lost";
7096 case DSERR_OTHERAPPHASPRIO:
7097 return "Another application already has priority";
7099 case DSERR_UNINITIALIZED:
7100 return "Uninitialized";
7103 return "DirectSound unknown error";
7106 //******************** End of __WINDOWS_DS__ *********************//
7110 #if defined(__LINUX_ALSA__)
7112 #include <alsa/asoundlib.h>
7115 // A structure to hold various information related to the ALSA API
7118 snd_pcm_t *handles[2];
7121 pthread_cond_t runnable_cv;
7125 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7128 static void *alsaCallbackHandler( void * ptr );
7130 RtApiAlsa :: RtApiAlsa()
7132 // Nothing to do here.
7135 RtApiAlsa :: ~RtApiAlsa()
7137 if ( stream_.state != STREAM_CLOSED ) closeStream();
7140 unsigned int RtApiAlsa :: getDeviceCount( void )
7142 unsigned nDevices = 0;
7143 int result, subdevice, card;
7145 snd_ctl_t *handle = 0;
7147 // Count cards and devices
7149 snd_card_next( &card );
7150 while ( card >= 0 ) {
7151 sprintf( name, "hw:%d", card );
7152 result = snd_ctl_open( &handle, name, 0 );
7155 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7156 errorText_ = errorStream_.str();
7157 error( RtAudioError::WARNING );
7162 result = snd_ctl_pcm_next_device( handle, &subdevice );
7164 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7165 errorText_ = errorStream_.str();
7166 error( RtAudioError::WARNING );
7169 if ( subdevice < 0 )
7175 snd_ctl_close( handle );
7176 snd_card_next( &card );
7179 result = snd_ctl_open( &handle, "default", 0 );
7182 snd_ctl_close( handle );
7188 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7190 RtAudio::DeviceInfo info;
7191 info.probed = false;
7193 unsigned nDevices = 0;
7194 int result, subdevice, card;
7196 snd_ctl_t *chandle = 0;
7198 // Count cards and devices
7201 snd_card_next( &card );
7202 while ( card >= 0 ) {
7203 sprintf( name, "hw:%d", card );
7204 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7207 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7208 errorText_ = errorStream_.str();
7209 error( RtAudioError::WARNING );
7214 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7216 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7217 errorText_ = errorStream_.str();
7218 error( RtAudioError::WARNING );
7221 if ( subdevice < 0 ) break;
7222 if ( nDevices == device ) {
7223 sprintf( name, "hw:%d,%d", card, subdevice );
7230 snd_ctl_close( chandle );
7231 snd_card_next( &card );
7234 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7235 if ( result == 0 ) {
7236 if ( nDevices == device ) {
7237 strcpy( name, "default" );
7243 if ( nDevices == 0 ) {
7244 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7245 error( RtAudioError::INVALID_USE );
7249 if ( device >= nDevices ) {
7250 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7251 error( RtAudioError::INVALID_USE );
7257 // If a stream is already open, we cannot probe the stream devices.
7258 // Thus, use the saved results.
7259 if ( stream_.state != STREAM_CLOSED &&
7260 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7261 snd_ctl_close( chandle );
7262 if ( device >= devices_.size() ) {
7263 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7264 error( RtAudioError::WARNING );
7267 return devices_[ device ];
7270 int openMode = SND_PCM_ASYNC;
7271 snd_pcm_stream_t stream;
7272 snd_pcm_info_t *pcminfo;
7273 snd_pcm_info_alloca( &pcminfo );
7275 snd_pcm_hw_params_t *params;
7276 snd_pcm_hw_params_alloca( ¶ms );
7278 // First try for playback unless default device (which has subdev -1)
7279 stream = SND_PCM_STREAM_PLAYBACK;
7280 snd_pcm_info_set_stream( pcminfo, stream );
7281 if ( subdevice != -1 ) {
7282 snd_pcm_info_set_device( pcminfo, subdevice );
7283 snd_pcm_info_set_subdevice( pcminfo, 0 );
7285 result = snd_ctl_pcm_info( chandle, pcminfo );
7287 // Device probably doesn't support playback.
7292 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7294 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7295 errorText_ = errorStream_.str();
7296 error( RtAudioError::WARNING );
7300 // The device is open ... fill the parameter structure.
7301 result = snd_pcm_hw_params_any( phandle, params );
7303 snd_pcm_close( phandle );
7304 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7305 errorText_ = errorStream_.str();
7306 error( RtAudioError::WARNING );
7310 // Get output channel information.
7312 result = snd_pcm_hw_params_get_channels_max( params, &value );
7314 snd_pcm_close( phandle );
7315 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7316 errorText_ = errorStream_.str();
7317 error( RtAudioError::WARNING );
7320 info.outputChannels = value;
7321 snd_pcm_close( phandle );
7324 stream = SND_PCM_STREAM_CAPTURE;
7325 snd_pcm_info_set_stream( pcminfo, stream );
7327 // Now try for capture unless default device (with subdev = -1)
7328 if ( subdevice != -1 ) {
7329 result = snd_ctl_pcm_info( chandle, pcminfo );
7330 snd_ctl_close( chandle );
7332 // Device probably doesn't support capture.
7333 if ( info.outputChannels == 0 ) return info;
7334 goto probeParameters;
7338 snd_ctl_close( chandle );
7340 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7342 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7343 errorText_ = errorStream_.str();
7344 error( RtAudioError::WARNING );
7345 if ( info.outputChannels == 0 ) return info;
7346 goto probeParameters;
7349 // The device is open ... fill the parameter structure.
7350 result = snd_pcm_hw_params_any( phandle, params );
7352 snd_pcm_close( phandle );
7353 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7354 errorText_ = errorStream_.str();
7355 error( RtAudioError::WARNING );
7356 if ( info.outputChannels == 0 ) return info;
7357 goto probeParameters;
7360 result = snd_pcm_hw_params_get_channels_max( params, &value );
7362 snd_pcm_close( phandle );
7363 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7364 errorText_ = errorStream_.str();
7365 error( RtAudioError::WARNING );
7366 if ( info.outputChannels == 0 ) return info;
7367 goto probeParameters;
7369 info.inputChannels = value;
7370 snd_pcm_close( phandle );
7372 // If device opens for both playback and capture, we determine the channels.
7373 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7374 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7376 // ALSA doesn't provide default devices so we'll use the first available one.
7377 if ( device == 0 && info.outputChannels > 0 )
7378 info.isDefaultOutput = true;
7379 if ( device == 0 && info.inputChannels > 0 )
7380 info.isDefaultInput = true;
7383 // At this point, we just need to figure out the supported data
7384 // formats and sample rates. We'll proceed by opening the device in
7385 // the direction with the maximum number of channels, or playback if
7386 // they are equal. This might limit our sample rate options, but so
7389 if ( info.outputChannels >= info.inputChannels )
7390 stream = SND_PCM_STREAM_PLAYBACK;
7392 stream = SND_PCM_STREAM_CAPTURE;
7393 snd_pcm_info_set_stream( pcminfo, stream );
7395 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7397 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7398 errorText_ = errorStream_.str();
7399 error( RtAudioError::WARNING );
7403 // The device is open ... fill the parameter structure.
7404 result = snd_pcm_hw_params_any( phandle, params );
7406 snd_pcm_close( phandle );
7407 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7408 errorText_ = errorStream_.str();
7409 error( RtAudioError::WARNING );
7413 // Test our discrete set of sample rate values.
7414 info.sampleRates.clear();
7415 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7416 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7417 info.sampleRates.push_back( SAMPLE_RATES[i] );
7419 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7420 info.preferredSampleRate = SAMPLE_RATES[i];
7423 if ( info.sampleRates.size() == 0 ) {
7424 snd_pcm_close( phandle );
7425 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7426 errorText_ = errorStream_.str();
7427 error( RtAudioError::WARNING );
7431 // Probe the supported data formats ... we don't care about endian-ness just yet
7432 snd_pcm_format_t format;
7433 info.nativeFormats = 0;
7434 format = SND_PCM_FORMAT_S8;
7435 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7436 info.nativeFormats |= RTAUDIO_SINT8;
7437 format = SND_PCM_FORMAT_S16;
7438 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7439 info.nativeFormats |= RTAUDIO_SINT16;
7440 format = SND_PCM_FORMAT_S24;
7441 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7442 info.nativeFormats |= RTAUDIO_SINT24;
7443 format = SND_PCM_FORMAT_S32;
7444 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7445 info.nativeFormats |= RTAUDIO_SINT32;
7446 format = SND_PCM_FORMAT_FLOAT;
7447 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7448 info.nativeFormats |= RTAUDIO_FLOAT32;
7449 format = SND_PCM_FORMAT_FLOAT64;
7450 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7451 info.nativeFormats |= RTAUDIO_FLOAT64;
7453 // Check that we have at least one supported format
7454 if ( info.nativeFormats == 0 ) {
7455 snd_pcm_close( phandle );
7456 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7457 errorText_ = errorStream_.str();
7458 error( RtAudioError::WARNING );
7462 // Get the device name
7464 result = snd_card_get_name( card, &cardname );
7465 if ( result >= 0 ) {
7466 sprintf( name, "hw:%s,%d", cardname, subdevice );
7471 // That's all ... close the device and return
7472 snd_pcm_close( phandle );
7477 void RtApiAlsa :: saveDeviceInfo( void )
7481 unsigned int nDevices = getDeviceCount();
7482 devices_.resize( nDevices );
7483 for ( unsigned int i=0; i<nDevices; i++ )
7484 devices_[i] = getDeviceInfo( i );
7487 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7488 unsigned int firstChannel, unsigned int sampleRate,
7489 RtAudioFormat format, unsigned int *bufferSize,
7490 RtAudio::StreamOptions *options )
7493 #if defined(__RTAUDIO_DEBUG__)
7495 snd_output_stdio_attach(&out, stderr, 0);
7498 // I'm not using the "plug" interface ... too much inconsistent behavior.
7500 unsigned nDevices = 0;
7501 int result, subdevice, card;
7505 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7506 snprintf(name, sizeof(name), "%s", "default");
7508 // Count cards and devices
7510 snd_card_next( &card );
7511 while ( card >= 0 ) {
7512 sprintf( name, "hw:%d", card );
7513 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7515 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7516 errorText_ = errorStream_.str();
7521 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7522 if ( result < 0 ) break;
7523 if ( subdevice < 0 ) break;
7524 if ( nDevices == device ) {
7525 sprintf( name, "hw:%d,%d", card, subdevice );
7526 snd_ctl_close( chandle );
7531 snd_ctl_close( chandle );
7532 snd_card_next( &card );
7535 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7536 if ( result == 0 ) {
7537 if ( nDevices == device ) {
7538 strcpy( name, "default" );
7539 snd_ctl_close( chandle );
7544 snd_ctl_close( chandle );
7546 if ( nDevices == 0 ) {
7547 // This should not happen because a check is made before this function is called.
7548 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7552 if ( device >= nDevices ) {
7553 // This should not happen because a check is made before this function is called.
7554 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7561 // The getDeviceInfo() function will not work for a device that is
7562 // already open. Thus, we'll probe the system before opening a
7563 // stream and save the results for use by getDeviceInfo().
7564 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7565 this->saveDeviceInfo();
7567 snd_pcm_stream_t stream;
7568 if ( mode == OUTPUT )
7569 stream = SND_PCM_STREAM_PLAYBACK;
7571 stream = SND_PCM_STREAM_CAPTURE;
7574 int openMode = SND_PCM_ASYNC;
7575 result = snd_pcm_open( &phandle, name, stream, openMode );
7577 if ( mode == OUTPUT )
7578 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7580 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7581 errorText_ = errorStream_.str();
7585 // Fill the parameter structure.
7586 snd_pcm_hw_params_t *hw_params;
7587 snd_pcm_hw_params_alloca( &hw_params );
7588 result = snd_pcm_hw_params_any( phandle, hw_params );
7590 snd_pcm_close( phandle );
7591 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7592 errorText_ = errorStream_.str();
7596 #if defined(__RTAUDIO_DEBUG__)
7597 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7598 snd_pcm_hw_params_dump( hw_params, out );
7601 // Set access ... check user preference.
7602 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7603 stream_.userInterleaved = false;
7604 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7606 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7607 stream_.deviceInterleaved[mode] = true;
7610 stream_.deviceInterleaved[mode] = false;
7613 stream_.userInterleaved = true;
7614 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7616 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7617 stream_.deviceInterleaved[mode] = false;
7620 stream_.deviceInterleaved[mode] = true;
7624 snd_pcm_close( phandle );
7625 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7626 errorText_ = errorStream_.str();
7630 // Determine how to set the device format.
7631 stream_.userFormat = format;
7632 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7634 if ( format == RTAUDIO_SINT8 )
7635 deviceFormat = SND_PCM_FORMAT_S8;
7636 else if ( format == RTAUDIO_SINT16 )
7637 deviceFormat = SND_PCM_FORMAT_S16;
7638 else if ( format == RTAUDIO_SINT24 )
7639 deviceFormat = SND_PCM_FORMAT_S24;
7640 else if ( format == RTAUDIO_SINT32 )
7641 deviceFormat = SND_PCM_FORMAT_S32;
7642 else if ( format == RTAUDIO_FLOAT32 )
7643 deviceFormat = SND_PCM_FORMAT_FLOAT;
7644 else if ( format == RTAUDIO_FLOAT64 )
7645 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7647 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7648 stream_.deviceFormat[mode] = format;
7652 // The user requested format is not natively supported by the device.
7653 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7654 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7655 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7659 deviceFormat = SND_PCM_FORMAT_FLOAT;
7660 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7661 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7665 deviceFormat = SND_PCM_FORMAT_S32;
7666 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7667 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7671 deviceFormat = SND_PCM_FORMAT_S24;
7672 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7673 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7677 deviceFormat = SND_PCM_FORMAT_S16;
7678 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7679 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7683 deviceFormat = SND_PCM_FORMAT_S8;
7684 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7685 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7689 // If we get here, no supported format was found.
7690 snd_pcm_close( phandle );
7691 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7692 errorText_ = errorStream_.str();
7696 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7698 snd_pcm_close( phandle );
7699 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7700 errorText_ = errorStream_.str();
7704 // Determine whether byte-swaping is necessary.
7705 stream_.doByteSwap[mode] = false;
7706 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7707 result = snd_pcm_format_cpu_endian( deviceFormat );
7709 stream_.doByteSwap[mode] = true;
7710 else if (result < 0) {
7711 snd_pcm_close( phandle );
7712 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7713 errorText_ = errorStream_.str();
7718 // Set the sample rate.
7719 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7721 snd_pcm_close( phandle );
7722 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7723 errorText_ = errorStream_.str();
7727 // Determine the number of channels for this device. We support a possible
7728 // minimum device channel number > than the value requested by the user.
7729 stream_.nUserChannels[mode] = channels;
7731 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7732 unsigned int deviceChannels = value;
7733 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7734 snd_pcm_close( phandle );
7735 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7736 errorText_ = errorStream_.str();
7740 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7742 snd_pcm_close( phandle );
7743 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7744 errorText_ = errorStream_.str();
7747 deviceChannels = value;
7748 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7749 stream_.nDeviceChannels[mode] = deviceChannels;
7751 // Set the device channels.
7752 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7754 snd_pcm_close( phandle );
7755 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7756 errorText_ = errorStream_.str();
7760 // Set the buffer (or period) size.
7762 snd_pcm_uframes_t periodSize = *bufferSize;
7763 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7765 snd_pcm_close( phandle );
7766 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7767 errorText_ = errorStream_.str();
7770 *bufferSize = periodSize;
7772 // Set the buffer number, which in ALSA is referred to as the "period".
7773 unsigned int periods = 0;
7774 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7775 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7776 if ( periods < 2 ) periods = 4; // a fairly safe default value
7777 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7779 snd_pcm_close( phandle );
7780 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7781 errorText_ = errorStream_.str();
7785 // If attempting to setup a duplex stream, the bufferSize parameter
7786 // MUST be the same in both directions!
7787 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7788 snd_pcm_close( phandle );
7789 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7790 errorText_ = errorStream_.str();
7794 stream_.bufferSize = *bufferSize;
7796 // Install the hardware configuration
7797 result = snd_pcm_hw_params( phandle, hw_params );
7799 snd_pcm_close( phandle );
7800 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7801 errorText_ = errorStream_.str();
7805 #if defined(__RTAUDIO_DEBUG__)
7806 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7807 snd_pcm_hw_params_dump( hw_params, out );
7810 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7811 snd_pcm_sw_params_t *sw_params = NULL;
7812 snd_pcm_sw_params_alloca( &sw_params );
7813 snd_pcm_sw_params_current( phandle, sw_params );
7814 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7815 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7816 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7818 // The following two settings were suggested by Theo Veenker
7819 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7820 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7822 // here are two options for a fix
7823 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7824 snd_pcm_uframes_t val;
7825 snd_pcm_sw_params_get_boundary( sw_params, &val );
7826 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7828 result = snd_pcm_sw_params( phandle, sw_params );
7830 snd_pcm_close( phandle );
7831 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7832 errorText_ = errorStream_.str();
7836 #if defined(__RTAUDIO_DEBUG__)
7837 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7838 snd_pcm_sw_params_dump( sw_params, out );
7841 // Set flags for buffer conversion
7842 stream_.doConvertBuffer[mode] = false;
7843 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7844 stream_.doConvertBuffer[mode] = true;
7845 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7846 stream_.doConvertBuffer[mode] = true;
7847 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7848 stream_.nUserChannels[mode] > 1 )
7849 stream_.doConvertBuffer[mode] = true;
7851 // Allocate the ApiHandle if necessary and then save.
7852 AlsaHandle *apiInfo = 0;
7853 if ( stream_.apiHandle == 0 ) {
7855 apiInfo = (AlsaHandle *) new AlsaHandle;
7857 catch ( std::bad_alloc& ) {
7858 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7862 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7863 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7867 stream_.apiHandle = (void *) apiInfo;
7868 apiInfo->handles[0] = 0;
7869 apiInfo->handles[1] = 0;
7872 apiInfo = (AlsaHandle *) stream_.apiHandle;
7874 apiInfo->handles[mode] = phandle;
7877 // Allocate necessary internal buffers.
7878 unsigned long bufferBytes;
7879 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7880 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7881 if ( stream_.userBuffer[mode] == NULL ) {
7882 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7886 if ( stream_.doConvertBuffer[mode] ) {
7888 bool makeBuffer = true;
7889 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7890 if ( mode == INPUT ) {
7891 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7892 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7893 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7898 bufferBytes *= *bufferSize;
7899 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7900 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7901 if ( stream_.deviceBuffer == NULL ) {
7902 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7908 stream_.sampleRate = sampleRate;
7909 stream_.nBuffers = periods;
7910 stream_.device[mode] = device;
7911 stream_.state = STREAM_STOPPED;
7913 // Setup the buffer conversion information structure.
7914 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7916 // Setup thread if necessary.
7917 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7918 // We had already set up an output stream.
7919 stream_.mode = DUPLEX;
7920 // Link the streams if possible.
7921 apiInfo->synchronized = false;
7922 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7923 apiInfo->synchronized = true;
7925 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7926 error( RtAudioError::WARNING );
7930 stream_.mode = mode;
7932 // Setup callback thread.
7933 stream_.callbackInfo.object = (void *) this;
7935 // Set the thread attributes for joinable and realtime scheduling
7936 // priority (optional). The higher priority will only take affect
7937 // if the program is run as root or suid. Note, under Linux
7938 // processes with CAP_SYS_NICE privilege, a user can change
7939 // scheduling policy and priority (thus need not be root). See
7940 // POSIX "capabilities".
7941 pthread_attr_t attr;
7942 pthread_attr_init( &attr );
7943 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7944 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7945 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7946 stream_.callbackInfo.doRealtime = true;
7947 struct sched_param param;
7948 int priority = options->priority;
7949 int min = sched_get_priority_min( SCHED_RR );
7950 int max = sched_get_priority_max( SCHED_RR );
7951 if ( priority < min ) priority = min;
7952 else if ( priority > max ) priority = max;
7953 param.sched_priority = priority;
7955 // Set the policy BEFORE the priority. Otherwise it fails.
7956 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7957 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7958 // This is definitely required. Otherwise it fails.
7959 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7960 pthread_attr_setschedparam(&attr, ¶m);
7963 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7965 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7968 stream_.callbackInfo.isRunning = true;
7969 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7970 pthread_attr_destroy( &attr );
7972 // Failed. Try instead with default attributes.
7973 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7975 stream_.callbackInfo.isRunning = false;
7976 errorText_ = "RtApiAlsa::error creating callback thread!";
7986 pthread_cond_destroy( &apiInfo->runnable_cv );
7987 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7988 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7990 stream_.apiHandle = 0;
7993 if ( phandle) snd_pcm_close( phandle );
7995 for ( int i=0; i<2; i++ ) {
7996 if ( stream_.userBuffer[i] ) {
7997 free( stream_.userBuffer[i] );
7998 stream_.userBuffer[i] = 0;
8002 if ( stream_.deviceBuffer ) {
8003 free( stream_.deviceBuffer );
8004 stream_.deviceBuffer = 0;
8007 stream_.state = STREAM_CLOSED;
// Close the ALSA stream: wake and join the callback thread, stop both PCM
// handles, release the AlsaHandle, and free all internal buffers.
// NOTE(review): the original numbering skips lines in this extract (e.g.
// 8012, 8016-8018), so some braces/returns are not visible here; the code
// lines below are left byte-identical.
8011 void RtApiAlsa :: closeStream()
8013 if ( stream_.state == STREAM_CLOSED ) {
8014 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8015 error( RtAudioError::WARNING );
8019 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8020 stream_.callbackInfo.isRunning = false;
8021 MUTEX_LOCK( &stream_.mutex );
// A stopped stream leaves the callback thread blocked on runnable_cv;
// signal it so the thread can observe isRunning == false and exit.
8022 if ( stream_.state == STREAM_STOPPED ) {
8023 apiInfo->runnable = true;
8024 pthread_cond_signal( &apiInfo->runnable_cv );
8026 MUTEX_UNLOCK( &stream_.mutex );
8027 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, abort immediately: snd_pcm_drop() discards pending
// frames rather than draining them.
8029 if ( stream_.state == STREAM_RUNNING ) {
8030 stream_.state = STREAM_STOPPED;
8031 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8032 snd_pcm_drop( apiInfo->handles[0] );
8033 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8034 snd_pcm_drop( apiInfo->handles[1] );
// Destroy the condition variable, close PCM handles (handles[0] = output,
// handles[1] = input) and drop the API handle.
8038 pthread_cond_destroy( &apiInfo->runnable_cv );
8039 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8040 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8042 stream_.apiHandle = 0;
// Free the user buffers for both directions and the shared device
// (conversion) buffer.
8045 for ( int i=0; i<2; i++ ) {
8046 if ( stream_.userBuffer[i] ) {
8047 free( stream_.userBuffer[i] );
8048 stream_.userBuffer[i] = 0;
8052 if ( stream_.deviceBuffer ) {
8053 free( stream_.deviceBuffer );
8054 stream_.deviceBuffer = 0;
8057 stream_.mode = UNINITIALIZED;
8058 stream_.state = STREAM_CLOSED;
// Start a stopped ALSA stream: prepare the PCM device(s) if necessary,
// mark the stream running, and wake the callback thread via runnable_cv.
8061 void RtApiAlsa :: startStream()
8063 // This method calls snd_pcm_prepare if the device isn't already in that state.
8066 if ( stream_.state == STREAM_RUNNING ) {
8067 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8068 error( RtAudioError::WARNING );
8072 MUTEX_LOCK( &stream_.mutex );
8074 #if defined( HAVE_GETTIMEOFDAY )
// Reset the stream-time reference so getStreamTime() measures from now.
8075 gettimeofday( &stream_.lastTickTimestamp, NULL );
8079 snd_pcm_state_t state;
8080 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8081 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Output side (handle[0]): prepare only if not already prepared.
8082 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8083 state = snd_pcm_state( handle[0] );
8084 if ( state != SND_PCM_STATE_PREPARED ) {
8085 result = snd_pcm_prepare( handle[0] );
8087 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8088 errorText_ = errorStream_.str();
// Input side (handle[1]): handled separately only when the two handles are
// not linked/synchronized; drop first to flush stale captured data.
8094 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8095 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8096 state = snd_pcm_state( handle[1] );
8097 if ( state != SND_PCM_STATE_PREPARED ) {
8098 result = snd_pcm_prepare( handle[1] );
8100 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8101 errorText_ = errorStream_.str();
8107 stream_.state = STREAM_RUNNING;
// Release the callback thread, which waits on runnable_cv while stopped.
8110 apiInfo->runnable = true;
8111 pthread_cond_signal( &apiInfo->runnable_cv );
8112 MUTEX_UNLOCK( &stream_.mutex );
8114 if ( result >= 0 ) return;
8115 error( RtAudioError::SYSTEM_ERROR );
// Stop a running ALSA stream gracefully: drain (or drop, if the handles
// are linked) the output, drop the input, and park the callback thread.
8118 void RtApiAlsa :: stopStream()
8121 if ( stream_.state == STREAM_STOPPED ) {
8122 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8123 error( RtAudioError::WARNING );
// Flip state before taking the mutex so the callback thread sees STOPPED
// on its next iteration.
8127 stream_.state = STREAM_STOPPED;
8128 MUTEX_LOCK( &stream_.mutex );
8131 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8132 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8133 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked (synchronized) handles: drop immediately; otherwise drain so
// already-queued output frames are played out.
8134 if ( apiInfo->synchronized )
8135 result = snd_pcm_drop( handle[0] );
8137 result = snd_pcm_drain( handle[0] );
8139 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8140 errorText_ = errorStream_.str();
// Input is always dropped (no point draining captured data) unless linked.
8145 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8146 result = snd_pcm_drop( handle[1] );
8148 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8149 errorText_ = errorStream_.str();
8155 apiInfo->runnable = false; // fixes high CPU usage when stopped
8156 MUTEX_UNLOCK( &stream_.mutex );
8158 if ( result >= 0 ) return;
8159 error( RtAudioError::SYSTEM_ERROR );
// Abort a running ALSA stream: like stopStream() but drops the output
// immediately instead of draining it (pending frames are discarded).
8162 void RtApiAlsa :: abortStream()
8165 if ( stream_.state == STREAM_STOPPED ) {
8166 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8167 error( RtAudioError::WARNING );
8171 stream_.state = STREAM_STOPPED;
8172 MUTEX_LOCK( &stream_.mutex );
8175 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8176 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8177 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8178 result = snd_pcm_drop( handle[0] );
8180 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8181 errorText_ = errorStream_.str();
// Input handled separately only when not linked to the output.
8186 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8187 result = snd_pcm_drop( handle[1] );
8189 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8190 errorText_ = errorStream_.str();
8196 apiInfo->runnable = false; // fixes high CPU usage when stopped
8197 MUTEX_UNLOCK( &stream_.mutex );
8199 if ( result >= 0 ) return;
8200 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: wait while stopped, invoke the
// user callback, read captured frames from the input PCM and write output
// frames to the output PCM, handling xruns (-EPIPE) by re-preparing.
// NOTE(review): the original numbering skips lines in this extract, so
// some braces/else/error-check lines are not visible; code lines below are
// left byte-identical.
8203 void RtApiAlsa :: callbackEvent()
8205 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While stopped, block on runnable_cv instead of spinning.
8206 if ( stream_.state == STREAM_STOPPED ) {
8207 MUTEX_LOCK( &stream_.mutex );
8208 while ( !apiInfo->runnable )
8209 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8211 if ( stream_.state != STREAM_RUNNING ) {
8212 MUTEX_UNLOCK( &stream_.mutex );
8215 MUTEX_UNLOCK( &stream_.mutex );
8218 if ( stream_.state == STREAM_CLOSED ) {
8219 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8220 error( RtAudioError::WARNING );
// Report any xrun flags recorded by a previous iteration to the user
// callback via the status bits, then clear them.
8224 int doStopStream = 0;
8225 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8226 double streamTime = getStreamTime();
8227 RtAudioStreamStatus status = 0;
8228 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8229 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8230 apiInfo->xrun[0] = false;
8232 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8233 status |= RTAUDIO_INPUT_OVERFLOW;
8234 apiInfo->xrun[1] = false;
8236 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8237 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Return value 2 requests an immediate abort; 1 (checked at the end)
// requests a graceful stop.
8239 if ( doStopStream == 2 ) {
8244 MUTEX_LOCK( &stream_.mutex );
8246 // The state might change while waiting on a mutex.
8247 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8253 snd_pcm_sframes_t frames;
8254 RtAudioFormat format;
8255 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Input (capture) side ----
8257 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8259 // Setup parameters.
// Read into the device buffer when a format/channel conversion to the
// user format is needed; otherwise read straight into the user buffer.
8260 if ( stream_.doConvertBuffer[1] ) {
8261 buffer = stream_.deviceBuffer;
8262 channels = stream_.nDeviceChannels[1];
8263 format = stream_.deviceFormat[1];
8266 buffer = stream_.userBuffer[1];
8267 channels = stream_.nUserChannels[1];
8268 format = stream_.userFormat;
8271 // Read samples from device in interleaved/non-interleaved format.
8272 if ( stream_.deviceInterleaved[1] )
8273 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build per-channel pointers into the flat buffer.
8275 void *bufs[channels];
8276 size_t offset = stream_.bufferSize * formatBytes( format );
8277 for ( int i=0; i<channels; i++ )
8278 bufs[i] = (void *) (buffer + (i * offset));
8279 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8282 if ( result < (int) stream_.bufferSize ) {
8283 // Either an error or overrun occured.
// -EPIPE signals an xrun: flag it for the next callback and re-prepare
// the device so capture can continue.
8284 if ( result == -EPIPE ) {
8285 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8286 if ( state == SND_PCM_STATE_XRUN ) {
8287 apiInfo->xrun[1] = true;
8288 result = snd_pcm_prepare( handle[1] );
8290 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8291 errorText_ = errorStream_.str();
8295 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8296 errorText_ = errorStream_.str();
8300 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8301 errorText_ = errorStream_.str();
// Read errors are reported as warnings; the stream keeps running.
8303 error( RtAudioError::WARNING );
8307 // Do byte swapping if necessary.
8308 if ( stream_.doByteSwap[1] )
8309 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8311 // Do buffer conversion if necessary.
8312 if ( stream_.doConvertBuffer[1] )
8313 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8315 // Check stream latency
8316 result = snd_pcm_delay( handle[1], &frames );
8317 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Output (playback) side ----
8322 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8324 // Setup parameters and do buffer conversion if necessary.
8325 if ( stream_.doConvertBuffer[0] ) {
8326 buffer = stream_.deviceBuffer;
8327 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8328 channels = stream_.nDeviceChannels[0];
8329 format = stream_.deviceFormat[0];
8332 buffer = stream_.userBuffer[0];
8333 channels = stream_.nUserChannels[0];
8334 format = stream_.userFormat;
8337 // Do byte swapping if necessary.
8338 if ( stream_.doByteSwap[0] )
8339 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8341 // Write samples to device in interleaved/non-interleaved format.
8342 if ( stream_.deviceInterleaved[0] )
8343 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8345 void *bufs[channels];
8346 size_t offset = stream_.bufferSize * formatBytes( format );
8347 for ( int i=0; i<channels; i++ )
8348 bufs[i] = (void *) (buffer + (i * offset));
8349 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8352 if ( result < (int) stream_.bufferSize ) {
8353 // Either an error or underrun occured.
8354 if ( result == -EPIPE ) {
8355 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8356 if ( state == SND_PCM_STATE_XRUN ) {
8357 apiInfo->xrun[0] = true;
8358 result = snd_pcm_prepare( handle[0] );
8360 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8361 errorText_ = errorStream_.str();
8364 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8367 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8368 errorText_ = errorStream_.str();
8372 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8373 errorText_ = errorStream_.str();
8375 error( RtAudioError::WARNING );
8379 // Check stream latency
8380 result = snd_pcm_delay( handle[0], &frames );
8381 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8385 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock, then honor a deferred stop request (1).
8387 RtApi::tickStreamTime();
8388 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread: loops on
// RtApiAlsa::callbackEvent() until CallbackInfo::isRunning is cleared
// (by closeStream), checking for cancellation each iteration.
8391 static void *alsaCallbackHandler( void *ptr )
8393 CallbackInfo *info = (CallbackInfo *) ptr;
8394 RtApiAlsa *object = (RtApiAlsa *) info->object;
8395 bool *isRunning = &info->isRunning;
8397 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether the realtime (SCHED_RR) request from
// probeDeviceOpen actually took effect for this thread.
8398 if ( info->doRealtime ) {
8399 std::cerr << "RtAudio alsa: " <<
8400 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8401 "running realtime scheduling" << std::endl;
8405 while ( *isRunning == true ) {
8406 pthread_testcancel();
8407 object->callbackEvent();
8410 pthread_exit( NULL );
8413 //******************** End of __LINUX_ALSA__ *********************//
8416 #if defined(__LINUX_PULSE__)
8418 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8419 // and Tristan Matthews.
8421 #include <pulse/error.h>
8422 #include <pulse/simple.h>
// Sample rates the PulseAudio backend advertises; zero-terminated so
// callers can iterate with `for ( const unsigned int *sr = ...; *sr; ++sr )`.
8425 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8426 44100, 48000, 96000, 0};
// Pairs an RtAudio sample format with its PulseAudio equivalent; used by
// the supported_sampleformats table below. (The closing brace of this
// struct is elided in this extract.)
8428 struct rtaudio_pa_format_mapping_t {
8429 RtAudioFormat rtaudio_format;
8430 pa_sample_format_t pa_format;
// Format conversion table for the PulseAudio backend, terminated by the
// {0, PA_SAMPLE_INVALID} sentinel entry.
8433 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8434 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8435 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8436 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8437 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend: the playback/record
// pa_simple connections (s_play/s_rec), the callback thread, and the
// runnable flag + condition variable used to park the thread while the
// stream is stopped. (Some member declarations are elided in this
// extract -- original lines 8440-8444.)
8439 struct PulseAudioHandle {
8443 pthread_cond_t runnable_cv;
8445 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: make sure the stream is torn down before the object dies.
// (The conditional's body is elided in this extract; presumably it calls
// closeStream() -- verify against the full source.)
8448 RtApiPulse::~RtApiPulse()
8450 if ( stream_.state != STREAM_CLOSED )
// Device enumeration for PulseAudio. The body is elided in this extract;
// given getDeviceInfo() below ignores its device argument, this
// presumably reports a single combined device -- verify against the full
// source.
8454 unsigned int RtApiPulse::getDeviceCount( void )
// Describe the (single) PulseAudio device. The device index is ignored;
// a fixed stereo in/out description is returned with the rates from
// SUPPORTED_SAMPLERATES and the formats the backend maps natively.
8459 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8461 RtAudio::DeviceInfo info;
8463 info.name = "PulseAudio";
8464 info.outputChannels = 2;
8465 info.inputChannels = 2;
8466 info.duplexChannels = 2;
8467 info.isDefaultOutput = true;
8468 info.isDefaultInput = true;
// Copy the zero-terminated rate list into the info structure.
8470 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8471 info.sampleRates.push_back( *sr );
8473 info.preferredSampleRate = 48000;
8474 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread: loops on
// RtApiPulse::callbackEvent() until CallbackInfo::isRunning is cleared,
// checking for cancellation each iteration.
8479 static void *pulseaudio_callback( void * user )
8481 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8482 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
// volatile: isRunning is cleared from another thread (closeStream).
8483 volatile bool *isRunning = &cbi->isRunning;
8485 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether SCHED_RR actually took effect.
8486 if (cbi->doRealtime) {
8487 std::cerr << "RtAudio pulse: " <<
8488 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8489 "running realtime scheduling" << std::endl;
8493 while ( *isRunning ) {
8494 pthread_testcancel();
8495 context->callbackEvent();
8498 pthread_exit( NULL );
// Close the PulseAudio stream: wake and join the callback thread, flush
// and free the pa_simple connections, then release all buffers.
8501 void RtApiPulse::closeStream( void )
8503 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8505 stream_.callbackInfo.isRunning = false;
8507 MUTEX_LOCK( &stream_.mutex );
// A stopped stream leaves the callback thread blocked on runnable_cv;
// signal it so the thread can observe isRunning == false and exit.
8508 if ( stream_.state == STREAM_STOPPED ) {
8509 pah->runnable = true;
8510 pthread_cond_signal( &pah->runnable_cv );
8512 MUTEX_UNLOCK( &stream_.mutex );
8514 pthread_join( pah->thread, 0 );
// Discard any queued playback data before freeing the connection.
8515 if ( pah->s_play ) {
8516 pa_simple_flush( pah->s_play, NULL );
8517 pa_simple_free( pah->s_play );
8520 pa_simple_free( pah->s_rec );
8522 pthread_cond_destroy( &pah->runnable_cv );
8524 stream_.apiHandle = 0;
// Free the per-direction user buffers.
8527 if ( stream_.userBuffer[0] ) {
8528 free( stream_.userBuffer[0] );
8529 stream_.userBuffer[0] = 0;
8531 if ( stream_.userBuffer[1] ) {
8532 free( stream_.userBuffer[1] );
8533 stream_.userBuffer[1] = 0;
8536 stream_.state = STREAM_CLOSED;
8537 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: wait while stopped,
// invoke the user callback, then push output frames via pa_simple_write
// and pull input frames via pa_simple_read (blocking calls).
8540 void RtApiPulse::callbackEvent( void )
8542 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// While stopped, block on runnable_cv instead of spinning.
8544 if ( stream_.state == STREAM_STOPPED ) {
8545 MUTEX_LOCK( &stream_.mutex );
8546 while ( !pah->runnable )
8547 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8549 if ( stream_.state != STREAM_RUNNING ) {
8550 MUTEX_UNLOCK( &stream_.mutex );
8553 MUTEX_UNLOCK( &stream_.mutex );
8556 if ( stream_.state == STREAM_CLOSED ) {
8557 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8558 "this shouldn't happen!";
8559 error( RtAudioError::WARNING );
// Run the user callback on the user-format buffers; return value 2 asks
// for an immediate abort, 1 (checked at the end) for a graceful stop.
8563 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8564 double streamTime = getStreamTime();
8565 RtAudioStreamStatus status = 0;
8566 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8567 stream_.bufferSize, streamTime, status,
8568 stream_.callbackInfo.userData );
8570 if ( doStopStream == 2 ) {
8575 MUTEX_LOCK( &stream_.mutex );
// Select the buffers the server actually sees: the shared device buffer
// when format/channel conversion is required, the user buffer otherwise.
8576 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8577 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8579 if ( stream_.state != STREAM_RUNNING )
// ---- Output (playback) side: convert then write ----
8584 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8585 if ( stream_.doConvertBuffer[OUTPUT] ) {
8586 convertBuffer( stream_.deviceBuffer,
8587 stream_.userBuffer[OUTPUT],
8588 stream_.convertInfo[OUTPUT] );
8589 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8590 formatBytes( stream_.deviceFormat[OUTPUT] );
8592 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8593 formatBytes( stream_.userFormat );
// Write errors are reported as warnings; the stream keeps running.
8595 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8596 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8597 pa_strerror( pa_error ) << ".";
8598 errorText_ = errorStream_.str();
8599 error( RtAudioError::WARNING );
// ---- Input (capture) side: read then convert ----
8603 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8604 if ( stream_.doConvertBuffer[INPUT] )
8605 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8606 formatBytes( stream_.deviceFormat[INPUT] );
8608 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8609 formatBytes( stream_.userFormat );
8611 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8612 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8613 pa_strerror( pa_error ) << ".";
8614 errorText_ = errorStream_.str();
8615 error( RtAudioError::WARNING );
8617 if ( stream_.doConvertBuffer[INPUT] ) {
8618 convertBuffer( stream_.userBuffer[INPUT],
8619 stream_.deviceBuffer,
8620 stream_.convertInfo[INPUT] );
8625 MUTEX_UNLOCK( &stream_.mutex );
8626 RtApi::tickStreamTime();
8628 if ( doStopStream == 1 )
// Start a stopped PulseAudio stream: mark it running and wake the
// callback thread via runnable_cv. No server call is needed; pa_simple
// reads/writes in callbackEvent() drive the stream.
8632 void RtApiPulse::startStream( void )
8634 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8636 if ( stream_.state == STREAM_CLOSED ) {
8637 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8638 error( RtAudioError::INVALID_USE );
8641 if ( stream_.state == STREAM_RUNNING ) {
8642 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8643 error( RtAudioError::WARNING );
8647 MUTEX_LOCK( &stream_.mutex );
8649 #if defined( HAVE_GETTIMEOFDAY )
// Reset the stream-time reference so getStreamTime() measures from now.
8650 gettimeofday( &stream_.lastTickTimestamp, NULL );
8653 stream_.state = STREAM_RUNNING;
8655 pah->runnable = true;
8656 pthread_cond_signal( &pah->runnable_cv );
8657 MUTEX_UNLOCK( &stream_.mutex );
// Stop a running PulseAudio stream gracefully: drain queued playback
// data so it is heard, then leave the stream in the STOPPED state.
8660 void RtApiPulse::stopStream( void )
8662 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8664 if ( stream_.state == STREAM_CLOSED ) {
8665 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8666 error( RtAudioError::INVALID_USE );
8669 if ( stream_.state == STREAM_STOPPED ) {
8670 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8671 error( RtAudioError::WARNING );
// Flip state before taking the mutex so the callback thread parks itself.
8675 stream_.state = STREAM_STOPPED;
8676 MUTEX_LOCK( &stream_.mutex );
8678 if ( pah && pah->s_play ) {
// pa_simple_drain blocks until all queued output has been played.
8680 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8681 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8682 pa_strerror( pa_error ) << ".";
8683 errorText_ = errorStream_.str();
// Unlock before error() since SYSTEM_ERROR may throw.
8684 MUTEX_UNLOCK( &stream_.mutex );
8685 error( RtAudioError::SYSTEM_ERROR );
8690 stream_.state = STREAM_STOPPED;
8691 MUTEX_UNLOCK( &stream_.mutex );
// Abort a running PulseAudio stream: like stopStream() but flushes
// (discards) queued playback data instead of draining it.
8694 void RtApiPulse::abortStream( void )
8696 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8698 if ( stream_.state == STREAM_CLOSED ) {
8699 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8700 error( RtAudioError::INVALID_USE );
8703 if ( stream_.state == STREAM_STOPPED ) {
8704 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8705 error( RtAudioError::WARNING );
8709 stream_.state = STREAM_STOPPED;
8710 MUTEX_LOCK( &stream_.mutex );
8712 if ( pah && pah->s_play ) {
// pa_simple_flush drops any audio still queued in the playback buffer.
8714 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8715 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8716 pa_strerror( pa_error ) << ".";
8717 errorText_ = errorStream_.str();
// Unlock before error() since SYSTEM_ERROR may throw.
8718 MUTEX_UNLOCK( &stream_.mutex );
8719 error( RtAudioError::SYSTEM_ERROR );
8724 stream_.state = STREAM_STOPPED;
8725 MUTEX_UNLOCK( &stream_.mutex );
8728 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8729 unsigned int channels, unsigned int firstChannel,
8730 unsigned int sampleRate, RtAudioFormat format,
8731 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8733 PulseAudioHandle *pah = 0;
8734 unsigned long bufferBytes = 0;
8737 if ( device != 0 ) return false;
8738 if ( mode != INPUT && mode != OUTPUT ) return false;
8739 if ( channels != 1 && channels != 2 ) {
8740 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8743 ss.channels = channels;
8745 if ( firstChannel != 0 ) return false;
8747 bool sr_found = false;
8748 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8749 if ( sampleRate == *sr ) {
8751 stream_.sampleRate = sampleRate;
8752 ss.rate = sampleRate;
8757 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8762 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8763 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8764 if ( format == sf->rtaudio_format ) {
8766 stream_.userFormat = sf->rtaudio_format;
8767 stream_.deviceFormat[mode] = stream_.userFormat;
8768 ss.format = sf->pa_format;
8772 if ( !sf_found ) { // Use internal data format conversion.
8773 stream_.userFormat = format;
8774 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8775 ss.format = PA_SAMPLE_FLOAT32LE;
8778 // Set other stream parameters.
8779 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8780 else stream_.userInterleaved = true;
8781 stream_.deviceInterleaved[mode] = true;
8782 stream_.nBuffers = 1;
8783 stream_.doByteSwap[mode] = false;
8784 stream_.nUserChannels[mode] = channels;
8785 stream_.nDeviceChannels[mode] = channels + firstChannel;
8786 stream_.channelOffset[mode] = 0;
8787 std::string streamName = "RtAudio";
8789 // Set flags for buffer conversion.
8790 stream_.doConvertBuffer[mode] = false;
8791 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8792 stream_.doConvertBuffer[mode] = true;
8793 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8794 stream_.doConvertBuffer[mode] = true;
8796 // Allocate necessary internal buffers.
8797 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8798 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8799 if ( stream_.userBuffer[mode] == NULL ) {
8800 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8803 stream_.bufferSize = *bufferSize;
8805 if ( stream_.doConvertBuffer[mode] ) {
8807 bool makeBuffer = true;
8808 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8809 if ( mode == INPUT ) {
8810 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8811 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8812 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8817 bufferBytes *= *bufferSize;
8818 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8819 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8820 if ( stream_.deviceBuffer == NULL ) {
8821 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8827 stream_.device[mode] = device;
8829 // Setup the buffer conversion information structure.
8830 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8832 if ( !stream_.apiHandle ) {
8833 PulseAudioHandle *pah = new PulseAudioHandle;
8835 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8839 stream_.apiHandle = pah;
8840 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8841 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8845 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8848 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8851 pa_buffer_attr buffer_attr;
8852 buffer_attr.fragsize = bufferBytes;
8853 buffer_attr.maxlength = -1;
8855 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8856 if ( !pah->s_rec ) {
8857 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8862 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8863 if ( !pah->s_play ) {
8864 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8872 if ( stream_.mode == UNINITIALIZED )
8873 stream_.mode = mode;
8874 else if ( stream_.mode == mode )
8877 stream_.mode = DUPLEX;
8879 if ( !stream_.callbackInfo.isRunning ) {
8880 stream_.callbackInfo.object = this;
8882 stream_.state = STREAM_STOPPED;
8883 // Set the thread attributes for joinable and realtime scheduling
8884 // priority (optional). The higher priority will only take affect
8885 // if the program is run as root or suid. Note, under Linux
8886 // processes with CAP_SYS_NICE privilege, a user can change
8887 // scheduling policy and priority (thus need not be root). See
8888 // POSIX "capabilities".
8889 pthread_attr_t attr;
8890 pthread_attr_init( &attr );
8891 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8892 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8893 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8894 stream_.callbackInfo.doRealtime = true;
8895 struct sched_param param;
8896 int priority = options->priority;
8897 int min = sched_get_priority_min( SCHED_RR );
8898 int max = sched_get_priority_max( SCHED_RR );
8899 if ( priority < min ) priority = min;
8900 else if ( priority > max ) priority = max;
8901 param.sched_priority = priority;
8903 // Set the policy BEFORE the priority. Otherwise it fails.
8904 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8905 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8906 // This is definitely required. Otherwise it fails.
8907 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8908 pthread_attr_setschedparam(&attr, ¶m);
8911 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8913 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8916 stream_.callbackInfo.isRunning = true;
8917 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8918 pthread_attr_destroy(&attr);
8920 // Failed. Try instead with default attributes.
8921 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8923 stream_.callbackInfo.isRunning = false;
8924 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8933 if ( pah && stream_.callbackInfo.isRunning ) {
8934 pthread_cond_destroy( &pah->runnable_cv );
8936 stream_.apiHandle = 0;
8939 for ( int i=0; i<2; i++ ) {
8940 if ( stream_.userBuffer[i] ) {
8941 free( stream_.userBuffer[i] );
8942 stream_.userBuffer[i] = 0;
8946 if ( stream_.deviceBuffer ) {
8947 free( stream_.deviceBuffer );
8948 stream_.deviceBuffer = 0;
8951 stream_.state = STREAM_CLOSED;
8955 //******************** End of __LINUX_PULSE__ *********************//
8958 #if defined(__LINUX_OSS__)
8961 #include <sys/ioctl.h>
8964 #include <sys/soundcard.h>
8968 static void *ossCallbackHandler(void * ptr);
8970 // A structure to hold various information related to the OSS API
// NOTE(review): lines appear elided in this extract — the "struct OssHandle {"
// opener, the xrun/triggered member declarations, and the closing brace are not
// visible. Fields documented from what is shown; confirm against the full file.
8973 int id[2]; // device ids (file descriptors: [0] = output, [1] = input)
8976 pthread_cond_t runnable; // signaled to wake the callback thread when the stream starts
// Constructor initializer: zero both device ids and clear both xrun flags.
8979 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Constructor: no OSS-specific state needs initializing until a stream is opened.
8982 RtApiOss :: RtApiOss()
8984 // Nothing to do here.
// Destructor: ensure any open stream is torn down before the object is destroyed.
8987 RtApiOss :: ~RtApiOss()
8989 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Query the OSS mixer device for the number of audio devices on the system.
// Returns sysinfo.numaudios on success; error paths issue a WARNING (the
// early-return statements and mixerfd close() appear elided in this extract).
8992 unsigned int RtApiOss :: getDeviceCount( void )
8994 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8995 if ( mixerfd == -1 ) {
8996 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8997 error( RtAudioError::WARNING );
// SNDCTL_SYSINFO is only available in OSS >= 4.0.
9001 oss_sysinfo sysinfo;
9002 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9004 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9005 error( RtAudioError::WARNING );
9010 return sysinfo.numaudios;
// Probe a single OSS device (by index) and fill an RtAudio::DeviceInfo with its
// channel counts, native sample formats, and supported sample rates.
// info.probed starts false; presumably set true on success in an elided line.
9013 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9015 RtAudio::DeviceInfo info;
9016 info.probed = false;
9018 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9019 if ( mixerfd == -1 ) {
9020 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9021 error( RtAudioError::WARNING );
9025 oss_sysinfo sysinfo;
9026 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9027 if ( result == -1 ) {
9029 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9030 error( RtAudioError::WARNING );
9034 unsigned nDevices = sysinfo.numaudios;
9035 if ( nDevices == 0 ) {
9037 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9038 error( RtAudioError::INVALID_USE );
9042 if ( device >= nDevices ) {
9044 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9045 error( RtAudioError::INVALID_USE );
// Query per-device capabilities (ainfo.dev assignment appears elided here).
9049 oss_audioinfo ainfo;
9051 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9053 if ( result == -1 ) {
9054 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9055 errorText_ = errorStream_.str();
9056 error( RtAudioError::WARNING );
// Channel capabilities.
9061 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9062 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9063 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
// NOTE(review): the PCM_CAP_DUPLEX re-test below is redundant — it is already
// guaranteed by the enclosing if on the previous line.
9064 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9065 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9068 // Probe data formats ... do for input
9069 unsigned long mask = ainfo.iformats;
9070 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9071 info.nativeFormats |= RTAUDIO_SINT16;
9072 if ( mask & AFMT_S8 )
9073 info.nativeFormats |= RTAUDIO_SINT8;
9074 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9075 info.nativeFormats |= RTAUDIO_SINT32;
// AFMT_FLOAT / AFMT_S24_* are guarded by #ifdefs in the full file — the guards
// appear elided in this extract.
9077 if ( mask & AFMT_FLOAT )
9078 info.nativeFormats |= RTAUDIO_FLOAT32;
9080 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9081 info.nativeFormats |= RTAUDIO_SINT24;
9083 // Check that we have at least one supported format
9084 if ( info.nativeFormats == 0 ) {
9085 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9086 errorText_ = errorStream_.str();
9087 error( RtAudioError::WARNING );
9091 // Probe the supported sample rates.
9092 info.sampleRates.clear();
9093 if ( ainfo.nrates ) {
// Device advertises an explicit rate list: keep only rates RtAudio also knows.
9094 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9095 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9096 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9097 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate that does not exceed 48 kHz.
9099 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9100 info.preferredSampleRate = SAMPLE_RATES[k];
// Else branch (elided "else {" visible only via the 9108 comment): the device
// reports a min/max range instead of a discrete rate list.
9108 // Check min and max rate values;
9109 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9110 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9111 info.sampleRates.push_back( SAMPLE_RATES[k] );
9113 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9114 info.preferredSampleRate = SAMPLE_RATES[k];
9119 if ( info.sampleRates.size() == 0 ) {
9120 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9121 errorText_ = errorStream_.str();
9122 error( RtAudioError::WARNING );
9126 info.name = ainfo.name;
// Open and configure an OSS device for input or output: verify capabilities,
// set channels/format/fragment-size/sample-rate, allocate conversion buffers,
// and (for the first mode opened) spawn the realtime callback thread.
// Returns SUCCESS/FAILURE; on failure falls through to the cleanup section at
// the bottom. NOTE(review): many interior lines (returns, braces, #else arms,
// the "error:" label) appear elided in this extract.
9133 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9134 unsigned int firstChannel, unsigned int sampleRate,
9135 RtAudioFormat format, unsigned int *bufferSize,
9136 RtAudio::StreamOptions *options )
9138 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9139 if ( mixerfd == -1 ) {
9140 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9144 oss_sysinfo sysinfo;
9145 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9146 if ( result == -1 ) {
9148 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9152 unsigned nDevices = sysinfo.numaudios;
9153 if ( nDevices == 0 ) {
9154 // This should not happen because a check is made before this function is called.
9156 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9160 if ( device >= nDevices ) {
9161 // This should not happen because a check is made before this function is called.
9163 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9167 oss_audioinfo ainfo;
9169 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9171 if ( result == -1 ) {
// NOTE(review): message says "getDeviceInfo" but we are in probeDeviceOpen —
// looks like a copy/paste slip worth fixing in a code change.
9172 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9173 errorText_ = errorStream_.str();
9177 // Check if device supports input or output
9178 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9179 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9180 if ( mode == OUTPUT )
9181 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9183 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9184 errorText_ = errorStream_.str();
// Choose open flags; duplex on the same device requires close + reopen O_RDWR.
9189 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9190 if ( mode == OUTPUT )
9192 else { // mode == INPUT
9193 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9194 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9195 close( handle->id[0] );
9197 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9198 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9199 errorText_ = errorStream_.str();
9202 // Check that the number previously set channels is the same.
9203 if ( stream_.nUserChannels[0] != channels ) {
9204 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9205 errorText_ = errorStream_.str();
9214 // Set exclusive access if specified.
9215 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9217 // Try to open the device.
9219 fd = open( ainfo.devnode, flags, 0 );
9221 if ( errno == EBUSY )
9222 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9224 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9225 errorText_ = errorStream_.str();
9229 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): `flags | O_RDWR` is always non-zero (bitwise OR, not a test),
// so this branch is unconditionally taken; the intended test was presumably
// `flags == O_RDWR` or `(flags & O_ACCMODE) == O_RDWR` — confirm and fix.
9231 if ( flags | O_RDWR ) {
9232 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9233 if ( result == -1) {
9234 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9235 errorText_ = errorStream_.str();
9241 // Check the device channel support.
9242 stream_.nUserChannels[mode] = channels;
9243 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9245 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9246 errorText_ = errorStream_.str();
9250 // Set the number of channels.
9251 int deviceChannels = channels + firstChannel;
9252 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9253 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9255 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9256 errorText_ = errorStream_.str();
9259 stream_.nDeviceChannels[mode] = deviceChannels;
9261 // Get the data format mask
9263 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9264 if ( result == -1 ) {
9266 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9267 errorText_ = errorStream_.str();
9271 // Determine how to set the device format.
// Pick a native-endian format when available; fall back to the opposite-endian
// variant with byte swapping enabled.
9272 stream_.userFormat = format;
9273 int deviceFormat = -1;
9274 stream_.doByteSwap[mode] = false;
9275 if ( format == RTAUDIO_SINT8 ) {
9276 if ( mask & AFMT_S8 ) {
9277 deviceFormat = AFMT_S8;
9278 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9281 else if ( format == RTAUDIO_SINT16 ) {
9282 if ( mask & AFMT_S16_NE ) {
9283 deviceFormat = AFMT_S16_NE;
9284 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9286 else if ( mask & AFMT_S16_OE ) {
9287 deviceFormat = AFMT_S16_OE;
9288 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9289 stream_.doByteSwap[mode] = true;
9292 else if ( format == RTAUDIO_SINT24 ) {
9293 if ( mask & AFMT_S24_NE ) {
9294 deviceFormat = AFMT_S24_NE;
9295 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9297 else if ( mask & AFMT_S24_OE ) {
9298 deviceFormat = AFMT_S24_OE;
9299 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9300 stream_.doByteSwap[mode] = true;
9303 else if ( format == RTAUDIO_SINT32 ) {
9304 if ( mask & AFMT_S32_NE ) {
9305 deviceFormat = AFMT_S32_NE;
9306 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9308 else if ( mask & AFMT_S32_OE ) {
9309 deviceFormat = AFMT_S32_OE;
9310 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9311 stream_.doByteSwap[mode] = true;
9315 if ( deviceFormat == -1 ) {
9316 // The user requested format is not natively supported by the device.
// Fallback search order: 16-bit NE, 32-bit NE, 24-bit NE, then the
// opposite-endian variants (with byte swap), then 8-bit.
9317 if ( mask & AFMT_S16_NE ) {
9318 deviceFormat = AFMT_S16_NE;
9319 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9321 else if ( mask & AFMT_S32_NE ) {
9322 deviceFormat = AFMT_S32_NE;
9323 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9325 else if ( mask & AFMT_S24_NE ) {
9326 deviceFormat = AFMT_S24_NE;
9327 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9329 else if ( mask & AFMT_S16_OE ) {
9330 deviceFormat = AFMT_S16_OE;
9331 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9332 stream_.doByteSwap[mode] = true;
9334 else if ( mask & AFMT_S32_OE ) {
9335 deviceFormat = AFMT_S32_OE;
9336 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9337 stream_.doByteSwap[mode] = true;
9339 else if ( mask & AFMT_S24_OE ) {
9340 deviceFormat = AFMT_S24_OE;
9341 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9342 stream_.doByteSwap[mode] = true;
9344 else if ( mask & AFMT_S8) {
9345 deviceFormat = AFMT_S8;
9346 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9350 if ( stream_.deviceFormat[mode] == 0 ) {
9351 // This really shouldn't happen ...
9353 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9354 errorText_ = errorStream_.str();
9358 // Set the data format.
9359 int temp = deviceFormat;
9360 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9361 if ( result == -1 || deviceFormat != temp ) {
9363 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9364 errorText_ = errorStream_.str();
9368 // Attempt to set the buffer size. According to OSS, the minimum
9369 // number of buffers is two. The supposed minimum buffer size is 16
9370 // bytes, so that will be our lower bound. The argument to this
9371 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9372 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9373 // We'll check the actual value used near the end of the setup
9375 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9376 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9378 if ( options ) buffers = options->numberOfBuffers;
9379 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
// NOTE(review): fewer than 2 buffers is bumped to 3, not 2 — presumably
// deliberate headroom, but worth confirming against upstream.
9380 if ( buffers < 2 ) buffers = 3;
9381 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9382 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9383 if ( result == -1 ) {
9385 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9386 errorText_ = errorStream_.str();
9389 stream_.nBuffers = buffers;
9391 // Save buffer size (in sample frames).
9392 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9393 stream_.bufferSize = *bufferSize;
9395 // Set the sample rate.
9396 int srate = sampleRate;
9397 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9398 if ( result == -1 ) {
9400 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9401 errorText_ = errorStream_.str();
9405 // Verify the sample rate setup worked.
// Accept the device's actual rate if it is within 100 Hz of the request.
9406 if ( abs( srate - (int)sampleRate ) > 100 ) {
9408 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9409 errorText_ = errorStream_.str();
9412 stream_.sampleRate = sampleRate;
9414 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9415 // We're doing duplex setup here.
9416 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9417 stream_.nDeviceChannels[0] = deviceChannels;
9420 // Set interleaving parameters.
9421 stream_.userInterleaved = true;
9422 stream_.deviceInterleaved[mode] = true;
9423 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9424 stream_.userInterleaved = false;
9426 // Set flags for buffer conversion
9427 stream_.doConvertBuffer[mode] = false;
9428 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9429 stream_.doConvertBuffer[mode] = true;
9430 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9431 stream_.doConvertBuffer[mode] = true;
9432 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9433 stream_.nUserChannels[mode] > 1 )
9434 stream_.doConvertBuffer[mode] = true;
9436 // Allocate the stream handles if necessary and then save.
9437 if ( stream_.apiHandle == 0 ) {
9439 handle = new OssHandle;
9441 catch ( std::bad_alloc& ) {
9442 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9446 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9447 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9451 stream_.apiHandle = (void *) handle;
9454 handle = (OssHandle *) stream_.apiHandle;
9456 handle->id[mode] = fd;
9458 // Allocate necessary internal buffers.
9459 unsigned long bufferBytes;
9460 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9461 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9462 if ( stream_.userBuffer[mode] == NULL ) {
9463 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9467 if ( stream_.doConvertBuffer[mode] ) {
// Reuse the existing device buffer for duplex if it is already big enough.
9469 bool makeBuffer = true;
9470 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9471 if ( mode == INPUT ) {
9472 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9473 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9474 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9479 bufferBytes *= *bufferSize;
9480 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9481 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9482 if ( stream_.deviceBuffer == NULL ) {
9483 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9489 stream_.device[mode] = device;
9490 stream_.state = STREAM_STOPPED;
9492 // Setup the buffer conversion information structure.
9493 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9495 // Setup thread if necessary.
9496 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9497 // We had already set up an output stream.
9498 stream_.mode = DUPLEX;
9499 if ( stream_.device[0] == device ) handle->id[0] = fd;
9502 stream_.mode = mode;
9504 // Setup callback thread.
9505 stream_.callbackInfo.object = (void *) this;
9507 // Set the thread attributes for joinable and realtime scheduling
9508 // priority. The higher priority will only take affect if the
9509 // program is run as root or suid.
9510 pthread_attr_t attr;
9511 pthread_attr_init( &attr );
9512 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9513 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9514 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9515 stream_.callbackInfo.doRealtime = true;
9516 struct sched_param param;
9517 int priority = options->priority;
9518 int min = sched_get_priority_min( SCHED_RR );
9519 int max = sched_get_priority_max( SCHED_RR );
// Clamp the requested priority into the valid SCHED_RR range.
9520 if ( priority < min ) priority = min;
9521 else if ( priority > max ) priority = max;
9522 param.sched_priority = priority;
9524 // Set the policy BEFORE the priority. Otherwise it fails.
9525 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9526 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9527 // This is definitely required. Otherwise it fails.
9528 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): "¶m" below is mojibake for "&param" ("&para" was rendered as
// the pilcrow entity) — this will not compile as-is and must be restored.
9529 pthread_attr_setschedparam(&attr, ¶m);
// The two SCHED_OTHER calls below are the "else" and "#else" fallbacks of the
// realtime-scheduling setup (the else/#else lines themselves appear elided).
9532 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9534 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9537 stream_.callbackInfo.isRunning = true;
9538 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9539 pthread_attr_destroy( &attr );
9541 // Failed. Try instead with default attributes.
9542 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9544 stream_.callbackInfo.isRunning = false;
9545 errorText_ = "RtApiOss::error creating callback thread!";
// Error/cleanup section: release the handle, close fds, and free all buffers
// before reporting failure.
9555 pthread_cond_destroy( &handle->runnable );
9556 if ( handle->id[0] ) close( handle->id[0] );
9557 if ( handle->id[1] ) close( handle->id[1] );
9559 stream_.apiHandle = 0;
9562 for ( int i=0; i<2; i++ ) {
9563 if ( stream_.userBuffer[i] ) {
9564 free( stream_.userBuffer[i] );
9565 stream_.userBuffer[i] = 0;
9569 if ( stream_.deviceBuffer ) {
9570 free( stream_.deviceBuffer );
9571 stream_.deviceBuffer = 0;
9574 stream_.state = STREAM_CLOSED;
// Stop the callback thread, halt any running device, close file descriptors,
// free all buffers, and mark the stream CLOSED.
9578 void RtApiOss :: closeStream()
9580 if ( stream_.state == STREAM_CLOSED ) {
9581 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9582 error( RtAudioError::WARNING );
9586 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9587 stream_.callbackInfo.isRunning = false;
9588 MUTEX_LOCK( &stream_.mutex );
// If the stream is stopped, the callback thread is blocked in
// pthread_cond_wait — wake it so it can observe isRunning == false and exit.
9589 if ( stream_.state == STREAM_STOPPED )
9590 pthread_cond_signal( &handle->runnable );
9591 MUTEX_UNLOCK( &stream_.mutex );
9592 pthread_join( stream_.callbackInfo.thread, NULL );
9594 if ( stream_.state == STREAM_RUNNING ) {
9595 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9596 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
// The input-halt branch below is presumably guarded by an elided "else" line.
9598 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9599 stream_.state = STREAM_STOPPED;
9603 pthread_cond_destroy( &handle->runnable );
9604 if ( handle->id[0] ) close( handle->id[0] );
9605 if ( handle->id[1] ) close( handle->id[1] );
9607 stream_.apiHandle = 0;
9610 for ( int i=0; i<2; i++ ) {
9611 if ( stream_.userBuffer[i] ) {
9612 free( stream_.userBuffer[i] );
9613 stream_.userBuffer[i] = 0;
9617 if ( stream_.deviceBuffer ) {
9618 free( stream_.deviceBuffer );
9619 stream_.deviceBuffer = 0;
9622 stream_.mode = UNINITIALIZED;
9623 stream_.state = STREAM_CLOSED;
// Mark the stream RUNNING and wake the callback thread. OSS itself starts
// producing/consuming automatically once samples are written/read.
9626 void RtApiOss :: startStream()
9629 if ( stream_.state == STREAM_RUNNING ) {
9630 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9631 error( RtAudioError::WARNING );
9635 MUTEX_LOCK( &stream_.mutex );
// Record the stream start time for getStreamTime() bookkeeping.
9637 #if defined( HAVE_GETTIMEOFDAY )
9638 gettimeofday( &stream_.lastTickTimestamp, NULL );
9641 stream_.state = STREAM_RUNNING;
9643 // No need to do anything else here ... OSS automatically starts
9644 // when fed samples.
9646 MUTEX_UNLOCK( &stream_.mutex );
9648 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9649 pthread_cond_signal( &handle->runnable );
// Drain playback by writing nBuffers+1 buffers of silence, then halt both
// directions and mark the stream STOPPED. Raises SYSTEM_ERROR if any ioctl
// failed (result == -1 check at the bottom).
9652 void RtApiOss :: stopStream()
9655 if ( stream_.state == STREAM_STOPPED ) {
9656 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9657 error( RtAudioError::WARNING );
9661 MUTEX_LOCK( &stream_.mutex );
9663 // The state might change while waiting on a mutex.
9664 if ( stream_.state == STREAM_STOPPED ) {
9665 MUTEX_UNLOCK( &stream_.mutex );
9670 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9671 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9673 // Flush the output with zeros a few times.
9676 RtAudioFormat format;
// Use the device buffer when conversion is active, else the user buffer.
9678 if ( stream_.doConvertBuffer[0] ) {
9679 buffer = stream_.deviceBuffer;
9680 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9681 format = stream_.deviceFormat[0];
9684 buffer = stream_.userBuffer[0];
9685 samples = stream_.bufferSize * stream_.nUserChannels[0];
9686 format = stream_.userFormat;
9689 memset( buffer, 0, samples * formatBytes(format) );
9690 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9691 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9692 if ( result == -1 ) {
9693 errorText_ = "RtApiOss::stopStream: audio write error.";
9694 error( RtAudioError::WARNING );
9698 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9699 if ( result == -1 ) {
9700 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9701 errorText_ = errorStream_.str();
9704 handle->triggered = false;
// Halt input separately unless duplex shares a single fd.
9707 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9708 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9709 if ( result == -1 ) {
9710 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9711 errorText_ = errorStream_.str();
9717 stream_.state = STREAM_STOPPED;
9718 MUTEX_UNLOCK( &stream_.mutex );
9720 if ( result != -1 ) return;
9721 error( RtAudioError::SYSTEM_ERROR );
// Like stopStream() but halts immediately without draining playback with
// silence buffers first.
9724 void RtApiOss :: abortStream()
9727 if ( stream_.state == STREAM_STOPPED ) {
9728 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9729 error( RtAudioError::WARNING );
9733 MUTEX_LOCK( &stream_.mutex );
9735 // The state might change while waiting on a mutex.
9736 if ( stream_.state == STREAM_STOPPED ) {
9737 MUTEX_UNLOCK( &stream_.mutex );
9742 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9743 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9744 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9745 if ( result == -1 ) {
9746 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9747 errorText_ = errorStream_.str();
9750 handle->triggered = false;
// Halt input separately unless duplex shares a single fd.
9753 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9754 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9755 if ( result == -1 ) {
9756 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9757 errorText_ = errorStream_.str();
9763 stream_.state = STREAM_STOPPED;
9764 MUTEX_UNLOCK( &stream_.mutex );
9766 if ( result != -1 ) return;
9767 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the audio callback loop: wait while stopped, invoke the
// user callback for fresh data, then write output to / read input from the
// OSS device (with format conversion and byte swapping as configured).
9770 void RtApiOss :: callbackEvent()
9772 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9773 if ( stream_.state == STREAM_STOPPED ) {
// Block until startStream()/closeStream() signals runnable.
9774 MUTEX_LOCK( &stream_.mutex );
9775 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9776 if ( stream_.state != STREAM_RUNNING ) {
9777 MUTEX_UNLOCK( &stream_.mutex );
9780 MUTEX_UNLOCK( &stream_.mutex );
9783 if ( stream_.state == STREAM_CLOSED ) {
9784 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9785 error( RtAudioError::WARNING );
9789 // Invoke user callback to get fresh output data.
9790 int doStopStream = 0;
9791 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9792 double streamTime = getStreamTime();
9793 RtAudioStreamStatus status = 0;
// Report (and clear) any under/overflow flags set by previous I/O errors.
9794 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9795 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9796 handle->xrun[0] = false;
9798 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9799 status |= RTAUDIO_INPUT_OVERFLOW;
9800 handle->xrun[1] = false;
9802 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9803 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return of 2 requests an immediate abort (1 = graceful stop below).
9804 if ( doStopStream == 2 ) {
9805 this->abortStream();
9809 MUTEX_LOCK( &stream_.mutex );
9811 // The state might change while waiting on a mutex.
9812 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9817 RtAudioFormat format;
9819 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9821 // Setup parameters and do buffer conversion if necessary.
9822 if ( stream_.doConvertBuffer[0] ) {
9823 buffer = stream_.deviceBuffer;
9824 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9825 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9826 format = stream_.deviceFormat[0];
9829 buffer = stream_.userBuffer[0];
9830 samples = stream_.bufferSize * stream_.nUserChannels[0];
9831 format = stream_.userFormat;
9834 // Do byte swapping if necessary.
9835 if ( stream_.doByteSwap[0] )
9836 byteSwapBuffer( buffer, samples, format );
// For a shared duplex device, prime output then enable both triggers at once
// so input and output start in sync (done only on the first pass).
9838 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9840 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9841 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9842 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9843 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9844 handle->triggered = true;
9847 // Write samples to device.
9848 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9850 if ( result == -1 ) {
9851 // We'll assume this is an underrun, though there isn't a
9852 // specific means for determining that.
9853 handle->xrun[0] = true;
9854 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9855 error( RtAudioError::WARNING );
9856 // Continue on to input section.
9860 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9862 // Setup parameters.
9863 if ( stream_.doConvertBuffer[1] ) {
9864 buffer = stream_.deviceBuffer;
9865 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9866 format = stream_.deviceFormat[1];
9869 buffer = stream_.userBuffer[1];
9870 samples = stream_.bufferSize * stream_.nUserChannels[1];
9871 format = stream_.userFormat;
9874 // Read samples from device.
9875 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9877 if ( result == -1 ) {
9878 // We'll assume this is an overrun, though there isn't a
9879 // specific means for determining that.
9880 handle->xrun[1] = true;
9881 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9882 error( RtAudioError::WARNING );
9886 // Do byte swapping if necessary.
9887 if ( stream_.doByteSwap[1] )
9888 byteSwapBuffer( buffer, samples, format );
9890 // Do buffer conversion if necessary.
9891 if ( stream_.doConvertBuffer[1] )
9892 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// "unlock:" label target (the label line itself appears elided in this extract).
9896 MUTEX_UNLOCK( &stream_.mutex );
9898 RtApi::tickStreamTime();
9899 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the OSS callback thread: loops calling
// callbackEvent() until closeStream() clears callbackInfo.isRunning.
9902 static void *ossCallbackHandler( void *ptr )
9904 CallbackInfo *info = (CallbackInfo *) ptr;
9905 RtApiOss *object = (RtApiOss *) info->object;
9906 bool *isRunning = &info->isRunning;
// Diagnostic: report whether the realtime scheduling request actually took
// effect (it silently fails without sufficient privileges).
9908 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9909 if (info->doRealtime) {
9910 std::cerr << "RtAudio oss: " <<
9911 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9912 "running realtime scheduling" << std::endl;
9916 while ( *isRunning == true ) {
9917 pthread_testcancel();
9918 object->callbackEvent();
9921 pthread_exit( NULL );
9924 //******************** End of __LINUX_OSS__ *********************//
9928 // *************************************************** //
9930 // Protected common (OS-independent) RtAudio methods.
9932 // *************************************************** //
9934 // This method can be modified to control the behavior of error
9935 // message printing.
// Central error dispatcher: route the message in errorText_ either to a
// user-installed error callback or to std::cerr (warnings) / a thrown
// RtAudioError (everything else).
9936 void RtApi :: error( RtAudioError::Type type )
9938 errorStream_.str(""); // clear the ostringstream
9940 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9941 if ( errorCallback ) {
9942 // abortStream() can generate new error messages. Ignore them. Just keep original one.
// firstErrorOccurred_ acts as a reentrancy guard around the callback; copy the
// message before it can be overwritten by nested errors.
9944 if ( firstErrorOccurred_ )
9947 firstErrorOccurred_ = true;
9948 const std::string errorMessage = errorText_;
9950 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9951 stream_.callbackInfo.isRunning = false; // exit from the thread
9955 errorCallback( type, errorMessage );
9956 firstErrorOccurred_ = false;
// No callback installed: print warnings (if enabled) or throw.
9960 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9961 std::cerr << '\n' << errorText_ << "\n\n";
9962 else if ( type != RtAudioError::WARNING )
9963 throw( RtAudioError( errorText_, type ) );
// Guard used by public stream methods: raise INVALID_USE if no stream is open.
9966 void RtApi :: verifyStream()
9968 if ( stream_.state == STREAM_CLOSED ) {
9969 errorText_ = "RtApi:: a stream is not open!";
9970 error( RtAudioError::INVALID_USE );
9974 void RtApi :: clearStreamInfo()
9976 stream_.mode = UNINITIALIZED;
9977 stream_.state = STREAM_CLOSED;
9978 stream_.sampleRate = 0;
9979 stream_.bufferSize = 0;
9980 stream_.nBuffers = 0;
9981 stream_.userFormat = 0;
9982 stream_.userInterleaved = true;
9983 stream_.streamTime = 0.0;
9984 stream_.apiHandle = 0;
9985 stream_.deviceBuffer = 0;
9986 stream_.callbackInfo.callback = 0;
9987 stream_.callbackInfo.userData = 0;
9988 stream_.callbackInfo.isRunning = false;
9989 stream_.callbackInfo.errorCallback = 0;
9990 for ( int i=0; i<2; i++ ) {
9991 stream_.device[i] = 11111;
9992 stream_.doConvertBuffer[i] = false;
9993 stream_.deviceInterleaved[i] = true;
9994 stream_.doByteSwap[i] = false;
9995 stream_.nUserChannels[i] = 0;
9996 stream_.nDeviceChannels[i] = 0;
9997 stream_.channelOffset[i] = 0;
9998 stream_.deviceFormat[i] = 0;
9999 stream_.latency[i] = 0;
10000 stream_.userBuffer[i] = 0;
10001 stream_.convertInfo[i].channels = 0;
10002 stream_.convertInfo[i].inJump = 0;
10003 stream_.convertInfo[i].outJump = 0;
10004 stream_.convertInfo[i].inFormat = 0;
10005 stream_.convertInfo[i].outFormat = 0;
10006 stream_.convertInfo[i].inOffset.clear();
10007 stream_.convertInfo[i].outOffset.clear();
10011 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10013 if ( format == RTAUDIO_SINT16 )
10015 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10017 else if ( format == RTAUDIO_FLOAT64 )
10019 else if ( format == RTAUDIO_SINT24 )
10021 else if ( format == RTAUDIO_SINT8 )
10024 errorText_ = "RtApi::formatBytes: undefined format.";
10025 error( RtAudioError::WARNING );
10030 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
10032 if ( mode == INPUT ) { // convert device to user buffer
10033 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10034 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10035 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10036 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10038 else { // convert user to device buffer
10039 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10040 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10041 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10042 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
10045 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10046 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10048 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10050 // Set up the interleave/deinterleave offsets.
10051 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10052 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10053 ( mode == INPUT && stream_.userInterleaved ) ) {
10054 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10055 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10056 stream_.convertInfo[mode].outOffset.push_back( k );
10057 stream_.convertInfo[mode].inJump = 1;
10061 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10062 stream_.convertInfo[mode].inOffset.push_back( k );
10063 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10064 stream_.convertInfo[mode].outJump = 1;
10068 else { // no (de)interleaving
10069 if ( stream_.userInterleaved ) {
10070 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10071 stream_.convertInfo[mode].inOffset.push_back( k );
10072 stream_.convertInfo[mode].outOffset.push_back( k );
10076 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10077 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10078 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10079 stream_.convertInfo[mode].inJump = 1;
10080 stream_.convertInfo[mode].outJump = 1;
10085 // Add channel offset.
10086 if ( firstChannel > 0 ) {
10087 if ( stream_.deviceInterleaved[mode] ) {
10088 if ( mode == OUTPUT ) {
10089 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10090 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10093 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10094 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10098 if ( mode == OUTPUT ) {
10099 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10100 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10103 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10104 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
// This function does format conversion, input/output channel compensation, and
// data interleaving/deinterleaving. 24-bit integers are assumed to occupy
// the lower three bytes of a 32-bit integer.
// Clear our device buffer when in/out duplex device channels are different
if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
// One outer branch per destination format; each handles every possible
// source format.  Integer-to-float conversion offsets the sample by
// +0.5 and scales so the full integer range maps exactly onto
// [-1.0, 1.0]; float-to-integer uses the inverse (value * max.5 - 0.5).
// The offset tables in info select/reorder channels within each frame;
// the out pointer advances by info.outJump after each frame.
// --- 64-bit float output ---
if (info.outFormat == RTAUDIO_FLOAT64) {
Float64 *out = (Float64 *)outBuffer;
if (info.inFormat == RTAUDIO_SINT8) {
signed char *in = (signed char *)inBuffer;
scale = 1.0 / 127.5;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
out[info.outOffset[j]] += 0.5;
out[info.outOffset[j]] *= scale;
out += info.outJump;
else if (info.inFormat == RTAUDIO_SINT16) {
Int16 *in = (Int16 *)inBuffer;
scale = 1.0 / 32767.5;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
out[info.outOffset[j]] += 0.5;
out[info.outOffset[j]] *= scale;
out += info.outJump;
else if (info.inFormat == RTAUDIO_SINT24) {
Int24 *in = (Int24 *)inBuffer;
scale = 1.0 / 8388607.5;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
out[info.outOffset[j]] += 0.5;
out[info.outOffset[j]] *= scale;
out += info.outJump;
else if (info.inFormat == RTAUDIO_SINT32) {
Int32 *in = (Int32 *)inBuffer;
scale = 1.0 / 2147483647.5;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
out[info.outOffset[j]] += 0.5;
out[info.outOffset[j]] *= scale;
out += info.outJump;
else if (info.inFormat == RTAUDIO_FLOAT32) {
Float32 *in = (Float32 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
out += info.outJump;
else if (info.inFormat == RTAUDIO_FLOAT64) {
// Channel compensation and/or (de)interleaving only.
Float64 *in = (Float64 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = in[info.inOffset[j]];
out += info.outJump;
// --- 32-bit float output ---
else if (info.outFormat == RTAUDIO_FLOAT32) {
Float32 *out = (Float32 *)outBuffer;
if (info.inFormat == RTAUDIO_SINT8) {
signed char *in = (signed char *)inBuffer;
scale = (Float32) ( 1.0 / 127.5 );
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
out[info.outOffset[j]] += 0.5;
out[info.outOffset[j]] *= scale;
out += info.outJump;
else if (info.inFormat == RTAUDIO_SINT16) {
Int16 *in = (Int16 *)inBuffer;
scale = (Float32) ( 1.0 / 32767.5 );
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
out[info.outOffset[j]] += 0.5;
out[info.outOffset[j]] *= scale;
out += info.outJump;
else if (info.inFormat == RTAUDIO_SINT24) {
Int24 *in = (Int24 *)inBuffer;
scale = (Float32) ( 1.0 / 8388607.5 );
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
out[info.outOffset[j]] += 0.5;
out[info.outOffset[j]] *= scale;
out += info.outJump;
else if (info.inFormat == RTAUDIO_SINT32) {
Int32 *in = (Int32 *)inBuffer;
scale = (Float32) ( 1.0 / 2147483647.5 );
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
out[info.outOffset[j]] += 0.5;
out[info.outOffset[j]] *= scale;
out += info.outJump;
else if (info.inFormat == RTAUDIO_FLOAT32) {
// Channel compensation and/or (de)interleaving only.
Float32 *in = (Float32 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = in[info.inOffset[j]];
out += info.outJump;
else if (info.inFormat == RTAUDIO_FLOAT64) {
Float64 *in = (Float64 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
out += info.outJump;
// --- 32-bit integer output: narrower ints are shifted up into the
// high bits; floats are scaled to full 32-bit range ---
else if (info.outFormat == RTAUDIO_SINT32) {
Int32 *out = (Int32 *)outBuffer;
if (info.inFormat == RTAUDIO_SINT8) {
signed char *in = (signed char *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
out[info.outOffset[j]] <<= 24;
out += info.outJump;
else if (info.inFormat == RTAUDIO_SINT16) {
Int16 *in = (Int16 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
out[info.outOffset[j]] <<= 16;
out += info.outJump;
else if (info.inFormat == RTAUDIO_SINT24) {
Int24 *in = (Int24 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
out[info.outOffset[j]] <<= 8;
out += info.outJump;
else if (info.inFormat == RTAUDIO_SINT32) {
// Channel compensation and/or (de)interleaving only.
Int32 *in = (Int32 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = in[info.inOffset[j]];
out += info.outJump;
else if (info.inFormat == RTAUDIO_FLOAT32) {
Float32 *in = (Float32 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
out += info.outJump;
else if (info.inFormat == RTAUDIO_FLOAT64) {
Float64 *in = (Float64 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
out += info.outJump;
// --- 24-bit integer output (Int24 stores the low three bytes) ---
else if (info.outFormat == RTAUDIO_SINT24) {
Int24 *out = (Int24 *)outBuffer;
if (info.inFormat == RTAUDIO_SINT8) {
signed char *in = (signed char *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
//out[info.outOffset[j]] <<= 16;
out += info.outJump;
else if (info.inFormat == RTAUDIO_SINT16) {
Int16 *in = (Int16 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
//out[info.outOffset[j]] <<= 8;
out += info.outJump;
else if (info.inFormat == RTAUDIO_SINT24) {
// Channel compensation and/or (de)interleaving only.
Int24 *in = (Int24 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = in[info.inOffset[j]];
out += info.outJump;
else if (info.inFormat == RTAUDIO_SINT32) {
Int32 *in = (Int32 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
//out[info.outOffset[j]] >>= 8;
out += info.outJump;
else if (info.inFormat == RTAUDIO_FLOAT32) {
Float32 *in = (Float32 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
out += info.outJump;
else if (info.inFormat == RTAUDIO_FLOAT64) {
Float64 *in = (Float64 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
out += info.outJump;
// --- 16-bit integer output ---
else if (info.outFormat == RTAUDIO_SINT16) {
Int16 *out = (Int16 *)outBuffer;
if (info.inFormat == RTAUDIO_SINT8) {
signed char *in = (signed char *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
out[info.outOffset[j]] <<= 8;
out += info.outJump;
else if (info.inFormat == RTAUDIO_SINT16) {
// Channel compensation and/or (de)interleaving only.
Int16 *in = (Int16 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = in[info.inOffset[j]];
out += info.outJump;
else if (info.inFormat == RTAUDIO_SINT24) {
Int24 *in = (Int24 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
out += info.outJump;
else if (info.inFormat == RTAUDIO_SINT32) {
Int32 *in = (Int32 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
out += info.outJump;
else if (info.inFormat == RTAUDIO_FLOAT32) {
Float32 *in = (Float32 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
out += info.outJump;
else if (info.inFormat == RTAUDIO_FLOAT64) {
Float64 *in = (Float64 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
out += info.outJump;
// --- 8-bit integer output: wider sources keep their high byte ---
else if (info.outFormat == RTAUDIO_SINT8) {
signed char *out = (signed char *)outBuffer;
if (info.inFormat == RTAUDIO_SINT8) {
// Channel compensation and/or (de)interleaving only.
signed char *in = (signed char *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = in[info.inOffset[j]];
out += info.outJump;
// NOTE(review): plain 'if' rather than 'else if' below; harmless since
// the input formats are mutually exclusive, but inconsistent with the
// sibling branches.
if (info.inFormat == RTAUDIO_SINT16) {
Int16 *in = (Int16 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
out += info.outJump;
else if (info.inFormat == RTAUDIO_SINT24) {
Int24 *in = (Int24 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
out += info.outJump;
else if (info.inFormat == RTAUDIO_SINT32) {
Int32 *in = (Int32 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
out += info.outJump;
else if (info.inFormat == RTAUDIO_FLOAT32) {
Float32 *in = (Float32 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
out += info.outJump;
else if (info.inFormat == RTAUDIO_FLOAT64) {
Float64 *in = (Float64 *)inBuffer;
for (unsigned int i=0; i<stream_.bufferSize; i++) {
for (j=0; j<info.channels; j++) {
out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
out += info.outJump;
10543 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10544 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10545 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10547 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10553 if ( format == RTAUDIO_SINT16 ) {
10554 for ( unsigned int i=0; i<samples; i++ ) {
10555 // Swap 1st and 2nd bytes.
10560 // Increment 2 bytes.
10564 else if ( format == RTAUDIO_SINT32 ||
10565 format == RTAUDIO_FLOAT32 ) {
10566 for ( unsigned int i=0; i<samples; i++ ) {
10567 // Swap 1st and 4th bytes.
10572 // Swap 2nd and 3rd bytes.
10578 // Increment 3 more bytes.
10582 else if ( format == RTAUDIO_SINT24 ) {
10583 for ( unsigned int i=0; i<samples; i++ ) {
10584 // Swap 1st and 3rd bytes.
10589 // Increment 2 more bytes.
10593 else if ( format == RTAUDIO_FLOAT64 ) {
10594 for ( unsigned int i=0; i<samples; i++ ) {
10595 // Swap 1st and 8th bytes
10600 // Swap 2nd and 7th bytes
10606 // Swap 3rd and 6th bytes
10612 // Swap 4th and 5th bytes
10618 // Increment 5 more bytes.
10624 // Indentation settings for Vim and Emacs
10626 // Local Variables:
10627 // c-basic-offset: 2
10628 // indent-tabs-mode: nil
10631 // vim: et sts=2 sw=2