1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound, ASIO and WASAPI) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2014 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.1.1pre
\r
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
56 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
\r
57 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
\r
58 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
\r
59 #define MUTEX_LOCK(A) EnterCriticalSection(A)
\r
60 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
\r
61 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
\r
63 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
\r
64 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
\r
65 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
\r
66 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
\r
68 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
\r
69 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
\r
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 std::string RtAudio :: getVersion( void ) throw()
\r
80 return RTAUDIO_VERSION;
\r
83 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
87 // The order here will control the order of RtAudio's API search in
\r
89 #if defined(__UNIX_JACK__)
\r
90 apis.push_back( UNIX_JACK );
\r
92 #if defined(__LINUX_ALSA__)
\r
93 apis.push_back( LINUX_ALSA );
\r
95 #if defined(__LINUX_PULSE__)
\r
96 apis.push_back( LINUX_PULSE );
\r
98 #if defined(__LINUX_OSS__)
\r
99 apis.push_back( LINUX_OSS );
\r
101 #if defined(__WINDOWS_ASIO__)
\r
102 apis.push_back( WINDOWS_ASIO );
\r
104 #if defined(__WINDOWS_WASAPI__)
\r
105 apis.push_back( WINDOWS_WASAPI );
\r
107 #if defined(__WINDOWS_DS__)
\r
108 apis.push_back( WINDOWS_DS );
\r
110 #if defined(__MACOSX_CORE__)
\r
111 apis.push_back( MACOSX_CORE );
\r
113 #if defined(__RTAUDIO_DUMMY__)
\r
114 apis.push_back( RTAUDIO_DUMMY );
\r
118 void RtAudio :: openRtApi( RtAudio::Api api )
\r
124 #if defined(__UNIX_JACK__)
\r
125 if ( api == UNIX_JACK )
\r
126 rtapi_ = new RtApiJack();
\r
128 #if defined(__LINUX_ALSA__)
\r
129 if ( api == LINUX_ALSA )
\r
130 rtapi_ = new RtApiAlsa();
\r
132 #if defined(__LINUX_PULSE__)
\r
133 if ( api == LINUX_PULSE )
\r
134 rtapi_ = new RtApiPulse();
\r
136 #if defined(__LINUX_OSS__)
\r
137 if ( api == LINUX_OSS )
\r
138 rtapi_ = new RtApiOss();
\r
140 #if defined(__WINDOWS_ASIO__)
\r
141 if ( api == WINDOWS_ASIO )
\r
142 rtapi_ = new RtApiAsio();
\r
144 #if defined(__WINDOWS_WASAPI__)
\r
145 if ( api == WINDOWS_WASAPI )
\r
146 rtapi_ = new RtApiWasapi();
\r
148 #if defined(__WINDOWS_DS__)
\r
149 if ( api == WINDOWS_DS )
\r
150 rtapi_ = new RtApiDs();
\r
152 #if defined(__MACOSX_CORE__)
\r
153 if ( api == MACOSX_CORE )
\r
154 rtapi_ = new RtApiCore();
\r
156 #if defined(__RTAUDIO_DUMMY__)
\r
157 if ( api == RTAUDIO_DUMMY )
\r
158 rtapi_ = new RtApiDummy();
\r
162 RtAudio :: RtAudio( RtAudio::Api api )
\r
166 if ( api != UNSPECIFIED ) {
\r
167 // Attempt to open the specified API.
\r
169 if ( rtapi_ ) return;
\r
171 // No compiled support for specified API value. Issue a debug
\r
172 // warning and continue as if no API was specified.
\r
173 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
176 // Iterate through the compiled APIs and return as soon as we find
\r
177 // one with at least one device or we reach the end of the list.
\r
178 std::vector< RtAudio::Api > apis;
\r
179 getCompiledApi( apis );
\r
180 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
181 openRtApi( apis[i] );
\r
182 if ( rtapi_->getDeviceCount() ) break;
\r
185 if ( rtapi_ ) return;
\r
187 // It should not be possible to get here because the preprocessor
\r
188 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
189 // API-specific definitions are passed to the compiler. But just in
\r
190 // case something weird happens, we'll thow an error.
\r
191 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
192 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
195 RtAudio :: ~RtAudio() throw()
\r
201 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
202 RtAudio::StreamParameters *inputParameters,
\r
203 RtAudioFormat format, unsigned int sampleRate,
\r
204 unsigned int *bufferFrames,
\r
205 RtAudioCallback callback, void *userData,
\r
206 RtAudio::StreamOptions *options,
\r
207 RtAudioErrorCallback errorCallback )
\r
209 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
210 sampleRate, bufferFrames, callback,
\r
211 userData, options, errorCallback );
\r
214 // *************************************************** //
\r
216 // Public RtApi definitions (see end of file for
\r
217 // private or protected utility functions).
\r
219 // *************************************************** //
\r
223 stream_.state = STREAM_CLOSED;
\r
224 stream_.mode = UNINITIALIZED;
\r
225 stream_.apiHandle = 0;
\r
226 stream_.userBuffer[0] = 0;
\r
227 stream_.userBuffer[1] = 0;
\r
228 MUTEX_INITIALIZE( &stream_.mutex );
\r
229 showWarnings_ = true;
\r
230 firstErrorOccurred_ = false;
\r
235 MUTEX_DESTROY( &stream_.mutex );
\r
238 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
239 RtAudio::StreamParameters *iParams,
\r
240 RtAudioFormat format, unsigned int sampleRate,
\r
241 unsigned int *bufferFrames,
\r
242 RtAudioCallback callback, void *userData,
\r
243 RtAudio::StreamOptions *options,
\r
244 RtAudioErrorCallback errorCallback )
\r
246 if ( stream_.state != STREAM_CLOSED ) {
\r
247 errorText_ = "RtApi::openStream: a stream is already open!";
\r
248 error( RtAudioError::INVALID_USE );
\r
252 // Clear stream information potentially left from a previously open stream.
\r
255 if ( oParams && oParams->nChannels < 1 ) {
\r
256 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
257 error( RtAudioError::INVALID_USE );
\r
261 if ( iParams && iParams->nChannels < 1 ) {
\r
262 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
263 error( RtAudioError::INVALID_USE );
\r
267 if ( oParams == NULL && iParams == NULL ) {
\r
268 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
269 error( RtAudioError::INVALID_USE );
\r
273 if ( formatBytes(format) == 0 ) {
\r
274 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
275 error( RtAudioError::INVALID_USE );
\r
279 unsigned int nDevices = getDeviceCount();
\r
280 unsigned int oChannels = 0;
\r
282 oChannels = oParams->nChannels;
\r
283 if ( oParams->deviceId >= nDevices ) {
\r
284 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
285 error( RtAudioError::INVALID_USE );
\r
290 unsigned int iChannels = 0;
\r
292 iChannels = iParams->nChannels;
\r
293 if ( iParams->deviceId >= nDevices ) {
\r
294 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
295 error( RtAudioError::INVALID_USE );
\r
302 if ( oChannels > 0 ) {
\r
304 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
305 sampleRate, format, bufferFrames, options );
\r
306 if ( result == false ) {
\r
307 error( RtAudioError::SYSTEM_ERROR );
\r
312 if ( iChannels > 0 ) {
\r
314 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
315 sampleRate, format, bufferFrames, options );
\r
316 if ( result == false ) {
\r
317 if ( oChannels > 0 ) closeStream();
\r
318 error( RtAudioError::SYSTEM_ERROR );
\r
323 stream_.callbackInfo.callback = (void *) callback;
\r
324 stream_.callbackInfo.userData = userData;
\r
325 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
327 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
328 stream_.state = STREAM_STOPPED;
\r
331 unsigned int RtApi :: getDefaultInputDevice( void )
\r
333 // Should be implemented in subclasses if possible.
\r
337 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
339 // Should be implemented in subclasses if possible.
\r
343 void RtApi :: closeStream( void )
\r
345 // MUST be implemented in subclasses!
\r
349 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
350 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
351 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
352 RtAudio::StreamOptions * /*options*/ )
\r
354 // MUST be implemented in subclasses!
\r
358 void RtApi :: tickStreamTime( void )
\r
360 // Subclasses that do not provide their own implementation of
\r
361 // getStreamTime should call this function once per buffer I/O to
\r
362 // provide basic stream time support.
\r
364 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
366 #if defined( HAVE_GETTIMEOFDAY )
\r
367 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
371 long RtApi :: getStreamLatency( void )
\r
375 long totalLatency = 0;
\r
376 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
377 totalLatency = stream_.latency[0];
\r
378 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
379 totalLatency += stream_.latency[1];
\r
381 return totalLatency;
\r
384 double RtApi :: getStreamTime( void )
\r
388 #if defined( HAVE_GETTIMEOFDAY )
\r
389 // Return a very accurate estimate of the stream time by
\r
390 // adding in the elapsed time since the last tick.
\r
391 struct timeval then;
\r
392 struct timeval now;
\r
394 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
395 return stream_.streamTime;
\r
397 gettimeofday( &now, NULL );
\r
398 then = stream_.lastTickTimestamp;
\r
399 return stream_.streamTime +
\r
400 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
401 (then.tv_sec + 0.000001 * then.tv_usec));
\r
403 return stream_.streamTime;
\r
407 unsigned int RtApi :: getStreamSampleRate( void )
\r
411 return stream_.sampleRate;
\r
415 // *************************************************** //
\r
417 // OS/API-specific methods.
\r
419 // *************************************************** //
\r
421 #if defined(__MACOSX_CORE__)
\r
423 // The OS X CoreAudio API is designed to use a separate callback
\r
424 // procedure for each of its audio devices. A single RtAudio duplex
\r
425 // stream using two different devices is supported here, though it
\r
426 // cannot be guaranteed to always behave correctly because we cannot
\r
427 // synchronize these two callbacks.
\r
429 // A property listener is installed for over/underrun information.
\r
430 // However, no functionality is currently provided to allow property
\r
431 // listeners to trigger user handlers because it is unclear what could
\r
432 // be done if a critical stream parameter (buffer size, sample rate,
\r
433 // device disconnect) notification arrived. The listeners entail
\r
434 // quite a bit of extra code and most likely, a user program wouldn't
\r
435 // be prepared for the result anyway. However, we do provide a flag
\r
436 // to the client callback function to inform of an over/underrun.
\r
438 // A structure to hold various information related to the CoreAudio API
\r
440 struct CoreHandle {
\r
441 AudioDeviceID id[2]; // device ids
\r
442 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
443 AudioDeviceIOProcID procId[2];
\r
445 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
446 UInt32 nStreams[2]; // number of streams to use
\r
448 char *deviceBuffer;
\r
449 pthread_cond_t condition;
\r
450 int drainCounter; // Tracks callback counts when draining
\r
451 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
454 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
457 RtApiCore:: RtApiCore()
\r
459 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
460 // This is a largely undocumented but absolutely necessary
\r
461 // requirement starting with OS-X 10.6. If not called, queries and
\r
462 // updates to various audio device properties are not handled
\r
464 CFRunLoopRef theRunLoop = NULL;
\r
465 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
466 kAudioObjectPropertyScopeGlobal,
\r
467 kAudioObjectPropertyElementMaster };
\r
468 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
469 if ( result != noErr ) {
\r
470 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
471 error( RtAudioError::WARNING );
\r
476 RtApiCore :: ~RtApiCore()
\r
478 // The subclass destructor gets called before the base class
\r
479 // destructor, so close an existing stream before deallocating
\r
480 // apiDeviceId memory.
\r
481 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
484 unsigned int RtApiCore :: getDeviceCount( void )
\r
486 // Find out how many audio devices there are, if any.
\r
488 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
489 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
490 if ( result != noErr ) {
\r
491 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
492 error( RtAudioError::WARNING );
\r
496 return dataSize / sizeof( AudioDeviceID );
\r
499 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
501 unsigned int nDevices = getDeviceCount();
\r
502 if ( nDevices <= 1 ) return 0;
\r
505 UInt32 dataSize = sizeof( AudioDeviceID );
\r
506 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
507 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
508 if ( result != noErr ) {
\r
509 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
510 error( RtAudioError::WARNING );
\r
514 dataSize *= nDevices;
\r
515 AudioDeviceID deviceList[ nDevices ];
\r
516 property.mSelector = kAudioHardwarePropertyDevices;
\r
517 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
518 if ( result != noErr ) {
\r
519 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
520 error( RtAudioError::WARNING );
\r
524 for ( unsigned int i=0; i<nDevices; i++ )
\r
525 if ( id == deviceList[i] ) return i;
\r
527 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
528 error( RtAudioError::WARNING );
\r
532 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
534 unsigned int nDevices = getDeviceCount();
\r
535 if ( nDevices <= 1 ) return 0;
\r
538 UInt32 dataSize = sizeof( AudioDeviceID );
\r
539 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
540 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
541 if ( result != noErr ) {
\r
542 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
543 error( RtAudioError::WARNING );
\r
547 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
548 AudioDeviceID deviceList[ nDevices ];
\r
549 property.mSelector = kAudioHardwarePropertyDevices;
\r
550 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
551 if ( result != noErr ) {
\r
552 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
553 error( RtAudioError::WARNING );
\r
557 for ( unsigned int i=0; i<nDevices; i++ )
\r
558 if ( id == deviceList[i] ) return i;
\r
560 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
561 error( RtAudioError::WARNING );
\r
565 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
567 RtAudio::DeviceInfo info;
\r
568 info.probed = false;
\r
571 unsigned int nDevices = getDeviceCount();
\r
572 if ( nDevices == 0 ) {
\r
573 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
574 error( RtAudioError::INVALID_USE );
\r
578 if ( device >= nDevices ) {
\r
579 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
580 error( RtAudioError::INVALID_USE );
\r
584 AudioDeviceID deviceList[ nDevices ];
\r
585 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
586 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
587 kAudioObjectPropertyScopeGlobal,
\r
588 kAudioObjectPropertyElementMaster };
\r
589 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
590 0, NULL, &dataSize, (void *) &deviceList );
\r
591 if ( result != noErr ) {
\r
592 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
593 error( RtAudioError::WARNING );
\r
597 AudioDeviceID id = deviceList[ device ];
\r
599 // Get the device name.
\r
601 CFStringRef cfname;
\r
602 dataSize = sizeof( CFStringRef );
\r
603 property.mSelector = kAudioObjectPropertyManufacturer;
\r
604 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
605 if ( result != noErr ) {
\r
606 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
607 errorText_ = errorStream_.str();
\r
608 error( RtAudioError::WARNING );
\r
612 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
613 int length = CFStringGetLength(cfname);
\r
614 char *mname = (char *)malloc(length * 3 + 1);
\r
615 #if defined( UNICODE ) || defined( _UNICODE )
\r
616 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
618 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
620 info.name.append( (const char *)mname, strlen(mname) );
\r
621 info.name.append( ": " );
\r
622 CFRelease( cfname );
\r
625 property.mSelector = kAudioObjectPropertyName;
\r
626 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
627 if ( result != noErr ) {
\r
628 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
629 errorText_ = errorStream_.str();
\r
630 error( RtAudioError::WARNING );
\r
634 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
635 length = CFStringGetLength(cfname);
\r
636 char *name = (char *)malloc(length * 3 + 1);
\r
637 #if defined( UNICODE ) || defined( _UNICODE )
\r
638 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
640 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
642 info.name.append( (const char *)name, strlen(name) );
\r
643 CFRelease( cfname );
\r
646 // Get the output stream "configuration".
\r
647 AudioBufferList *bufferList = nil;
\r
648 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
649 property.mScope = kAudioDevicePropertyScopeOutput;
\r
650 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
652 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
653 if ( result != noErr || dataSize == 0 ) {
\r
654 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
655 errorText_ = errorStream_.str();
\r
656 error( RtAudioError::WARNING );
\r
660 // Allocate the AudioBufferList.
\r
661 bufferList = (AudioBufferList *) malloc( dataSize );
\r
662 if ( bufferList == NULL ) {
\r
663 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
664 error( RtAudioError::WARNING );
\r
668 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
669 if ( result != noErr || dataSize == 0 ) {
\r
670 free( bufferList );
\r
671 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
672 errorText_ = errorStream_.str();
\r
673 error( RtAudioError::WARNING );
\r
677 // Get output channel information.
\r
678 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
679 for ( i=0; i<nStreams; i++ )
\r
680 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
681 free( bufferList );
\r
683 // Get the input stream "configuration".
\r
684 property.mScope = kAudioDevicePropertyScopeInput;
\r
685 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
686 if ( result != noErr || dataSize == 0 ) {
\r
687 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
688 errorText_ = errorStream_.str();
\r
689 error( RtAudioError::WARNING );
\r
693 // Allocate the AudioBufferList.
\r
694 bufferList = (AudioBufferList *) malloc( dataSize );
\r
695 if ( bufferList == NULL ) {
\r
696 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
697 error( RtAudioError::WARNING );
\r
701 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
702 if (result != noErr || dataSize == 0) {
\r
703 free( bufferList );
\r
704 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
705 errorText_ = errorStream_.str();
\r
706 error( RtAudioError::WARNING );
\r
710 // Get input channel information.
\r
711 nStreams = bufferList->mNumberBuffers;
\r
712 for ( i=0; i<nStreams; i++ )
\r
713 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
714 free( bufferList );
\r
716 // If device opens for both playback and capture, we determine the channels.
\r
717 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
718 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
720 // Probe the device sample rates.
\r
721 bool isInput = false;
\r
722 if ( info.outputChannels == 0 ) isInput = true;
\r
724 // Determine the supported sample rates.
\r
725 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
726 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
727 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
728 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
736 AudioValueRange rangeList[ nRanges ];
\r
737 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
738 if ( result != kAudioHardwareNoError ) {
\r
739 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
740 errorText_ = errorStream_.str();
\r
741 error( RtAudioError::WARNING );
\r
745 // The sample rate reporting mechanism is a bit of a mystery. It
\r
746 // seems that it can either return individual rates or a range of
\r
747 // rates. I assume that if the min / max range values are the same,
\r
748 // then that represents a single supported rate and if the min / max
\r
749 // range values are different, the device supports an arbitrary
\r
750 // range of values (though there might be multiple ranges, so we'll
\r
751 // use the most conservative range).
\r
752 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
753 bool haveValueRange = false;
\r
754 info.sampleRates.clear();
\r
755 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
756 if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
\r
757 info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
\r
759 haveValueRange = true;
\r
760 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
761 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
765 if ( haveValueRange ) {
\r
766 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
767 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
768 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
772 // Sort and remove any redundant values
\r
773 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
774 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
776 if ( info.sampleRates.size() == 0 ) {
\r
777 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
778 errorText_ = errorStream_.str();
\r
779 error( RtAudioError::WARNING );
\r
783 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
784 // Thus, any other "physical" formats supported by the device are of
\r
785 // no interest to the client.
\r
786 info.nativeFormats = RTAUDIO_FLOAT32;
\r
788 if ( info.outputChannels > 0 )
\r
789 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
790 if ( info.inputChannels > 0 )
\r
791 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
793 info.probed = true;
\r
797 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
798 const AudioTimeStamp* /*inNow*/,
\r
799 const AudioBufferList* inInputData,
\r
800 const AudioTimeStamp* /*inInputTime*/,
\r
801 AudioBufferList* outOutputData,
\r
802 const AudioTimeStamp* /*inOutputTime*/,
\r
803 void* infoPointer )
\r
805 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
807 RtApiCore *object = (RtApiCore *) info->object;
\r
808 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
809 return kAudioHardwareUnspecifiedError;
\r
811 return kAudioHardwareNoError;
\r
814 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
816 const AudioObjectPropertyAddress properties[],
\r
817 void* handlePointer )
\r
819 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
820 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
821 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
822 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
823 handle->xrun[1] = true;
\r
825 handle->xrun[0] = true;
\r
829 return kAudioHardwareNoError;
\r
832 static OSStatus rateListener( AudioObjectID inDevice,
\r
833 UInt32 /*nAddresses*/,
\r
834 const AudioObjectPropertyAddress /*properties*/[],
\r
835 void* ratePointer )
\r
837 Float64 *rate = (Float64 *) ratePointer;
\r
838 UInt32 dataSize = sizeof( Float64 );
\r
839 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
840 kAudioObjectPropertyScopeGlobal,
\r
841 kAudioObjectPropertyElementMaster };
\r
842 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
843 return kAudioHardwareNoError;
\r
846 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
847 unsigned int firstChannel, unsigned int sampleRate,
\r
848 RtAudioFormat format, unsigned int *bufferSize,
\r
849 RtAudio::StreamOptions *options )
\r
852 unsigned int nDevices = getDeviceCount();
\r
853 if ( nDevices == 0 ) {
\r
854 // This should not happen because a check is made before this function is called.
\r
855 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
859 if ( device >= nDevices ) {
\r
860 // This should not happen because a check is made before this function is called.
\r
861 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
865 AudioDeviceID deviceList[ nDevices ];
\r
866 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
867 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
868 kAudioObjectPropertyScopeGlobal,
\r
869 kAudioObjectPropertyElementMaster };
\r
870 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
871 0, NULL, &dataSize, (void *) &deviceList );
\r
872 if ( result != noErr ) {
\r
873 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
877 AudioDeviceID id = deviceList[ device ];
\r
879 // Setup for stream mode.
\r
880 bool isInput = false;
\r
881 if ( mode == INPUT ) {
\r
883 property.mScope = kAudioDevicePropertyScopeInput;
\r
886 property.mScope = kAudioDevicePropertyScopeOutput;
\r
888 // Get the stream "configuration".
\r
889 AudioBufferList *bufferList = nil;
\r
891 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
892 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
893 if ( result != noErr || dataSize == 0 ) {
\r
894 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
895 errorText_ = errorStream_.str();
\r
899 // Allocate the AudioBufferList.
\r
900 bufferList = (AudioBufferList *) malloc( dataSize );
\r
901 if ( bufferList == NULL ) {
\r
902 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
906 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
907 if (result != noErr || dataSize == 0) {
\r
908 free( bufferList );
\r
909 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
910 errorText_ = errorStream_.str();
\r
914 // Search for one or more streams that contain the desired number of
\r
915 // channels. CoreAudio devices can have an arbitrary number of
\r
916 // streams and each stream can have an arbitrary number of channels.
\r
917 // For each stream, a single buffer of interleaved samples is
\r
918 // provided. RtAudio prefers the use of one stream of interleaved
\r
919 // data or multiple consecutive single-channel streams. However, we
\r
920 // now support multiple consecutive multi-channel streams of
\r
921 // interleaved data as well.
\r
922 UInt32 iStream, offsetCounter = firstChannel;
\r
923 UInt32 nStreams = bufferList->mNumberBuffers;
\r
924 bool monoMode = false;
\r
925 bool foundStream = false;
\r
927 // First check that the device supports the requested number of
\r
929 UInt32 deviceChannels = 0;
\r
930 for ( iStream=0; iStream<nStreams; iStream++ )
\r
931 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
933 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
934 free( bufferList );
\r
935 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
936 errorText_ = errorStream_.str();
\r
940 // Look for a single stream meeting our needs.
\r
941 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
942 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
943 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
944 if ( streamChannels >= channels + offsetCounter ) {
\r
945 firstStream = iStream;
\r
946 channelOffset = offsetCounter;
\r
947 foundStream = true;
\r
950 if ( streamChannels > offsetCounter ) break;
\r
951 offsetCounter -= streamChannels;
\r
954 // If we didn't find a single stream above, then we should be able
\r
955 // to meet the channel specification with multiple streams.
\r
956 if ( foundStream == false ) {
\r
958 offsetCounter = firstChannel;
\r
959 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
960 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
961 if ( streamChannels > offsetCounter ) break;
\r
962 offsetCounter -= streamChannels;
\r
965 firstStream = iStream;
\r
966 channelOffset = offsetCounter;
\r
967 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
969 if ( streamChannels > 1 ) monoMode = false;
\r
970 while ( channelCounter > 0 ) {
\r
971 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
972 if ( streamChannels > 1 ) monoMode = false;
\r
973 channelCounter -= streamChannels;
\r
978 free( bufferList );
\r
980 // Determine the buffer size.
\r
981 AudioValueRange bufferRange;
\r
982 dataSize = sizeof( AudioValueRange );
\r
983 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
984 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
986 if ( result != noErr ) {
\r
987 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
988 errorText_ = errorStream_.str();
\r
992 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
993 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
994 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
996 // Set the buffer size. For multiple streams, I'm assuming we only
\r
997 // need to make this setting for the master channel.
\r
998 UInt32 theSize = (UInt32) *bufferSize;
\r
999 dataSize = sizeof( UInt32 );
\r
1000 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1001 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1003 if ( result != noErr ) {
\r
1004 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1005 errorText_ = errorStream_.str();
\r
1009 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1010 // MUST be the same in both directions!
\r
1011 *bufferSize = theSize;
\r
1012 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1013 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1014 errorText_ = errorStream_.str();
\r
1018 stream_.bufferSize = *bufferSize;
\r
1019 stream_.nBuffers = 1;
\r
1021 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1022 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1024 dataSize = sizeof( hog_pid );
\r
1025 property.mSelector = kAudioDevicePropertyHogMode;
\r
1026 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1027 if ( result != noErr ) {
\r
1028 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1029 errorText_ = errorStream_.str();
\r
1033 if ( hog_pid != getpid() ) {
\r
1034 hog_pid = getpid();
\r
1035 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1036 if ( result != noErr ) {
\r
1037 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1038 errorText_ = errorStream_.str();
\r
1044 // Check and if necessary, change the sample rate for the device.
\r
1045 Float64 nominalRate;
\r
1046 dataSize = sizeof( Float64 );
\r
1047 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1048 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1049 if ( result != noErr ) {
\r
1050 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1051 errorText_ = errorStream_.str();
\r
1055 // Only change the sample rate if off by more than 1 Hz.
\r
1056 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1058 // Set a property listener for the sample rate change
\r
1059 Float64 reportedRate = 0.0;
\r
1060 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1061 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1062 if ( result != noErr ) {
\r
1063 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1064 errorText_ = errorStream_.str();
\r
1068 nominalRate = (Float64) sampleRate;
\r
1069 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1070 if ( result != noErr ) {
\r
1071 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1072 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1073 errorText_ = errorStream_.str();
\r
1077 // Now wait until the reported nominal rate is what we just set.
\r
1078 UInt32 microCounter = 0;
\r
1079 while ( reportedRate != nominalRate ) {
\r
1080 microCounter += 5000;
\r
1081 if ( microCounter > 5000000 ) break;
\r
1085 // Remove the property listener.
\r
1086 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1088 if ( microCounter > 5000000 ) {
\r
1089 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1090 errorText_ = errorStream_.str();
\r
1095 // Now set the stream format for all streams. Also, check the
\r
1096 // physical format of the device and change that if necessary.
\r
1097 AudioStreamBasicDescription description;
\r
1098 dataSize = sizeof( AudioStreamBasicDescription );
\r
1099 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1100 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1101 if ( result != noErr ) {
\r
1102 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1103 errorText_ = errorStream_.str();
\r
1107 // Set the sample rate and data format id. However, only make the
\r
1108 // change if the sample rate is not within 1.0 of the desired
\r
1109 // rate and the format is not linear pcm.
\r
1110 bool updateFormat = false;
\r
1111 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1112 description.mSampleRate = (Float64) sampleRate;
\r
1113 updateFormat = true;
\r
1116 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1117 description.mFormatID = kAudioFormatLinearPCM;
\r
1118 updateFormat = true;
\r
1121 if ( updateFormat ) {
\r
1122 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1123 if ( result != noErr ) {
\r
1124 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1125 errorText_ = errorStream_.str();
\r
1130 // Now check the physical format.
\r
1131 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1132 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1133 if ( result != noErr ) {
\r
1134 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1135 errorText_ = errorStream_.str();
\r
1139 //std::cout << "Current physical stream format:" << std::endl;
\r
1140 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1141 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1142 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1143 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1145 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1146 description.mFormatID = kAudioFormatLinearPCM;
\r
1147 //description.mSampleRate = (Float64) sampleRate;
\r
1148 AudioStreamBasicDescription testDescription = description;
\r
1149 UInt32 formatFlags;
\r
1151 // We'll try higher bit rates first and then work our way down.
\r
1152 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1153 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1154 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1155 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1156 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1157 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1158 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1159 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1160 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1161 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1162 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1163 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1164 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1166 bool setPhysicalFormat = false;
\r
1167 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1168 testDescription = description;
\r
1169 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1170 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1171 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1172 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1174 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1175 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1176 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1177 if ( result == noErr ) {
\r
1178 setPhysicalFormat = true;
\r
1179 //std::cout << "Updated physical stream format:" << std::endl;
\r
1180 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1181 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1182 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1183 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1188 if ( !setPhysicalFormat ) {
\r
1189 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1190 errorText_ = errorStream_.str();
\r
1193 } // done setting virtual/physical formats.
\r
1195 // Get the stream / device latency.
\r
1197 dataSize = sizeof( UInt32 );
\r
1198 property.mSelector = kAudioDevicePropertyLatency;
\r
1199 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1200 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1201 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1203 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1204 errorText_ = errorStream_.str();
\r
1205 error( RtAudioError::WARNING );
\r
1209 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1210 // always be presented in native-endian format, so we should never
\r
1211 // need to byte swap.
\r
1212 stream_.doByteSwap[mode] = false;
\r
1214 // From the CoreAudio documentation, PCM data must be supplied as
\r
1216 stream_.userFormat = format;
\r
1217 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1219 if ( streamCount == 1 )
\r
1220 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1221 else // multiple streams
\r
1222 stream_.nDeviceChannels[mode] = channels;
\r
1223 stream_.nUserChannels[mode] = channels;
\r
1224 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1225 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1226 else stream_.userInterleaved = true;
\r
1227 stream_.deviceInterleaved[mode] = true;
\r
1228 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1230 // Set flags for buffer conversion.
\r
1231 stream_.doConvertBuffer[mode] = false;
\r
1232 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1233 stream_.doConvertBuffer[mode] = true;
\r
1234 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1235 stream_.doConvertBuffer[mode] = true;
\r
1236 if ( streamCount == 1 ) {
\r
1237 if ( stream_.nUserChannels[mode] > 1 &&
\r
1238 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1239 stream_.doConvertBuffer[mode] = true;
\r
1241 else if ( monoMode && stream_.userInterleaved )
\r
1242 stream_.doConvertBuffer[mode] = true;
\r
1244 // Allocate our CoreHandle structure for the stream.
\r
1245 CoreHandle *handle = 0;
\r
1246 if ( stream_.apiHandle == 0 ) {
\r
1248 handle = new CoreHandle;
\r
1250 catch ( std::bad_alloc& ) {
\r
1251 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1255 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1256 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1259 stream_.apiHandle = (void *) handle;
\r
1262 handle = (CoreHandle *) stream_.apiHandle;
\r
1263 handle->iStream[mode] = firstStream;
\r
1264 handle->nStreams[mode] = streamCount;
\r
1265 handle->id[mode] = id;
\r
1267 // Allocate necessary internal buffers.
\r
1268 unsigned long bufferBytes;
\r
1269 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1270 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1271 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1272 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1273 if ( stream_.userBuffer[mode] == NULL ) {
\r
1274 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1278 // If possible, we will make use of the CoreAudio stream buffers as
\r
1279 // "device buffers". However, we can't do this if using multiple
\r
1281 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1283 bool makeBuffer = true;
\r
1284 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1285 if ( mode == INPUT ) {
\r
1286 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1287 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1288 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1292 if ( makeBuffer ) {
\r
1293 bufferBytes *= *bufferSize;
\r
1294 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1295 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1296 if ( stream_.deviceBuffer == NULL ) {
\r
1297 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1303 stream_.sampleRate = sampleRate;
\r
1304 stream_.device[mode] = device;
\r
1305 stream_.state = STREAM_STOPPED;
\r
1306 stream_.callbackInfo.object = (void *) this;
\r
1308 // Setup the buffer conversion information structure.
\r
1309 if ( stream_.doConvertBuffer[mode] ) {
\r
1310 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1311 else setConvertInfo( mode, channelOffset );
\r
1314 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1315 // Only one callback procedure per device.
\r
1316 stream_.mode = DUPLEX;
\r
1318 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1319 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1321 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1322 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1324 if ( result != noErr ) {
\r
1325 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1326 errorText_ = errorStream_.str();
\r
1329 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1330 stream_.mode = DUPLEX;
\r
1332 stream_.mode = mode;
\r
1335 // Setup the device property listener for over/underload.
\r
1336 property.mSelector = kAudioDeviceProcessorOverload;
\r
1337 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1338 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1344 pthread_cond_destroy( &handle->condition );
\r
1346 stream_.apiHandle = 0;
\r
1349 for ( int i=0; i<2; i++ ) {
\r
1350 if ( stream_.userBuffer[i] ) {
\r
1351 free( stream_.userBuffer[i] );
\r
1352 stream_.userBuffer[i] = 0;
\r
1356 if ( stream_.deviceBuffer ) {
\r
1357 free( stream_.deviceBuffer );
\r
1358 stream_.deviceBuffer = 0;
\r
1361 stream_.state = STREAM_CLOSED;
\r
1365 void RtApiCore :: closeStream( void )
\r
1367 if ( stream_.state == STREAM_CLOSED ) {
\r
1368 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1369 error( RtAudioError::WARNING );
\r
1373 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1374 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1375 if ( stream_.state == STREAM_RUNNING )
\r
1376 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1377 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1378 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1380 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1381 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1385 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1386 if ( stream_.state == STREAM_RUNNING )
\r
1387 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1388 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1389 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1391 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1392 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1396 for ( int i=0; i<2; i++ ) {
\r
1397 if ( stream_.userBuffer[i] ) {
\r
1398 free( stream_.userBuffer[i] );
\r
1399 stream_.userBuffer[i] = 0;
\r
1403 if ( stream_.deviceBuffer ) {
\r
1404 free( stream_.deviceBuffer );
\r
1405 stream_.deviceBuffer = 0;
\r
1408 // Destroy pthread condition variable.
\r
1409 pthread_cond_destroy( &handle->condition );
\r
1411 stream_.apiHandle = 0;
\r
1413 stream_.mode = UNINITIALIZED;
\r
1414 stream_.state = STREAM_CLOSED;
\r
1417 void RtApiCore :: startStream( void )
\r
1420 if ( stream_.state == STREAM_RUNNING ) {
\r
1421 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1422 error( RtAudioError::WARNING );
\r
1426 OSStatus result = noErr;
\r
1427 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1428 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1430 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1431 if ( result != noErr ) {
\r
1432 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1433 errorText_ = errorStream_.str();
\r
1438 if ( stream_.mode == INPUT ||
\r
1439 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1441 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1442 if ( result != noErr ) {
\r
1443 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1444 errorText_ = errorStream_.str();
\r
1449 handle->drainCounter = 0;
\r
1450 handle->internalDrain = false;
\r
1451 stream_.state = STREAM_RUNNING;
\r
1454 if ( result == noErr ) return;
\r
1455 error( RtAudioError::SYSTEM_ERROR );
\r
1458 void RtApiCore :: stopStream( void )
\r
1461 if ( stream_.state == STREAM_STOPPED ) {
\r
1462 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1463 error( RtAudioError::WARNING );
\r
1467 OSStatus result = noErr;
\r
1468 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1469 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1471 if ( handle->drainCounter == 0 ) {
\r
1472 handle->drainCounter = 2;
\r
1473 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1476 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1477 if ( result != noErr ) {
\r
1478 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1479 errorText_ = errorStream_.str();
\r
1484 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1486 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1487 if ( result != noErr ) {
\r
1488 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1489 errorText_ = errorStream_.str();
\r
1494 stream_.state = STREAM_STOPPED;
\r
1497 if ( result == noErr ) return;
\r
1498 error( RtAudioError::SYSTEM_ERROR );
\r
1501 void RtApiCore :: abortStream( void )
\r
1504 if ( stream_.state == STREAM_STOPPED ) {
\r
1505 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1506 error( RtAudioError::WARNING );
\r
1510 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1511 handle->drainCounter = 2;
\r
1516 // This function will be called by a spawned thread when the user
\r
1517 // callback function signals that the stream should be stopped or
\r
1518 // aborted. It is better to handle it this way because the
\r
1519 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1520 // function is called.
\r
1521 static void *coreStopStream( void *ptr )
\r
1523 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1524 RtApiCore *object = (RtApiCore *) info->object;
\r
1526 object->stopStream();
\r
1527 pthread_exit( NULL );
\r
1530 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1531 const AudioBufferList *inBufferList,
\r
1532 const AudioBufferList *outBufferList )
\r
1534 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1535 if ( stream_.state == STREAM_CLOSED ) {
\r
1536 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1537 error( RtAudioError::WARNING );
\r
1541 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1542 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1544 // Check if we were draining the stream and signal is finished.
\r
1545 if ( handle->drainCounter > 3 ) {
\r
1546 ThreadHandle threadId;
\r
1548 stream_.state = STREAM_STOPPING;
\r
1549 if ( handle->internalDrain == true )
\r
1550 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1551 else // external call to stopStream()
\r
1552 pthread_cond_signal( &handle->condition );
\r
1556 AudioDeviceID outputDevice = handle->id[0];
\r
1558 // Invoke user callback to get fresh output data UNLESS we are
\r
1559 // draining stream or duplex mode AND the input/output devices are
\r
1560 // different AND this function is called for the input device.
\r
1561 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1562 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1563 double streamTime = getStreamTime();
\r
1564 RtAudioStreamStatus status = 0;
\r
1565 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1566 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1567 handle->xrun[0] = false;
\r
1569 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1570 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1571 handle->xrun[1] = false;
\r
1574 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1575 stream_.bufferSize, streamTime, status, info->userData );
\r
1576 if ( cbReturnValue == 2 ) {
\r
1577 stream_.state = STREAM_STOPPING;
\r
1578 handle->drainCounter = 2;
\r
1582 else if ( cbReturnValue == 1 ) {
\r
1583 handle->drainCounter = 1;
\r
1584 handle->internalDrain = true;
\r
1588 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1590 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1592 if ( handle->nStreams[0] == 1 ) {
\r
1593 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1595 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1597 else { // fill multiple streams with zeros
\r
1598 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1599 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1601 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1605 else if ( handle->nStreams[0] == 1 ) {
\r
1606 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1607 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1608 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1610 else { // copy from user buffer
\r
1611 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1612 stream_.userBuffer[0],
\r
1613 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1616 else { // fill multiple streams
\r
1617 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1618 if ( stream_.doConvertBuffer[0] ) {
\r
1619 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1620 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1623 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1624 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1625 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1626 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1627 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1630 else { // fill multiple multi-channel streams with interleaved data
\r
1631 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1632 Float32 *out, *in;
\r
1634 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1635 UInt32 inChannels = stream_.nUserChannels[0];
\r
1636 if ( stream_.doConvertBuffer[0] ) {
\r
1637 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1638 inChannels = stream_.nDeviceChannels[0];
\r
1641 if ( inInterleaved ) inOffset = 1;
\r
1642 else inOffset = stream_.bufferSize;
\r
1644 channelsLeft = inChannels;
\r
1645 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1647 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1648 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1651 // Account for possible channel offset in first stream
\r
1652 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1653 streamChannels -= stream_.channelOffset[0];
\r
1654 outJump = stream_.channelOffset[0];
\r
1658 // Account for possible unfilled channels at end of the last stream
\r
1659 if ( streamChannels > channelsLeft ) {
\r
1660 outJump = streamChannels - channelsLeft;
\r
1661 streamChannels = channelsLeft;
\r
1664 // Determine input buffer offsets and skips
\r
1665 if ( inInterleaved ) {
\r
1666 inJump = inChannels;
\r
1667 in += inChannels - channelsLeft;
\r
1671 in += (inChannels - channelsLeft) * inOffset;
\r
1674 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1675 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1676 *out++ = in[j*inOffset];
\r
1681 channelsLeft -= streamChannels;
\r
1686 if ( handle->drainCounter ) {
\r
1687 handle->drainCounter++;
\r
1692 AudioDeviceID inputDevice;
\r
1693 inputDevice = handle->id[1];
\r
1694 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1696 if ( handle->nStreams[1] == 1 ) {
\r
1697 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1698 convertBuffer( stream_.userBuffer[1],
\r
1699 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1700 stream_.convertInfo[1] );
\r
1702 else { // copy to user buffer
\r
1703 memcpy( stream_.userBuffer[1],
\r
1704 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1705 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1708 else { // read from multiple streams
\r
1709 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1710 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1712 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1713 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1714 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1715 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1716 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1719 else { // read from multiple multi-channel streams
\r
1720 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1721 Float32 *out, *in;
\r
1723 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1724 UInt32 outChannels = stream_.nUserChannels[1];
\r
1725 if ( stream_.doConvertBuffer[1] ) {
\r
1726 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1727 outChannels = stream_.nDeviceChannels[1];
\r
1730 if ( outInterleaved ) outOffset = 1;
\r
1731 else outOffset = stream_.bufferSize;
\r
1733 channelsLeft = outChannels;
\r
1734 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1736 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1737 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1740 // Account for possible channel offset in first stream
\r
1741 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1742 streamChannels -= stream_.channelOffset[1];
\r
1743 inJump = stream_.channelOffset[1];
\r
1747 // Account for possible unread channels at end of the last stream
\r
1748 if ( streamChannels > channelsLeft ) {
\r
1749 inJump = streamChannels - channelsLeft;
\r
1750 streamChannels = channelsLeft;
\r
1753 // Determine output buffer offsets and skips
\r
1754 if ( outInterleaved ) {
\r
1755 outJump = outChannels;
\r
1756 out += outChannels - channelsLeft;
\r
1760 out += (outChannels - channelsLeft) * outOffset;
\r
1763 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1764 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1765 out[j*outOffset] = *in++;
\r
1770 channelsLeft -= streamChannels;
\r
1774 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1775 convertBuffer( stream_.userBuffer[1],
\r
1776 stream_.deviceBuffer,
\r
1777 stream_.convertInfo[1] );
\r
1783 //MUTEX_UNLOCK( &stream_.mutex );
\r
1785 RtApi::tickStreamTime();
\r
1789 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1793 case kAudioHardwareNotRunningError:
\r
1794 return "kAudioHardwareNotRunningError";
\r
1796 case kAudioHardwareUnspecifiedError:
\r
1797 return "kAudioHardwareUnspecifiedError";
\r
1799 case kAudioHardwareUnknownPropertyError:
\r
1800 return "kAudioHardwareUnknownPropertyError";
\r
1802 case kAudioHardwareBadPropertySizeError:
\r
1803 return "kAudioHardwareBadPropertySizeError";
\r
1805 case kAudioHardwareIllegalOperationError:
\r
1806 return "kAudioHardwareIllegalOperationError";
\r
1808 case kAudioHardwareBadObjectError:
\r
1809 return "kAudioHardwareBadObjectError";
\r
1811 case kAudioHardwareBadDeviceError:
\r
1812 return "kAudioHardwareBadDeviceError";
\r
1814 case kAudioHardwareBadStreamError:
\r
1815 return "kAudioHardwareBadStreamError";
\r
1817 case kAudioHardwareUnsupportedOperationError:
\r
1818 return "kAudioHardwareUnsupportedOperationError";
\r
1820 case kAudioDeviceUnsupportedFormatError:
\r
1821 return "kAudioDeviceUnsupportedFormatError";
\r
1823 case kAudioDevicePermissionsError:
\r
1824 return "kAudioDevicePermissionsError";
\r
1827 return "CoreAudio unknown error";
\r
1831 //******************** End of __MACOSX_CORE__ *********************//
\r
1834 #if defined(__UNIX_JACK__)
\r
1836 // JACK is a low-latency audio server, originally written for the
\r
1837 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1838 // connect a number of different applications to an audio device, as
\r
1839 // well as allowing them to share audio between themselves.
\r
1841 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1842 // have ports connected to the server. The JACK server is typically
\r
1843 // started in a terminal as follows:
\r
1845 // .jackd -d alsa -d hw:0
\r
1847 // or through an interface program such as qjackctl. Many of the
\r
1848 // parameters normally set for a stream are fixed by the JACK server
\r
1849 // and can be specified when the JACK server is started. In
\r
1852 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1854 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1855 // frames, and number of buffers = 4. Once the server is running, it
\r
1856 // is not possible to override these values. If the values are not
\r
1857 // specified in the command-line, the JACK server uses default values.
\r
1859 // The JACK server does not have to be running when an instance of
\r
1860 // RtApiJack is created, though the function getDeviceCount() will
\r
1861 // report 0 devices found until JACK has been started. When no
\r
1862 // devices are available (i.e., the JACK server is not running), a
\r
1863 // stream cannot be opened.
\r
1865 #include <jack/jack.h>
\r
1866 #include <unistd.h>
\r
1869 // A structure to hold various information related to the Jack API
\r
1870 // implementation.
\r
1871 struct JackHandle {
\r
1872 jack_client_t *client;
\r
1873 jack_port_t **ports[2];
\r
1874 std::string deviceName[2];
\r
1876 pthread_cond_t condition;
\r
1877 int drainCounter; // Tracks callback counts when draining
\r
1878 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1881 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op JACK error handler used to suppress the library's console output
// when RtAudio is built without __RTAUDIO_DEBUG__.
static void jackSilentError( const char * ) {}
\r
1886 RtApiJack :: RtApiJack()
\r
1888 // Nothing to do here.
\r
1889 #if !defined(__RTAUDIO_DEBUG__)
\r
1890 // Turn off Jack's internal error reporting.
\r
1891 jack_set_error_function( &jackSilentError );
\r
1895 RtApiJack :: ~RtApiJack()
\r
1897 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1900 unsigned int RtApiJack :: getDeviceCount( void )
\r
1902 // See if we can become a jack client.
\r
1903 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1904 jack_status_t *status = NULL;
\r
1905 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1906 if ( client == 0 ) return 0;
\r
1908 const char **ports;
\r
1909 std::string port, previousPort;
\r
1910 unsigned int nChannels = 0, nDevices = 0;
\r
1911 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1913 // Parse the port names up to the first colon (:).
\r
1914 size_t iColon = 0;
\r
1916 port = (char *) ports[ nChannels ];
\r
1917 iColon = port.find(":");
\r
1918 if ( iColon != std::string::npos ) {
\r
1919 port = port.substr( 0, iColon + 1 );
\r
1920 if ( port != previousPort ) {
\r
1922 previousPort = port;
\r
1925 } while ( ports[++nChannels] );
\r
1929 jack_client_close( client );
\r
1933 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1935 RtAudio::DeviceInfo info;
\r
1936 info.probed = false;
\r
1938 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1939 jack_status_t *status = NULL;
\r
1940 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1941 if ( client == 0 ) {
\r
1942 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1943 error( RtAudioError::WARNING );
\r
1947 const char **ports;
\r
1948 std::string port, previousPort;
\r
1949 unsigned int nPorts = 0, nDevices = 0;
\r
1950 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1952 // Parse the port names up to the first colon (:).
\r
1953 size_t iColon = 0;
\r
1955 port = (char *) ports[ nPorts ];
\r
1956 iColon = port.find(":");
\r
1957 if ( iColon != std::string::npos ) {
\r
1958 port = port.substr( 0, iColon );
\r
1959 if ( port != previousPort ) {
\r
1960 if ( nDevices == device ) info.name = port;
\r
1962 previousPort = port;
\r
1965 } while ( ports[++nPorts] );
\r
1969 if ( device >= nDevices ) {
\r
1970 jack_client_close( client );
\r
1971 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1972 error( RtAudioError::INVALID_USE );
\r
1976 // Get the current jack server sample rate.
\r
1977 info.sampleRates.clear();
\r
1978 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1980 // Count the available ports containing the client name as device
\r
1981 // channels. Jack "input ports" equal RtAudio output channels.
\r
1982 unsigned int nChannels = 0;
\r
1983 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1985 while ( ports[ nChannels ] ) nChannels++;
\r
1987 info.outputChannels = nChannels;
\r
1990 // Jack "output ports" equal RtAudio input channels.
\r
1992 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1994 while ( ports[ nChannels ] ) nChannels++;
\r
1996 info.inputChannels = nChannels;
\r
1999 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2000 jack_client_close(client);
\r
2001 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2002 error( RtAudioError::WARNING );
\r
2006 // If device opens for both playback and capture, we determine the channels.
\r
2007 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2008 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2010 // Jack always uses 32-bit floats.
\r
2011 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2013 // Jack doesn't provide default devices so we'll use the first available one.
\r
2014 if ( device == 0 && info.outputChannels > 0 )
\r
2015 info.isDefaultOutput = true;
\r
2016 if ( device == 0 && info.inputChannels > 0 )
\r
2017 info.isDefaultInput = true;
\r
2019 jack_client_close(client);
\r
2020 info.probed = true;
\r
2024 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2026 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2028 RtApiJack *object = (RtApiJack *) info->object;
\r
2029 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2034 // This function will be called by a spawned thread when the Jack
\r
2035 // server signals that it is shutting down. It is necessary to handle
\r
2036 // it this way because the jackShutdown() function must return before
\r
2037 // the jack_deactivate() function (in closeStream()) will return.
\r
2038 static void *jackCloseStream( void *ptr )
\r
2040 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2041 RtApiJack *object = (RtApiJack *) info->object;
\r
2043 object->closeStream();
\r
2045 pthread_exit( NULL );
\r
2047 static void jackShutdown( void *infoPointer )
\r
2049 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2050 RtApiJack *object = (RtApiJack *) info->object;
\r
2052 // Check current stream state. If stopped, then we'll assume this
\r
2053 // was called as a result of a call to RtApiJack::stopStream (the
\r
2054 // deactivation of a client handle causes this function to be called).
\r
2055 // If not, we'll assume the Jack server is shutting down or some
\r
2056 // other problem occurred and we should close the stream.
\r
2057 if ( object->isStreamRunning() == false ) return;
\r
2059 ThreadHandle threadId;
\r
2060 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2061 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2064 static int jackXrun( void *infoPointer )
\r
2066 JackHandle *handle = (JackHandle *) infoPointer;
\r
2068 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2069 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2074 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2075 unsigned int firstChannel, unsigned int sampleRate,
\r
2076 RtAudioFormat format, unsigned int *bufferSize,
\r
2077 RtAudio::StreamOptions *options )
\r
2079 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2081 // Look for jack server and try to become a client (only do once per stream).
\r
2082 jack_client_t *client = 0;
\r
2083 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2084 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2085 jack_status_t *status = NULL;
\r
2086 if ( options && !options->streamName.empty() )
\r
2087 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2089 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2090 if ( client == 0 ) {
\r
2091 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2092 error( RtAudioError::WARNING );
\r
2097 // The handle must have been created on an earlier pass.
\r
2098 client = handle->client;
\r
2101 const char **ports;
\r
2102 std::string port, previousPort, deviceName;
\r
2103 unsigned int nPorts = 0, nDevices = 0;
\r
2104 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2106 // Parse the port names up to the first colon (:).
\r
2107 size_t iColon = 0;
\r
2109 port = (char *) ports[ nPorts ];
\r
2110 iColon = port.find(":");
\r
2111 if ( iColon != std::string::npos ) {
\r
2112 port = port.substr( 0, iColon );
\r
2113 if ( port != previousPort ) {
\r
2114 if ( nDevices == device ) deviceName = port;
\r
2116 previousPort = port;
\r
2119 } while ( ports[++nPorts] );
\r
2123 if ( device >= nDevices ) {
\r
2124 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2128 // Count the available ports containing the client name as device
\r
2129 // channels. Jack "input ports" equal RtAudio output channels.
\r
2130 unsigned int nChannels = 0;
\r
2131 unsigned long flag = JackPortIsInput;
\r
2132 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2133 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2135 while ( ports[ nChannels ] ) nChannels++;
\r
2139 // Compare the jack ports for specified client to the requested number of channels.
\r
2140 if ( nChannels < (channels + firstChannel) ) {
\r
2141 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2142 errorText_ = errorStream_.str();
\r
2146 // Check the jack server sample rate.
\r
2147 unsigned int jackRate = jack_get_sample_rate( client );
\r
2148 if ( sampleRate != jackRate ) {
\r
2149 jack_client_close( client );
\r
2150 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2151 errorText_ = errorStream_.str();
\r
2154 stream_.sampleRate = jackRate;
\r
2156 // Get the latency of the JACK port.
\r
2157 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2158 if ( ports[ firstChannel ] ) {
\r
2159 // Added by Ge Wang
\r
2160 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2161 // the range (usually the min and max are equal)
\r
2162 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2163 // get the latency range
\r
2164 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2165 // be optimistic, use the min!
\r
2166 stream_.latency[mode] = latrange.min;
\r
2167 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2171 // The jack server always uses 32-bit floating-point data.
\r
2172 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2173 stream_.userFormat = format;
\r
2175 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2176 else stream_.userInterleaved = true;
\r
2178 // Jack always uses non-interleaved buffers.
\r
2179 stream_.deviceInterleaved[mode] = false;
\r
2181 // Jack always provides host byte-ordered data.
\r
2182 stream_.doByteSwap[mode] = false;
\r
2184 // Get the buffer size. The buffer size and number of buffers
\r
2185 // (periods) is set when the jack server is started.
\r
2186 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2187 *bufferSize = stream_.bufferSize;
\r
2189 stream_.nDeviceChannels[mode] = channels;
\r
2190 stream_.nUserChannels[mode] = channels;
\r
2192 // Set flags for buffer conversion.
\r
2193 stream_.doConvertBuffer[mode] = false;
\r
2194 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2195 stream_.doConvertBuffer[mode] = true;
\r
2196 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2197 stream_.nUserChannels[mode] > 1 )
\r
2198 stream_.doConvertBuffer[mode] = true;
\r
2200 // Allocate our JackHandle structure for the stream.
\r
2201 if ( handle == 0 ) {
\r
2203 handle = new JackHandle;
\r
2205 catch ( std::bad_alloc& ) {
\r
2206 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2210 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2211 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2214 stream_.apiHandle = (void *) handle;
\r
2215 handle->client = client;
\r
2217 handle->deviceName[mode] = deviceName;
\r
2219 // Allocate necessary internal buffers.
\r
2220 unsigned long bufferBytes;
\r
2221 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2222 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2223 if ( stream_.userBuffer[mode] == NULL ) {
\r
2224 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2228 if ( stream_.doConvertBuffer[mode] ) {
\r
2230 bool makeBuffer = true;
\r
2231 if ( mode == OUTPUT )
\r
2232 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2233 else { // mode == INPUT
\r
2234 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2235 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2236 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2237 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2241 if ( makeBuffer ) {
\r
2242 bufferBytes *= *bufferSize;
\r
2243 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2244 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2245 if ( stream_.deviceBuffer == NULL ) {
\r
2246 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2252 // Allocate memory for the Jack ports (channels) identifiers.
\r
2253 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2254 if ( handle->ports[mode] == NULL ) {
\r
2255 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2259 stream_.device[mode] = device;
\r
2260 stream_.channelOffset[mode] = firstChannel;
\r
2261 stream_.state = STREAM_STOPPED;
\r
2262 stream_.callbackInfo.object = (void *) this;
\r
2264 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2265 // We had already set up the stream for output.
\r
2266 stream_.mode = DUPLEX;
\r
2268 stream_.mode = mode;
\r
2269 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2270 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2271 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2274 // Register our ports.
\r
2276 if ( mode == OUTPUT ) {
\r
2277 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2278 snprintf( label, 64, "outport %d", i );
\r
2279 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2280 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2284 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2285 snprintf( label, 64, "inport %d", i );
\r
2286 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2287 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2291 // Setup the buffer conversion information structure. We don't use
\r
2292 // buffers to do channel offsets, so we override that parameter
\r
2294 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2300 pthread_cond_destroy( &handle->condition );
\r
2301 jack_client_close( handle->client );
\r
2303 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2304 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2307 stream_.apiHandle = 0;
\r
2310 for ( int i=0; i<2; i++ ) {
\r
2311 if ( stream_.userBuffer[i] ) {
\r
2312 free( stream_.userBuffer[i] );
\r
2313 stream_.userBuffer[i] = 0;
\r
2317 if ( stream_.deviceBuffer ) {
\r
2318 free( stream_.deviceBuffer );
\r
2319 stream_.deviceBuffer = 0;
\r
2325 void RtApiJack :: closeStream( void )
\r
2327 if ( stream_.state == STREAM_CLOSED ) {
\r
2328 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2329 error( RtAudioError::WARNING );
\r
2333 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2336 if ( stream_.state == STREAM_RUNNING )
\r
2337 jack_deactivate( handle->client );
\r
2339 jack_client_close( handle->client );
\r
2343 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2344 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2345 pthread_cond_destroy( &handle->condition );
\r
2347 stream_.apiHandle = 0;
\r
2350 for ( int i=0; i<2; i++ ) {
\r
2351 if ( stream_.userBuffer[i] ) {
\r
2352 free( stream_.userBuffer[i] );
\r
2353 stream_.userBuffer[i] = 0;
\r
2357 if ( stream_.deviceBuffer ) {
\r
2358 free( stream_.deviceBuffer );
\r
2359 stream_.deviceBuffer = 0;
\r
2362 stream_.mode = UNINITIALIZED;
\r
2363 stream_.state = STREAM_CLOSED;
\r
2366 void RtApiJack :: startStream( void )
\r
2369 if ( stream_.state == STREAM_RUNNING ) {
\r
2370 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2371 error( RtAudioError::WARNING );
\r
2375 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2376 int result = jack_activate( handle->client );
\r
2378 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2382 const char **ports;
\r
2384 // Get the list of available ports.
\r
2385 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2387 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2388 if ( ports == NULL) {
\r
2389 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2393 // Now make the port connections. Since RtAudio wasn't designed to
\r
2394 // allow the user to select particular channels of a device, we'll
\r
2395 // just open the first "nChannels" ports with offset.
\r
2396 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2398 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2399 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2402 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2409 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2411 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2412 if ( ports == NULL) {
\r
2413 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2417 // Now make the port connections. See note above.
\r
2418 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2420 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2421 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2424 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2431 handle->drainCounter = 0;
\r
2432 handle->internalDrain = false;
\r
2433 stream_.state = STREAM_RUNNING;
\r
2436 if ( result == 0 ) return;
\r
2437 error( RtAudioError::SYSTEM_ERROR );
\r
2440 void RtApiJack :: stopStream( void )
\r
2443 if ( stream_.state == STREAM_STOPPED ) {
\r
2444 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2445 error( RtAudioError::WARNING );
\r
2449 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2450 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2452 if ( handle->drainCounter == 0 ) {
\r
2453 handle->drainCounter = 2;
\r
2454 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2458 jack_deactivate( handle->client );
\r
2459 stream_.state = STREAM_STOPPED;
\r
2462 void RtApiJack :: abortStream( void )
\r
2465 if ( stream_.state == STREAM_STOPPED ) {
\r
2466 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2467 error( RtAudioError::WARNING );
\r
2471 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2472 handle->drainCounter = 2;
\r
2477 // This function will be called by a spawned thread when the user
\r
2478 // callback function signals that the stream should be stopped or
\r
2479 // aborted. It is necessary to handle it this way because the
\r
2480 // callbackEvent() function must return before the jack_deactivate()
\r
2481 // function will return.
\r
2482 static void *jackStopStream( void *ptr )
\r
2484 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2485 RtApiJack *object = (RtApiJack *) info->object;
\r
2487 object->stopStream();
\r
2488 pthread_exit( NULL );
\r
2491 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2493 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2494 if ( stream_.state == STREAM_CLOSED ) {
\r
2495 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2496 error( RtAudioError::WARNING );
\r
2499 if ( stream_.bufferSize != nframes ) {
\r
2500 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2501 error( RtAudioError::WARNING );
\r
2505 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2506 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2508 // Check if we were draining the stream and signal is finished.
\r
2509 if ( handle->drainCounter > 3 ) {
\r
2510 ThreadHandle threadId;
\r
2512 stream_.state = STREAM_STOPPING;
\r
2513 if ( handle->internalDrain == true )
\r
2514 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2516 pthread_cond_signal( &handle->condition );
\r
2520 // Invoke user callback first, to get fresh output data.
\r
2521 if ( handle->drainCounter == 0 ) {
\r
2522 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2523 double streamTime = getStreamTime();
\r
2524 RtAudioStreamStatus status = 0;
\r
2525 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2526 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2527 handle->xrun[0] = false;
\r
2529 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2530 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2531 handle->xrun[1] = false;
\r
2533 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2534 stream_.bufferSize, streamTime, status, info->userData );
\r
2535 if ( cbReturnValue == 2 ) {
\r
2536 stream_.state = STREAM_STOPPING;
\r
2537 handle->drainCounter = 2;
\r
2539 pthread_create( &id, NULL, jackStopStream, info );
\r
2542 else if ( cbReturnValue == 1 ) {
\r
2543 handle->drainCounter = 1;
\r
2544 handle->internalDrain = true;
\r
2548 jack_default_audio_sample_t *jackbuffer;
\r
2549 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2550 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2552 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2554 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2555 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2556 memset( jackbuffer, 0, bufferBytes );
\r
2560 else if ( stream_.doConvertBuffer[0] ) {
\r
2562 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2564 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2565 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2566 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2569 else { // no buffer conversion
\r
2570 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2571 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2572 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2576 if ( handle->drainCounter ) {
\r
2577 handle->drainCounter++;
\r
2582 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2584 if ( stream_.doConvertBuffer[1] ) {
\r
2585 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2586 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2587 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2589 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2591 else { // no buffer conversion
\r
2592 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2593 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2594 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2600 RtApi::tickStreamTime();
\r
2603 //******************** End of __UNIX_JACK__ *********************//
\r
2606 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2608 // The ASIO API is designed around a callback scheme, so this
\r
2609 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2610 // Jack. The primary constraint with ASIO is that it only allows
\r
2611 // access to a single driver at a time. Thus, it is not possible to
\r
2612 // have more than one simultaneous RtAudio stream.
\r
2614 // This implementation also requires a number of external ASIO files
\r
2615 // and a few global variables. The ASIO callback scheme does not
\r
2616 // allow for the passing of user data, so we must create a global
\r
2617 // pointer to our callbackInfo structure.
\r
2619 // On unix systems, we make use of a pthread condition variable.
\r
2620 // Since there is no equivalent in Windows, I hacked something based
\r
2621 // on information found in
\r
2622 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2624 #include "asiosys.h"
\r
2626 #include "iasiothiscallresolver.h"
\r
2627 #include "asiodrivers.h"
\r
2630 static AsioDrivers drivers;
\r
2631 static ASIOCallbacks asioCallbacks;
\r
2632 static ASIODriverInfo driverInfo;
\r
2633 static CallbackInfo *asioCallbackInfo;
\r
2634 static bool asioXRun;
\r
2636 struct AsioHandle {
\r
2637 int drainCounter; // Tracks callback counts when draining
\r
2638 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2639 ASIOBufferInfo *bufferInfos;
\r
2643 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2646 // Function declarations (definitions at end of section)
\r
2647 static const char* getAsioErrorString( ASIOError result );
\r
2648 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2649 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2651 RtApiAsio :: RtApiAsio()
\r
2653 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2654 // CoInitialize beforehand, but it must be for appartment threading
\r
2655 // (in which case, CoInitilialize will return S_FALSE here).
\r
2656 coInitialized_ = false;
\r
2657 HRESULT hr = CoInitialize( NULL );
\r
2658 if ( FAILED(hr) ) {
\r
2659 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2660 error( RtAudioError::WARNING );
\r
2662 coInitialized_ = true;
\r
2664 drivers.removeCurrentDriver();
\r
2665 driverInfo.asioVersion = 2;
\r
2667 // See note in DirectSound implementation about GetDesktopWindow().
\r
2668 driverInfo.sysRef = GetForegroundWindow();
\r
// Destructor: closes any open stream and balances the constructor's
// CoInitialize() only if it succeeded there.
2671 RtApiAsio :: ~RtApiAsio()
\r
2673 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2674 if ( coInitialized_ ) CoUninitialize();
\r
// Returns the number of ASIO drivers registered on the system.
2677 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2679 return (unsigned int) drivers.asioGetNumDev();
\r
// Probes one ASIO driver (by index) for channel counts, supported sample
// rates, and native data format. Because ASIO allows only one loaded driver
// at a time, this returns cached results (devices_) while a stream is open.
// On any probe failure the function issues a WARNING and returns 'info' with
// probed == false.
2682 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2684 RtAudio::DeviceInfo info;
\r
2685 info.probed = false;
\r
2688 unsigned int nDevices = getDeviceCount();
\r
2689 if ( nDevices == 0 ) {
\r
2690 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2691 error( RtAudioError::INVALID_USE );
\r
2695 if ( device >= nDevices ) {
\r
2696 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2697 error( RtAudioError::INVALID_USE );
\r
2701 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2702 if ( stream_.state != STREAM_CLOSED ) {
\r
2703 if ( device >= devices_.size() ) {
\r
2704 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2705 error( RtAudioError::WARNING );
\r
2708 return devices_[ device ];
\r
2711 char driverName[32];
\r
2712 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2713 if ( result != ASE_OK ) {
\r
2714 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2715 errorText_ = errorStream_.str();
\r
2716 error( RtAudioError::WARNING );
\r
2720 info.name = driverName;
\r
2722 if ( !drivers.loadDriver( driverName ) ) {
\r
2723 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2724 errorText_ = errorStream_.str();
\r
2725 error( RtAudioError::WARNING );
\r
2729 result = ASIOInit( &driverInfo );
\r
2730 if ( result != ASE_OK ) {
\r
2731 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2732 errorText_ = errorStream_.str();
\r
2733 error( RtAudioError::WARNING );
\r
2737 // Determine the device channel information.
\r
2738 long inputChannels, outputChannels;
\r
2739 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2740 if ( result != ASE_OK ) {
\r
// Unload the driver before bailing so a later probe/open can load another.
2741 drivers.removeCurrentDriver();
\r
2742 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2743 errorText_ = errorStream_.str();
\r
2744 error( RtAudioError::WARNING );
\r
2748 info.outputChannels = outputChannels;
\r
2749 info.inputChannels = inputChannels;
\r
2750 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
// Duplex capability is limited by the smaller of the two directions.
2751 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2753 // Determine the supported sample rates.
\r
2754 info.sampleRates.clear();
\r
2755 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2756 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2757 if ( result == ASE_OK )
\r
2758 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2761 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2762 ASIOChannelInfo channelInfo;
\r
2763 channelInfo.channel = 0;
\r
2764 channelInfo.isInput = true;
\r
2765 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2766 result = ASIOGetChannelInfo( &channelInfo );
\r
2767 if ( result != ASE_OK ) {
\r
2768 drivers.removeCurrentDriver();
\r
2769 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2770 errorText_ = errorStream_.str();
\r
2771 error( RtAudioError::WARNING );
\r
// Map the ASIO sample type to the RtAudio format flag (both endiannesses
// of each width map to the same RtAudio format; byte order is handled at
// stream-open time).
2775 info.nativeFormats = 0;
\r
2776 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2777 info.nativeFormats |= RTAUDIO_SINT16;
\r
2778 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2779 info.nativeFormats |= RTAUDIO_SINT32;
\r
2780 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2781 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2782 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2783 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2784 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2785 info.nativeFormats |= RTAUDIO_SINT24;
\r
2787 if ( info.outputChannels > 0 )
\r
2788 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2789 if ( info.inputChannels > 0 )
\r
2790 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2792 info.probed = true;
\r
2793 drivers.removeCurrentDriver();
\r
// ASIO driver callback: forwards each buffer-switch notification to the
// RtApiAsio instance stored in the file-global asioCallbackInfo (the ASIO
// callback API has no user-data pointer, hence the global).
2797 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2799 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2800 object->callbackEvent( index );
\r
// Caches DeviceInfo for every driver into devices_ so getDeviceInfo() can
// answer from the cache while a stream is open (ASIO cannot probe other
// drivers while one is loaded).
2803 void RtApiAsio :: saveDeviceInfo( void )
\r
2807 unsigned int nDevices = getDeviceCount();
\r
2808 devices_.resize( nDevices );
\r
2809 for ( unsigned int i=0; i<nDevices; i++ )
\r
2810 devices_[i] = getDeviceInfo( i );
\r
// Opens (or extends to duplex) an ASIO stream on the given driver index:
// loads/initializes the driver, validates channel counts and sample rate,
// maps the driver's native sample type, negotiates a buffer size within the
// driver's min/max/granularity constraints, allocates the AsioHandle and the
// ASIO/user/device buffers, registers the ASIO callbacks, and records stream
// state and latencies. Returns SUCCESS/FAILURE per RtApi convention; the
// trailing error path (from ~3146) tears down everything allocated so far.
// NOTE(review): this extract has dropped interleaved lines (braces, 'try',
// some statements) — line numbering gaps confirm it. Do not compile as-is.
2813 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2814 unsigned int firstChannel, unsigned int sampleRate,
\r
2815 RtAudioFormat format, unsigned int *bufferSize,
\r
2816 RtAudio::StreamOptions *options )
\r
2818 // For ASIO, a duplex stream MUST use the same driver.
\r
2819 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2820 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2824 char driverName[32];
\r
2825 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2826 if ( result != ASE_OK ) {
\r
2827 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2828 errorText_ = errorStream_.str();
\r
2832 // Only load the driver once for duplex stream.
\r
2833 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2834 // The getDeviceInfo() function will not work when a stream is open
\r
2835 // because ASIO does not allow multiple devices to run at the same
\r
2836 // time. Thus, we'll probe the system before opening a stream and
\r
2837 // save the results for use by getDeviceInfo().
\r
2838 this->saveDeviceInfo();
\r
2840 if ( !drivers.loadDriver( driverName ) ) {
\r
2841 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2842 errorText_ = errorStream_.str();
\r
2846 result = ASIOInit( &driverInfo );
\r
2847 if ( result != ASE_OK ) {
\r
2848 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2849 errorText_ = errorStream_.str();
\r
2854 // Check the device channel count.
\r
2855 long inputChannels, outputChannels;
\r
2856 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2857 if ( result != ASE_OK ) {
\r
2858 drivers.removeCurrentDriver();
\r
2859 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2860 errorText_ = errorStream_.str();
\r
2864 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2865 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2866 drivers.removeCurrentDriver();
\r
2867 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2868 errorText_ = errorStream_.str();
\r
2871 stream_.nDeviceChannels[mode] = channels;
\r
2872 stream_.nUserChannels[mode] = channels;
\r
2873 stream_.channelOffset[mode] = firstChannel;
\r
2875 // Verify the sample rate is supported.
\r
2876 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2877 if ( result != ASE_OK ) {
\r
2878 drivers.removeCurrentDriver();
\r
2879 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2880 errorText_ = errorStream_.str();
\r
2884 // Get the current sample rate
\r
2885 ASIOSampleRate currentRate;
\r
// NOTE(review): '¤tRate' below is text mojibake — the original source reads
// ASIOGetSampleRate( &currentRate ); the '&curren' prefix was decoded as the
// HTML entity for '¤'. Restore '&currentRate' before compiling.
2886 result = ASIOGetSampleRate( ¤tRate );
\r
2887 if ( result != ASE_OK ) {
\r
2888 drivers.removeCurrentDriver();
\r
2889 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2890 errorText_ = errorStream_.str();
\r
2894 // Set the sample rate only if necessary
\r
2895 if ( currentRate != sampleRate ) {
\r
2896 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2897 if ( result != ASE_OK ) {
\r
2898 drivers.removeCurrentDriver();
\r
2899 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2900 errorText_ = errorStream_.str();
\r
2905 // Determine the driver data type.
\r
2906 ASIOChannelInfo channelInfo;
\r
2907 channelInfo.channel = 0;
\r
2908 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2909 else channelInfo.isInput = true;
\r
2910 result = ASIOGetChannelInfo( &channelInfo );
\r
2911 if ( result != ASE_OK ) {
\r
2912 drivers.removeCurrentDriver();
\r
2913 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2914 errorText_ = errorStream_.str();
\r
2918 // Assuming WINDOWS host is always little-endian.
\r
// doByteSwap is set true only for the MSB (big-endian) driver formats below.
2919 stream_.doByteSwap[mode] = false;
\r
2920 stream_.userFormat = format;
\r
2921 stream_.deviceFormat[mode] = 0;
\r
2922 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2923 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2924 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2926 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2927 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2928 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2930 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2931 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2932 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2934 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2935 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2936 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2938 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2939 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2940 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2943 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2944 drivers.removeCurrentDriver();
\r
2945 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2946 errorText_ = errorStream_.str();
\r
2950 // Set the buffer size. For a duplex stream, this will end up
\r
2951 // setting the buffer size based on the input constraints, which
\r
2953 long minSize, maxSize, preferSize, granularity;
\r
2954 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2955 if ( result != ASE_OK ) {
\r
2956 drivers.removeCurrentDriver();
\r
2957 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2958 errorText_ = errorStream_.str();
\r
// Clamp to [minSize, maxSize]; granularity == -1 means "powers of two only",
// any other non-zero granularity means "multiples of granularity".
2962 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2963 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2964 else if ( granularity == -1 ) {
\r
2965 // Make sure bufferSize is a power of two.
\r
2966 int log2_of_min_size = 0;
\r
2967 int log2_of_max_size = 0;
\r
// Find the positions of the highest set bits of minSize and maxSize.
2969 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2970 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2971 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
// Pick the power of two closest to the requested buffer size.
2974 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2975 int min_delta_num = log2_of_min_size;
\r
2977 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2978 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2979 if (current_delta < min_delta) {
\r
2980 min_delta = current_delta;
\r
2981 min_delta_num = i;
\r
2985 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2986 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2987 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2989 else if ( granularity != 0 ) {
\r
2990 // Set to an even multiple of granularity, rounding up.
\r
2991 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
// ASIO drives input and output with one buffer size; a duplex open must
// arrive at the same size the output half already established.
2994 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2995 drivers.removeCurrentDriver();
\r
2996 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3000 stream_.bufferSize = *bufferSize;
\r
3001 stream_.nBuffers = 2;
\r
3003 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3004 else stream_.userInterleaved = true;
\r
3006 // ASIO always uses non-interleaved buffers.
\r
3007 stream_.deviceInterleaved[mode] = false;
\r
3009 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3010 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3011 if ( handle == 0 ) {
\r
3013 handle = new AsioHandle;
\r
3015 catch ( std::bad_alloc& ) {
\r
3016 //if ( handle == NULL ) {
\r
3017 drivers.removeCurrentDriver();
\r
3018 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3021 handle->bufferInfos = 0;
\r
3023 // Create a manual-reset event.
\r
// Signaled by callbackEvent() when a drain completes; stopStream() waits on it.
3024 handle->condition = CreateEvent( NULL, // no security
\r
3025 TRUE, // manual-reset
\r
3026 FALSE, // non-signaled initially
\r
3027 NULL ); // unnamed
\r
3028 stream_.apiHandle = (void *) handle;
\r
3031 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3032 // and output separately, we'll have to dispose of previously
\r
3033 // created output buffers for a duplex stream.
\r
3034 long inputLatency, outputLatency;
\r
3035 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3036 ASIODisposeBuffers();
\r
3037 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3040 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3041 bool buffersAllocated = false;
\r
3042 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3043 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3044 if ( handle->bufferInfos == NULL ) {
\r
3045 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3046 errorText_ = errorStream_.str();
\r
// Output channels first (index 0), then input channels (index 1), matching
// the ordering callbackEvent() assumes when walking bufferInfos.
3050 ASIOBufferInfo *infos;
\r
3051 infos = handle->bufferInfos;
\r
3052 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3053 infos->isInput = ASIOFalse;
\r
3054 infos->channelNum = i + stream_.channelOffset[0];
\r
3055 infos->buffers[0] = infos->buffers[1] = 0;
\r
3057 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3058 infos->isInput = ASIOTrue;
\r
3059 infos->channelNum = i + stream_.channelOffset[1];
\r
3060 infos->buffers[0] = infos->buffers[1] = 0;
\r
3063 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3064 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3065 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3066 asioCallbacks.asioMessage = &asioMessages;
\r
3067 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3068 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3069 if ( result != ASE_OK ) {
\r
3070 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3071 errorText_ = errorStream_.str();
\r
3074 buffersAllocated = true;
\r
3076 // Set flags for buffer conversion.
\r
3077 stream_.doConvertBuffer[mode] = false;
\r
3078 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3079 stream_.doConvertBuffer[mode] = true;
\r
3080 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3081 stream_.nUserChannels[mode] > 1 )
\r
3082 stream_.doConvertBuffer[mode] = true;
\r
3084 // Allocate necessary internal buffers
\r
3085 unsigned long bufferBytes;
\r
3086 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3087 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3088 if ( stream_.userBuffer[mode] == NULL ) {
\r
3089 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3093 if ( stream_.doConvertBuffer[mode] ) {
\r
3095 bool makeBuffer = true;
\r
3096 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3097 if ( mode == INPUT ) {
\r
// A duplex stream can reuse the output's device buffer if it is big enough.
3098 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3099 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3100 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3104 if ( makeBuffer ) {
\r
3105 bufferBytes *= *bufferSize;
\r
3106 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3107 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3108 if ( stream_.deviceBuffer == NULL ) {
\r
3109 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3115 stream_.sampleRate = sampleRate;
\r
3116 stream_.device[mode] = device;
\r
3117 stream_.state = STREAM_STOPPED;
\r
// Point the file-global at this stream's callback info for bufferSwitch().
3118 asioCallbackInfo = &stream_.callbackInfo;
\r
3119 stream_.callbackInfo.object = (void *) this;
\r
3120 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3121 // We had already set up an output stream.
\r
3122 stream_.mode = DUPLEX;
\r
3124 stream_.mode = mode;
\r
3126 // Determine device latencies
\r
3127 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3128 if ( result != ASE_OK ) {
\r
3129 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3130 errorText_ = errorStream_.str();
\r
3131 error( RtAudioError::WARNING); // warn but don't fail
\r
3134 stream_.latency[0] = outputLatency;
\r
3135 stream_.latency[1] = inputLatency;
\r
3138 // Setup the buffer conversion information structure. We don't use
\r
3139 // buffers to do channel offsets, so we override that parameter
\r
3141 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
// ----- Error path: unwind everything allocated above, in reverse order. -----
3146 if ( buffersAllocated )
\r
3147 ASIODisposeBuffers();
\r
3148 drivers.removeCurrentDriver();
\r
3151 CloseHandle( handle->condition );
\r
3152 if ( handle->bufferInfos )
\r
3153 free( handle->bufferInfos );
\r
3155 stream_.apiHandle = 0;
\r
3158 for ( int i=0; i<2; i++ ) {
\r
3159 if ( stream_.userBuffer[i] ) {
\r
3160 free( stream_.userBuffer[i] );
\r
3161 stream_.userBuffer[i] = 0;
\r
3165 if ( stream_.deviceBuffer ) {
\r
3166 free( stream_.deviceBuffer );
\r
3167 stream_.deviceBuffer = 0;
\r
// Closes the open stream: stops it if running, disposes the ASIO buffers,
// unloads the driver, and frees the AsioHandle, user, and device buffers.
// Warns (without throwing) if no stream is open.
3173 void RtApiAsio :: closeStream()
\r
3175 if ( stream_.state == STREAM_CLOSED ) {
\r
3176 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3177 error( RtAudioError::WARNING );
\r
3181 if ( stream_.state == STREAM_RUNNING ) {
\r
// Mark stopped first so callbackEvent() bails out during teardown.
3182 stream_.state = STREAM_STOPPED;
\r
3185 ASIODisposeBuffers();
\r
3186 drivers.removeCurrentDriver();
\r
3188 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3190 CloseHandle( handle->condition );
\r
3191 if ( handle->bufferInfos )
\r
3192 free( handle->bufferInfos );
\r
3194 stream_.apiHandle = 0;
\r
3197 for ( int i=0; i<2; i++ ) {
\r
3198 if ( stream_.userBuffer[i] ) {
\r
3199 free( stream_.userBuffer[i] );
\r
3200 stream_.userBuffer[i] = 0;
\r
3204 if ( stream_.deviceBuffer ) {
\r
3205 free( stream_.deviceBuffer );
\r
3206 stream_.deviceBuffer = 0;
\r
3209 stream_.mode = UNINITIALIZED;
\r
3210 stream_.state = STREAM_CLOSED;
\r
3213 bool stopThreadCalled = false;
\r
// Starts the ASIO stream: calls ASIOStart(), resets the drain bookkeeping and
// the manual-reset condition event, and marks the stream RUNNING. Issues a
// WARNING if already running and SYSTEM_ERROR if ASIOStart() fails.
3215 void RtApiAsio :: startStream()
\r
3218 if ( stream_.state == STREAM_RUNNING ) {
\r
3219 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3220 error( RtAudioError::WARNING );
\r
3224 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3225 ASIOError result = ASIOStart();
\r
3226 if ( result != ASE_OK ) {
\r
3227 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3228 errorText_ = errorStream_.str();
\r
3232 handle->drainCounter = 0;
\r
3233 handle->internalDrain = false;
\r
// Ensure a previous drain's signal is not still pending.
3234 ResetEvent( handle->condition );
\r
3235 stream_.state = STREAM_RUNNING;
\r
3239 stopThreadCalled = false;
\r
3241 if ( result == ASE_OK ) return;
\r
3242 error( RtAudioError::SYSTEM_ERROR );
\r
// Stops the stream, first letting output drain: sets drainCounter and blocks
// on the condition event until callbackEvent() has flushed/zeroed the output,
// then calls ASIOStop(). Issues a WARNING if already stopped and SYSTEM_ERROR
// if ASIOStop() fails.
3245 void RtApiAsio :: stopStream()
\r
3248 if ( stream_.state == STREAM_STOPPED ) {
\r
3249 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3250 error( RtAudioError::WARNING );
\r
3254 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3255 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// drainCounter == 0 means no drain is in progress; start one and wait for
// callbackEvent() to SetEvent() when it finishes.
3256 if ( handle->drainCounter == 0 ) {
\r
3257 handle->drainCounter = 2;
\r
3258 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3262 stream_.state = STREAM_STOPPED;
\r
3264 ASIOError result = ASIOStop();
\r
3265 if ( result != ASE_OK ) {
\r
3266 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3267 errorText_ = errorStream_.str();
\r
3270 if ( result == ASE_OK ) return;
\r
3271 error( RtAudioError::SYSTEM_ERROR );
\r
// Aborts the stream. Intentionally identical to stopStream() (the immediate-
// abort path is commented out below because some drivers kept emitting sound
// unless the device buffers were drained/zeroed first).
3274 void RtApiAsio :: abortStream()
\r
3277 if ( stream_.state == STREAM_STOPPED ) {
\r
3278 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3279 error( RtAudioError::WARNING );
\r
3283 // The following lines were commented-out because some behavior was
\r
3284 // noted where the device buffers need to be zeroed to avoid
\r
3285 // continuing sound, even when the device buffers are completely
\r
3286 // disposed. So now, calling abort is the same as calling stop.
\r
3287 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3288 // handle->drainCounter = 2;
\r
3292 // This function will be called by a spawned thread when the user
\r
3293 // callback function signals that the stream should be stopped or
\r
3294 // aborted. It is necessary to handle it this way because the
\r
3295 // callbackEvent() function must return before the ASIOStop()
\r
3296 // function will return.
\r
// Thread entry point spawned from callbackEvent(): calls stopStream() from
// outside the ASIO callback, because ASIOStop() cannot complete until the
// driver's callbackEvent() invocation has returned.
3297 static unsigned __stdcall asioStopStream( void *ptr )
\r
3299 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3300 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3302 object->stopStream();
\r
3303 _endthreadex( 0 );
\r
// Per-buffer-switch work: runs the user callback (unless draining), then
// converts/byte-swaps and copies user data into the driver's non-interleaved
// output channel buffers and driver input data back into the user buffer.
// bufferIndex selects which half of ASIO's double buffer to use. Drain
// protocol: drainCounter 1/2 starts a drain (2 = write silence), and once it
// exceeds 3 the stream is marked STOPPING and either the condition event is
// signaled (external stop waiting in stopStream()) or a stop thread is
// spawned (callback-initiated stop).
3307 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3309 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3310 if ( stream_.state == STREAM_CLOSED ) {
\r
3311 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3312 error( RtAudioError::WARNING );
\r
3316 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3317 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3319 // Check if we were draining the stream and signal if finished.
\r
3320 if ( handle->drainCounter > 3 ) {
\r
3322 stream_.state = STREAM_STOPPING;
\r
3323 if ( handle->internalDrain == false )
\r
3324 SetEvent( handle->condition );
\r
3325 else { // spawn a thread to stop the stream
\r
3326 unsigned threadId;
\r
3327 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3328 &stream_.callbackInfo, 0, &threadId );
\r
3333 // Invoke user callback to get fresh output data UNLESS we are
\r
3334 // draining stream.
\r
3335 if ( handle->drainCounter == 0 ) {
\r
3336 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3337 double streamTime = getStreamTime();
\r
3338 RtAudioStreamStatus status = 0;
\r
3339 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3340 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3343 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3344 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3347 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3348 stream_.bufferSize, streamTime, status, info->userData );
\r
// Callback return 2 = abort immediately (stop thread + drain silence);
// return 1 = drain remaining output, then stop from within the callback.
3349 if ( cbReturnValue == 2 ) {
\r
3350 stream_.state = STREAM_STOPPING;
\r
3351 handle->drainCounter = 2;
\r
3352 unsigned threadId;
\r
3353 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3354 &stream_.callbackInfo, 0, &threadId );
\r
3357 else if ( cbReturnValue == 1 ) {
\r
3358 handle->drainCounter = 1;
\r
3359 handle->internalDrain = true;
\r
3363 unsigned int nChannels, bufferBytes, i, j;
\r
3364 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3365 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3367 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3369 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3371 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3372 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3373 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3377 else if ( stream_.doConvertBuffer[0] ) {
\r
// Convert user format/interleaving into the device buffer, then scatter
// each channel into its non-interleaved ASIO output buffer.
3379 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3380 if ( stream_.doByteSwap[0] )
\r
3381 byteSwapBuffer( stream_.deviceBuffer,
\r
3382 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3383 stream_.deviceFormat[0] );
\r
3385 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3386 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3387 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3388 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3394 if ( stream_.doByteSwap[0] )
\r
3395 byteSwapBuffer( stream_.userBuffer[0],
\r
3396 stream_.bufferSize * stream_.nUserChannels[0],
\r
3397 stream_.userFormat );
\r
3399 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3400 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3401 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3402 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
// While draining, count callbacks so the drain terminates after the
// remaining buffered audio has been played out.
3407 if ( handle->drainCounter ) {
\r
3408 handle->drainCounter++;
\r
3413 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3415 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3417 if (stream_.doConvertBuffer[1]) {
\r
3419 // Always interleave ASIO input data.
\r
3420 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3421 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3422 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3423 handle->bufferInfos[i].buffers[bufferIndex],
\r
3427 if ( stream_.doByteSwap[1] )
\r
3428 byteSwapBuffer( stream_.deviceBuffer,
\r
3429 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3430 stream_.deviceFormat[1] );
\r
3431 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3435 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3436 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3437 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3438 handle->bufferInfos[i].buffers[bufferIndex],
\r
3443 if ( stream_.doByteSwap[1] )
\r
3444 byteSwapBuffer( stream_.userBuffer[1],
\r
3445 stream_.bufferSize * stream_.nUserChannels[1],
\r
3446 stream_.userFormat );
\r
3451 // The following call was suggested by Malte Clasen. While the API
\r
3452 // documentation indicates it should not be required, some device
\r
3453 // drivers apparently do not function correctly without it.
\r
3454 ASIOOutputReady();
\r
3456 RtApi::tickStreamTime();
\r
// ASIO driver callback invoked when the driver reports a sample rate change
// (typically under external sync). Stops the stream, since RtAudio cannot
// continue with a rate different from the one the stream was opened with.
3460 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3462 // The ASIO documentation says that this usually only happens during
\r
3463 // external sync. Audio processing is not stopped by the driver,
\r
3464 // actual sample rate might not have even changed, maybe only the
\r
3465 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3468 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3470 object->stopStream();
\r
3472 catch ( RtAudioError &exception ) {
\r
3473 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3477 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
// ASIO driver message callback: answers driver queries (which selectors the
// host supports, host ASIO version, timeInfo/timeCode support) and logs
// driver requests such as reset and latency change. Return-value lines for
// several cases are not visible in this extract (line-number gaps).
3480 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3484 switch( selector ) {
\r
3485 case kAsioSelectorSupported:
\r
3486 if ( value == kAsioResetRequest
\r
3487 || value == kAsioEngineVersion
\r
3488 || value == kAsioResyncRequest
\r
3489 || value == kAsioLatenciesChanged
\r
3490 // The following three were added for ASIO 2.0, you don't
\r
3491 // necessarily have to support them.
\r
3492 || value == kAsioSupportsTimeInfo
\r
3493 || value == kAsioSupportsTimeCode
\r
3494 || value == kAsioSupportsInputMonitor)
\r
3497 case kAsioResetRequest:
\r
3498 // Defer the task and perform the reset of the driver during the
\r
3499 // next "safe" situation. You cannot reset the driver right now,
\r
3500 // as this code is called from the driver. Reset the driver is
\r
3501 // done by completely destruct is. I.e. ASIOStop(),
\r
3502 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3504 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3507 case kAsioResyncRequest:
\r
3508 // This informs the application that the driver encountered some
\r
3509 // non-fatal data loss. It is used for synchronization purposes
\r
3510 // of different media. Added mainly to work around the Win16Mutex
\r
3511 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3512 // which could lose data because the Mutex was held too long by
\r
3513 // another thread. However a driver can issue it in other
\r
3514 // situations, too.
\r
3515 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3519 case kAsioLatenciesChanged:
\r
3520 // This will inform the host application that the drivers were
\r
3521 // latencies changed. Beware, it this does not mean that the
\r
3522 // buffer sizes have changed! You might need to update internal
\r
3524 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3527 case kAsioEngineVersion:
\r
3528 // Return the supported ASIO version of the host application. If
\r
3529 // a host application does not implement this selector, ASIO 1.0
\r
3530 // is assumed by the driver.
\r
3533 case kAsioSupportsTimeInfo:
\r
3534 // Informs the driver whether the
\r
3535 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3536 // For compatibility with ASIO 1.0 drivers the host application
\r
3537 // should always support the "old" bufferSwitch method, too.
\r
3540 case kAsioSupportsTimeCode:
\r
3541 // Informs the driver whether application is interested in time
\r
3542 // code info. If an application does not need to know about time
\r
3543 // code, the driver has less work to do.
\r
// Maps an ASIOError code to a human-readable message via a static lookup
// table; returns "Unknown error." for unrecognized codes. (The 'Messages'
// struct definition is not visible in this extract.)
3550 static const char* getAsioErrorString( ASIOError result )
\r
3555 const char*message;
\r
3558 static const Messages m[] =
\r
3560 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3561 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3562 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3563 { ASE_InvalidMode, "Invalid mode." },
\r
3564 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3565 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3566 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3569 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3570 if ( m[i].value == result ) return m[i].message;
\r
3572 return "Unknown error.";
\r
3575 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3579 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3581 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3582 // - Introduces support for the Windows WASAPI API
\r
3583 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3584 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3585 // - Includes automatic internal conversion of sample rate, buffer size and channel count
\r
3590 #include <audioclient.h>
\r
3592 #include <mmdeviceapi.h>
\r
3593 #include <functiondiscoverykeys_devpkey.h>
\r
3595 //=============================================================================
\r
3597 #define SAFE_RELEASE( objectPtr )\
\r
3600 objectPtr->Release();\
\r
3601 objectPtr = NULL;\
\r
3604 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3606 //-----------------------------------------------------------------------------
\r
3608 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3609 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3610 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3611 // provide intermediate storage for read / write synchronization.
\r
3612 class WasapiBuffer
\r
3616 : buffer_( NULL ),
\r
3625 // sets the length of the internal ring buffer
\r
3626 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3629 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3631 bufferSize_ = bufferSize;
\r
3636 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3637 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3639 if ( !buffer || // incoming buffer is NULL
\r
3640 bufferSize == 0 || // incoming buffer has no data
\r
3641 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3646 unsigned int relOutIndex = outIndex_;
\r
3647 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3648 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3649 relOutIndex += bufferSize_;
\r
3652 // "in" index can end on the "out" index but cannot begin at it
\r
3653 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3654 return false; // not enough space between "in" index and "out" index
\r
3657 // copy buffer from external to internal
\r
3658 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3659 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3660 int fromInSize = bufferSize - fromZeroSize;
\r
3664 case RTAUDIO_SINT8:
\r
3665 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3666 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3668 case RTAUDIO_SINT16:
\r
3669 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3670 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3672 case RTAUDIO_SINT24:
\r
3673 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3674 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3676 case RTAUDIO_SINT32:
\r
3677 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3678 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3680 case RTAUDIO_FLOAT32:
\r
3681 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3682 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3684 case RTAUDIO_FLOAT64:
\r
3685 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3686 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3690 // update "in" index
\r
3691 inIndex_ += bufferSize;
\r
3692 inIndex_ %= bufferSize_;
\r
3697 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3698 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3700 if ( !buffer || // incoming buffer is NULL
\r
3701 bufferSize == 0 || // incoming buffer has no data
\r
3702 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3707 unsigned int relInIndex = inIndex_;
\r
3708 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3709 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3710 relInIndex += bufferSize_;
\r
3713 // "out" index can begin at and end on the "in" index
\r
3714 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3715 return false; // not enough space between "out" index and "in" index
\r
3718 // copy buffer from internal to external
\r
3719 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3720 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3721 int fromOutSize = bufferSize - fromZeroSize;
\r
3725 case RTAUDIO_SINT8:
\r
3726 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3727 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3729 case RTAUDIO_SINT16:
\r
3730 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3731 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3733 case RTAUDIO_SINT24:
\r
3734 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3735 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3737 case RTAUDIO_SINT32:
\r
3738 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3739 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3741 case RTAUDIO_FLOAT32:
\r
3742 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3743 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3745 case RTAUDIO_FLOAT64:
\r
3746 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3747 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3751 // update "out" index
\r
3752 outIndex_ += bufferSize;
\r
3753 outIndex_ %= bufferSize_;
\r
3760 unsigned int bufferSize_;
\r
3761 unsigned int inIndex_;
\r
3762 unsigned int outIndex_;
\r
3765 //-----------------------------------------------------------------------------
\r
3767 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate and
\r
3768 // channel counts between HW and the user. The convertBufferWasapi function is used to perform
\r
3769 // these conversions between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3770 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3771 // one rate and its multiple. RtApiWasapi will not populate a device's sample rate list with rates
\r
3772 // that may cause artifacts via this conversion.
\r
3773 void convertBufferWasapi( char* outBuffer,
\r
3774 const char* inBuffer,
\r
3775 const unsigned int& inChannelCount,
\r
3776 const unsigned int& outChannelCount,
\r
3777 const unsigned int& inSampleRate,
\r
3778 const unsigned int& outSampleRate,
\r
3779 const unsigned int& inSampleCount,
\r
3780 unsigned int& outSampleCount,
\r
3781 const RtAudioFormat& format )
\r
3783 // calculate the new outSampleCount and relative sampleStep
\r
3784 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3785 float sampleStep = 1.0f / sampleRatio;
\r
3786 float inSampleFraction = 0.0f;
\r
3787 unsigned int commonChannelCount = std::min( inChannelCount, outChannelCount );
\r
3789 outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio );
\r
3791 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3792 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3794 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3798 case RTAUDIO_SINT8:
\r
3799 memcpy( &( ( char* ) outBuffer )[ outSample * outChannelCount ], &( ( char* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( char ) );
\r
3801 case RTAUDIO_SINT16:
\r
3802 memcpy( &( ( short* ) outBuffer )[ outSample * outChannelCount ], &( ( short* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( short ) );
\r
3804 case RTAUDIO_SINT24:
\r
3805 memcpy( &( ( S24* ) outBuffer )[ outSample * outChannelCount ], &( ( S24* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( S24 ) );
\r
3807 case RTAUDIO_SINT32:
\r
3808 memcpy( &( ( int* ) outBuffer )[ outSample * outChannelCount ], &( ( int* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( int ) );
\r
3810 case RTAUDIO_FLOAT32:
\r
3811 memcpy( &( ( float* ) outBuffer )[ outSample * outChannelCount ], &( ( float* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( float ) );
\r
3813 case RTAUDIO_FLOAT64:
\r
3814 memcpy( &( ( double* ) outBuffer )[ outSample * outChannelCount ], &( ( double* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( double ) );
\r
3818 // jump to next in sample
\r
3819 inSampleFraction += sampleStep;
\r
3823 //-----------------------------------------------------------------------------
\r
3825 // A structure to hold various information related to the WASAPI implementation.
\r
3826 struct WasapiHandle
\r
3828 IAudioClient* captureAudioClient;
\r
3829 IAudioClient* renderAudioClient;
\r
3830 IAudioCaptureClient* captureClient;
\r
3831 IAudioRenderClient* renderClient;
\r
3832 HANDLE captureEvent;
\r
3833 HANDLE renderEvent;
\r
3836 : captureAudioClient( NULL ),
\r
3837 renderAudioClient( NULL ),
\r
3838 captureClient( NULL ),
\r
3839 renderClient( NULL ),
\r
3840 captureEvent( NULL ),
\r
3841 renderEvent( NULL ) {}
\r
3844 //=============================================================================
\r
3846 RtApiWasapi::RtApiWasapi()
\r
3847 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3849 // WASAPI can run either apartment or multi-threaded
\r
3850 HRESULT hr = CoInitialize( NULL );
\r
3852 if ( !FAILED( hr ) )
\r
3853 coInitialized_ = true;
\r
3855 // Instantiate device enumerator
\r
3856 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3857 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3858 ( void** ) &deviceEnumerator_ );
\r
3860 if ( FAILED( hr ) ) {
\r
3861 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3862 error( RtAudioError::DRIVER_ERROR );
\r
3866 //-----------------------------------------------------------------------------
\r
3868 RtApiWasapi::~RtApiWasapi()
\r
3870 // if this object previously called CoInitialize()
\r
3871 if ( coInitialized_ ) {
\r
3875 if ( stream_.state != STREAM_CLOSED ) {
\r
3879 SAFE_RELEASE( deviceEnumerator_ );
\r
3882 //=============================================================================
\r
3884 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3886 unsigned int captureDeviceCount = 0;
\r
3887 unsigned int renderDeviceCount = 0;
\r
3889 IMMDeviceCollection* captureDevices = NULL;
\r
3890 IMMDeviceCollection* renderDevices = NULL;
\r
3892 // Count capture devices
\r
3893 errorText_.clear();
\r
3894 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3895 if ( FAILED( hr ) ) {
\r
3896 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3900 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3901 if ( FAILED( hr ) ) {
\r
3902 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3906 // Count render devices
\r
3907 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3908 if ( FAILED( hr ) ) {
\r
3909 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
3913 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3914 if ( FAILED( hr ) ) {
\r
3915 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
3920 // release all references
\r
3921 SAFE_RELEASE( captureDevices );
\r
3922 SAFE_RELEASE( renderDevices );
\r
3924 if ( errorText_.empty() )
\r
3925 return captureDeviceCount + renderDeviceCount;
\r
3927 error( RtAudioError::DRIVER_ERROR );
\r
3931 //-----------------------------------------------------------------------------
\r
3933 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
3935 RtAudio::DeviceInfo info;
\r
3936 unsigned int captureDeviceCount = 0;
\r
3937 unsigned int renderDeviceCount = 0;
\r
3938 std::wstring deviceName;
\r
3939 std::string defaultDeviceName;
\r
3940 bool isCaptureDevice = false;
\r
3942 PROPVARIANT deviceNameProp;
\r
3943 PROPVARIANT defaultDeviceNameProp;
\r
3945 IMMDeviceCollection* captureDevices = NULL;
\r
3946 IMMDeviceCollection* renderDevices = NULL;
\r
3947 IMMDevice* devicePtr = NULL;
\r
3948 IMMDevice* defaultDevicePtr = NULL;
\r
3949 IAudioClient* audioClient = NULL;
\r
3950 IPropertyStore* devicePropStore = NULL;
\r
3951 IPropertyStore* defaultDevicePropStore = NULL;
\r
3953 WAVEFORMATEX* deviceFormat = NULL;
\r
3954 WAVEFORMATEX* closestMatchFormat = NULL;
\r
3957 info.probed = false;
\r
3959 // Count capture devices
\r
3960 errorText_.clear();
\r
3961 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
3962 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3963 if ( FAILED( hr ) ) {
\r
3964 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
3968 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3969 if ( FAILED( hr ) ) {
\r
3970 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
3974 // Count render devices
\r
3975 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3976 if ( FAILED( hr ) ) {
\r
3977 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
3981 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3982 if ( FAILED( hr ) ) {
\r
3983 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
3987 // validate device index
\r
3988 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
3989 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
3990 errorType = RtAudioError::INVALID_USE;
\r
3994 // determine whether index falls within capture or render devices
\r
3995 if ( device >= renderDeviceCount ) {
\r
3996 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
3997 if ( FAILED( hr ) ) {
\r
3998 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4001 isCaptureDevice = true;
\r
4004 hr = renderDevices->Item( device, &devicePtr );
\r
4005 if ( FAILED( hr ) ) {
\r
4006 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4009 isCaptureDevice = false;
\r
4012 // get default device name
\r
4013 if ( isCaptureDevice ) {
\r
4014 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4015 if ( FAILED( hr ) ) {
\r
4016 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4021 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4022 if ( FAILED( hr ) ) {
\r
4023 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4028 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4029 if ( FAILED( hr ) ) {
\r
4030 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4033 PropVariantInit( &defaultDeviceNameProp );
\r
4035 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4036 if ( FAILED( hr ) ) {
\r
4037 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4041 deviceName = defaultDeviceNameProp.pwszVal;
\r
4042 defaultDeviceName = std::string( deviceName.begin(), deviceName.end() );
\r
4045 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4046 if ( FAILED( hr ) ) {
\r
4047 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4051 PropVariantInit( &deviceNameProp );
\r
4053 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4054 if ( FAILED( hr ) ) {
\r
4055 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4059 deviceName = deviceNameProp.pwszVal;
\r
4060 info.name = std::string( deviceName.begin(), deviceName.end() );
\r
4063 if ( isCaptureDevice ) {
\r
4064 info.isDefaultInput = info.name == defaultDeviceName;
\r
4065 info.isDefaultOutput = false;
\r
4068 info.isDefaultInput = false;
\r
4069 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4073 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4074 if ( FAILED( hr ) ) {
\r
4075 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4079 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4080 if ( FAILED( hr ) ) {
\r
4081 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4085 if ( isCaptureDevice ) {
\r
4086 info.inputChannels = deviceFormat->nChannels;
\r
4087 info.outputChannels = 0;
\r
4088 info.duplexChannels = 0;
\r
4091 info.inputChannels = 0;
\r
4092 info.outputChannels = deviceFormat->nChannels;
\r
4093 info.duplexChannels = 0;
\r
4097 info.sampleRates.clear();
\r
4099 // allow support for all sample rates as we have a built-in sample rate converter
\r
4100 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4101 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4105 info.nativeFormats = 0;
\r
4107 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4108 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4109 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4111 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4112 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4114 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4115 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4118 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4119 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4120 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4122 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4123 info.nativeFormats |= RTAUDIO_SINT8;
\r
4125 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4126 info.nativeFormats |= RTAUDIO_SINT16;
\r
4128 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4129 info.nativeFormats |= RTAUDIO_SINT24;
\r
4131 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4132 info.nativeFormats |= RTAUDIO_SINT32;
\r
4137 info.probed = true;
\r
4140 // release all references
\r
4141 PropVariantClear( &deviceNameProp );
\r
4142 PropVariantClear( &defaultDeviceNameProp );
\r
4144 SAFE_RELEASE( captureDevices );
\r
4145 SAFE_RELEASE( renderDevices );
\r
4146 SAFE_RELEASE( devicePtr );
\r
4147 SAFE_RELEASE( defaultDevicePtr );
\r
4148 SAFE_RELEASE( audioClient );
\r
4149 SAFE_RELEASE( devicePropStore );
\r
4150 SAFE_RELEASE( defaultDevicePropStore );
\r
4152 CoTaskMemFree( deviceFormat );
\r
4153 CoTaskMemFree( closestMatchFormat );
\r
4155 if ( !errorText_.empty() )
\r
4156 error( errorType );
\r
4160 //-----------------------------------------------------------------------------
\r
4162 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4164 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4165 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4173 //-----------------------------------------------------------------------------
\r
4175 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4177 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4178 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4186 //-----------------------------------------------------------------------------
\r
4188 void RtApiWasapi::closeStream( void )
\r
4190 if ( stream_.state == STREAM_CLOSED ) {
\r
4191 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4192 error( RtAudioError::WARNING );
\r
4196 if ( stream_.state != STREAM_STOPPED )
\r
4199 // clean up stream memory
\r
4200 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4201 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4203 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4204 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4206 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4207 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4209 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4210 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4212 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4213 stream_.apiHandle = NULL;
\r
4215 for ( int i = 0; i < 2; i++ ) {
\r
4216 if ( stream_.userBuffer[i] ) {
\r
4217 free( stream_.userBuffer[i] );
\r
4218 stream_.userBuffer[i] = 0;
\r
4222 if ( stream_.deviceBuffer ) {
\r
4223 free( stream_.deviceBuffer );
\r
4224 stream_.deviceBuffer = 0;
\r
4227 // update stream state
\r
4228 stream_.state = STREAM_CLOSED;
\r
4231 //-----------------------------------------------------------------------------
\r
4233 void RtApiWasapi::startStream( void )
\r
4237 if ( stream_.state == STREAM_RUNNING ) {
\r
4238 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4239 error( RtAudioError::WARNING );
\r
4243 // update stream state
\r
4244 stream_.state = STREAM_RUNNING;
\r
4246 // create WASAPI stream thread
\r
4247 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4249 if ( !stream_.callbackInfo.thread ) {
\r
4250 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4251 error( RtAudioError::THREAD_ERROR );
\r
4254 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4255 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4259 //-----------------------------------------------------------------------------
\r
4261 void RtApiWasapi::stopStream( void )
\r
4265 if ( stream_.state == STREAM_STOPPED ) {
\r
4266 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4267 error( RtAudioError::WARNING );
\r
4271 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4272 stream_.state = STREAM_STOPPING;
\r
4274 // wait until stream thread is stopped
\r
4275 while( stream_.state != STREAM_STOPPED ) {
\r
4279 // Wait for the last buffer to play before stopping.
\r
4280 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4282 // stop capture client if applicable
\r
4283 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4284 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4285 if ( FAILED( hr ) ) {
\r
4286 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4287 error( RtAudioError::DRIVER_ERROR );
\r
4292 // stop render client if applicable
\r
4293 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4294 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4295 if ( FAILED( hr ) ) {
\r
4296 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4297 error( RtAudioError::DRIVER_ERROR );
\r
4302 // close thread handle
\r
4303 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4304 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4305 error( RtAudioError::THREAD_ERROR );
\r
4309 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4312 //-----------------------------------------------------------------------------
\r
4314 void RtApiWasapi::abortStream( void )
\r
4318 if ( stream_.state == STREAM_STOPPED ) {
\r
4319 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4320 error( RtAudioError::WARNING );
\r
4324 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4325 stream_.state = STREAM_STOPPING;
\r
4327 // wait until stream thread is stopped
\r
4328 while ( stream_.state != STREAM_STOPPED ) {
\r
4332 // stop capture client if applicable
\r
4333 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4334 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4335 if ( FAILED( hr ) ) {
\r
4336 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4337 error( RtAudioError::DRIVER_ERROR );
\r
4342 // stop render client if applicable
\r
4343 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4344 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4345 if ( FAILED( hr ) ) {
\r
4346 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4347 error( RtAudioError::DRIVER_ERROR );
\r
4352 // close thread handle
\r
4353 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4354 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4355 error( RtAudioError::THREAD_ERROR );
\r
4359 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4362 //-----------------------------------------------------------------------------
\r
4364 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4365 unsigned int firstChannel, unsigned int sampleRate,
\r
4366 RtAudioFormat format, unsigned int* bufferSize,
\r
4367 RtAudio::StreamOptions* options )
\r
4369 bool methodResult = FAILURE;
\r
4370 unsigned int captureDeviceCount = 0;
\r
4371 unsigned int renderDeviceCount = 0;
\r
4373 IMMDeviceCollection* captureDevices = NULL;
\r
4374 IMMDeviceCollection* renderDevices = NULL;
\r
4375 IMMDevice* devicePtr = NULL;
\r
4376 WAVEFORMATEX* deviceFormat = NULL;
\r
4377 unsigned int bufferBytes;
\r
4378 stream_.state = STREAM_STOPPED;
\r
4380 // create API Handle if not already created
\r
4381 if ( !stream_.apiHandle )
\r
4382 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4384 // Count capture devices
\r
4385 errorText_.clear();
\r
4386 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4387 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4388 if ( FAILED( hr ) ) {
\r
4389 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4393 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4394 if ( FAILED( hr ) ) {
\r
4395 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4399 // Count render devices
\r
4400 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4401 if ( FAILED( hr ) ) {
\r
4402 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4406 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4407 if ( FAILED( hr ) ) {
\r
4408 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4412 // validate device index
\r
4413 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4414 errorType = RtAudioError::INVALID_USE;
\r
4415 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4419 // determine whether index falls within capture or render devices
\r
4420 if ( device >= renderDeviceCount ) {
\r
4421 if ( mode != INPUT ) {
\r
4422 errorType = RtAudioError::INVALID_USE;
\r
4423 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4427 // retrieve captureAudioClient from devicePtr
\r
4428 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4430 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4431 if ( FAILED( hr ) ) {
\r
4432 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4436 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4437 NULL, ( void** ) &captureAudioClient );
\r
4438 if ( FAILED( hr ) ) {
\r
4439 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4443 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4444 if ( FAILED( hr ) ) {
\r
4445 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4449 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4450 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4453 if ( mode != OUTPUT ) {
\r
4454 errorType = RtAudioError::INVALID_USE;
\r
4455 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4459 // retrieve renderAudioClient from devicePtr
\r
4460 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4462 hr = renderDevices->Item( device, &devicePtr );
\r
4463 if ( FAILED( hr ) ) {
\r
4464 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4468 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4469 NULL, ( void** ) &renderAudioClient );
\r
4470 if ( FAILED( hr ) ) {
\r
4471 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4475 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4476 if ( FAILED( hr ) ) {
\r
4477 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4481 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4482 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4485 // fill stream data
\r
4486 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4487 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4488 stream_.mode = DUPLEX;
\r
4491 stream_.mode = mode;
\r
4494 stream_.device[mode] = device;
\r
4495 stream_.doByteSwap[mode] = false;
\r
4496 stream_.sampleRate = sampleRate;
\r
4497 stream_.bufferSize = *bufferSize;
\r
4498 stream_.nBuffers = 1;
\r
4499 stream_.nUserChannels[mode] = channels;
\r
4500 stream_.channelOffset[mode] = firstChannel;
\r
4501 stream_.userFormat = format;
\r
4502 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4504 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4505 stream_.userInterleaved = false;
\r
4507 stream_.userInterleaved = true;
\r
4508 stream_.deviceInterleaved[mode] = true;
\r
4510 // Set flags for buffer conversion.
\r
4511 stream_.doConvertBuffer[mode] = false;
\r
4512 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
4513 stream_.doConvertBuffer[mode] = true;
\r
4514 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4515 stream_.nUserChannels[mode] > 1 )
\r
4516 stream_.doConvertBuffer[mode] = true;
\r
4518 if ( stream_.doConvertBuffer[mode] )
\r
4519 setConvertInfo( mode, 0 );
\r
4521 // Allocate necessary internal buffers
\r
4522 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4524 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4525 if ( !stream_.userBuffer[mode] ) {
\r
4526 errorType = RtAudioError::MEMORY_ERROR;
\r
4527 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4531 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4532 stream_.callbackInfo.priority = 15;
\r
4534 stream_.callbackInfo.priority = 0;
\r
4536 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4537 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4539 methodResult = SUCCESS;
\r
4543 SAFE_RELEASE( captureDevices );
\r
4544 SAFE_RELEASE( renderDevices );
\r
4545 SAFE_RELEASE( devicePtr );
\r
4546 CoTaskMemFree( deviceFormat );
\r
4548 // if method failed, close the stream
\r
4549 if ( methodResult == FAILURE )
\r
4552 if ( !errorText_.empty() )
\r
4553 error( errorType );
\r
4554 return methodResult;
\r
4557 //=============================================================================
\r
4559 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4562 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4567 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4570 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4575 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4578 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4583 //-----------------------------------------------------------------------------
\r
4585 void RtApiWasapi::wasapiThread()
\r
4587 // as this is a new thread, we must CoInitialize it
\r
4588 CoInitialize( NULL );
\r
4592 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4593 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4594 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
\r
4595 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
\r
4596 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
\r
4597 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
\r
4599 WAVEFORMATEX* captureFormat = NULL;
\r
4600 WAVEFORMATEX* renderFormat = NULL;
\r
4601 float captureSrRatio = 0.0f;
\r
4602 float renderSrRatio = 0.0f;
\r
4603 WasapiBuffer captureBuffer;
\r
4604 WasapiBuffer renderBuffer;
\r
4606 // declare local stream variables
\r
4607 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
\r
4608 BYTE* streamBuffer = NULL;
\r
4609 unsigned long captureFlags = 0;
\r
4610 unsigned int bufferFrameCount = 0;
\r
4611 unsigned int numFramesPadding = 0;
\r
4612 unsigned int convBufferSize = 0;
\r
4613 bool callbackPushed = false;
\r
4614 bool callbackPulled = false;
\r
4615 bool callbackStopped = false;
\r
4616 int callbackResult = 0;
\r
4618 // convBuffer is used to store converted buffers between WASAPI and the user
\r
4619 char* convBuffer = NULL;
\r
4620 unsigned int convBuffSize = 0;
\r
4621 unsigned int deviceBuffSize = 0;
\r
4623 errorText_.clear();
\r
4624 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4626 // Attempt to assign "Pro Audio" characteristic to thread
\r
4627 HMODULE AvrtDll = LoadLibrary( "AVRT.dll" );
\r
4629 DWORD taskIndex = 0;
\r
4630 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
\r
4631 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
\r
4632 FreeLibrary( AvrtDll );
\r
4635 // start capture stream if applicable
\r
4636 if ( captureAudioClient ) {
\r
4637 hr = captureAudioClient->GetMixFormat( &captureFormat );
\r
4638 if ( FAILED( hr ) ) {
\r
4639 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4643 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
\r
4645 // initialize capture stream according to desire buffer size
\r
4646 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
\r
4647 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
\r
4649 if ( !captureClient ) {
\r
4650 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4651 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4652 desiredBufferPeriod,
\r
4653 desiredBufferPeriod,
\r
4656 if ( FAILED( hr ) ) {
\r
4657 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
\r
4661 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
\r
4662 ( void** ) &captureClient );
\r
4663 if ( FAILED( hr ) ) {
\r
4664 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
\r
4668 // configure captureEvent to trigger on every available capture buffer
\r
4669 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4670 if ( !captureEvent ) {
\r
4671 errorType = RtAudioError::SYSTEM_ERROR;
\r
4672 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
\r
4676 hr = captureAudioClient->SetEventHandle( captureEvent );
\r
4677 if ( FAILED( hr ) ) {
\r
4678 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
\r
4682 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
\r
4683 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
\r
4686 unsigned int inBufferSize = 0;
\r
4687 hr = captureAudioClient->GetBufferSize( &inBufferSize );
\r
4688 if ( FAILED( hr ) ) {
\r
4689 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
\r
4693 // scale outBufferSize according to stream->user sample rate ratio
\r
4694 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
\r
4695 inBufferSize *= stream_.nDeviceChannels[INPUT];
\r
4697 // set captureBuffer size
\r
4698 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
\r
4700 // reset the capture stream
\r
4701 hr = captureAudioClient->Reset();
\r
4702 if ( FAILED( hr ) ) {
\r
4703 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
\r
4707 // start the capture stream
\r
4708 hr = captureAudioClient->Start();
\r
4709 if ( FAILED( hr ) ) {
\r
4710 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
\r
4715 // start render stream if applicable
\r
4716 if ( renderAudioClient ) {
\r
4717 hr = renderAudioClient->GetMixFormat( &renderFormat );
\r
4718 if ( FAILED( hr ) ) {
\r
4719 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4723 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
\r
4725 // initialize render stream according to desire buffer size
\r
4726 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
\r
4727 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
\r
4729 if ( !renderClient ) {
\r
4730 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4731 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4732 desiredBufferPeriod,
\r
4733 desiredBufferPeriod,
\r
4736 if ( FAILED( hr ) ) {
\r
4737 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
\r
4741 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
\r
4742 ( void** ) &renderClient );
\r
4743 if ( FAILED( hr ) ) {
\r
4744 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
\r
4748 // configure renderEvent to trigger on every available render buffer
\r
4749 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4750 if ( !renderEvent ) {
\r
4751 errorType = RtAudioError::SYSTEM_ERROR;
\r
4752 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
\r
4756 hr = renderAudioClient->SetEventHandle( renderEvent );
\r
4757 if ( FAILED( hr ) ) {
\r
4758 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
\r
4762 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
\r
4763 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
\r
4766 unsigned int outBufferSize = 0;
\r
4767 hr = renderAudioClient->GetBufferSize( &outBufferSize );
\r
4768 if ( FAILED( hr ) ) {
\r
4769 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
\r
4773 // scale inBufferSize according to user->stream sample rate ratio
\r
4774 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
\r
4775 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
\r
4777 // set renderBuffer size
\r
4778 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4780 // reset the render stream
\r
4781 hr = renderAudioClient->Reset();
\r
4782 if ( FAILED( hr ) ) {
\r
4783 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
\r
4787 // start the render stream
\r
4788 hr = renderAudioClient->Start();
\r
4789 if ( FAILED( hr ) ) {
\r
4790 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
\r
4795 if ( stream_.mode == INPUT ) {
\r
4796 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4797 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4799 else if ( stream_.mode == OUTPUT ) {
\r
4800 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4801 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4803 else if ( stream_.mode == DUPLEX ) {
\r
4804 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4805 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4806 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4807 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4810 convBuffer = ( char* ) malloc( convBuffSize );
\r
4811 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
\r
4812 if ( !convBuffer || !stream_.deviceBuffer ) {
\r
4813 errorType = RtAudioError::MEMORY_ERROR;
\r
4814 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
\r
4818 // stream process loop
\r
4819 while ( stream_.state != STREAM_STOPPING ) {
\r
4820 if ( !callbackPulled ) {
\r
4823 // 1. Pull callback buffer from inputBuffer
\r
4824 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
\r
4825 // Convert callback buffer to user format
\r
4827 if ( captureAudioClient ) {
\r
4828 // Pull callback buffer from inputBuffer
\r
4829 callbackPulled = captureBuffer.pullBuffer( convBuffer,
\r
4830 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
\r
4831 stream_.deviceFormat[INPUT] );
\r
4833 if ( callbackPulled ) {
\r
4834 // Convert callback buffer to user sample rate and channel count
\r
4835 convertBufferWasapi( stream_.deviceBuffer,
\r
4837 stream_.nDeviceChannels[INPUT],
\r
4838 stream_.nUserChannels[INPUT],
\r
4839 captureFormat->nSamplesPerSec,
\r
4840 stream_.sampleRate,
\r
4841 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
\r
4843 stream_.deviceFormat[INPUT] );
\r
4845 if ( stream_.doConvertBuffer[INPUT] ) {
\r
4846 // Convert callback buffer to user format
\r
4847 convertBuffer( stream_.userBuffer[INPUT],
\r
4848 stream_.deviceBuffer,
\r
4849 stream_.convertInfo[INPUT] );
\r
4852 // no conversion, simple copy deviceBuffer to userBuffer
\r
4853 memcpy( stream_.userBuffer[INPUT],
\r
4854 stream_.deviceBuffer,
\r
4855 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
\r
4860 // if there is no capture stream, set callbackPulled flag
\r
4861 callbackPulled = true;
\r
4864 // Execute Callback
\r
4865 // ================
\r
4866 // 1. Execute user callback method
\r
4867 // 2. Handle return value from callback
\r
4869 // if callback has not requested the stream to stop
\r
4870 if ( callbackPulled && !callbackStopped ) {
\r
4871 // Execute user callback method
\r
4872 callbackResult = callback( stream_.userBuffer[OUTPUT],
\r
4873 stream_.userBuffer[INPUT],
\r
4874 stream_.bufferSize,
\r
4876 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
\r
4877 stream_.callbackInfo.userData );
\r
4879 // Handle return value from callback
\r
4880 if ( callbackResult == 1 ) {
\r
4881 // instantiate a thread to stop this thread
\r
4882 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
\r
4883 if ( !threadHandle ) {
\r
4884 errorType = RtAudioError::THREAD_ERROR;
\r
4885 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
\r
4888 else if ( !CloseHandle( threadHandle ) ) {
\r
4889 errorType = RtAudioError::THREAD_ERROR;
\r
4890 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
\r
4894 callbackStopped = true;
\r
4896 else if ( callbackResult == 2 ) {
\r
4897 // instantiate a thread to stop this thread
\r
4898 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
\r
4899 if ( !threadHandle ) {
\r
4900 errorType = RtAudioError::THREAD_ERROR;
\r
4901 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
\r
4904 else if ( !CloseHandle( threadHandle ) ) {
\r
4905 errorType = RtAudioError::THREAD_ERROR;
\r
4906 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
\r
4910 callbackStopped = true;
\r
4915 // Callback Output
\r
4916 // ===============
\r
4917 // 1. Convert callback buffer to stream format
\r
4918 // 2. Convert callback buffer to stream sample rate and channel count
\r
4919 // 3. Push callback buffer into outputBuffer
\r
4921 if ( renderAudioClient && callbackPulled ) {
\r
4922 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
4923 // Convert callback buffer to stream format
\r
4924 convertBuffer( stream_.deviceBuffer,
\r
4925 stream_.userBuffer[OUTPUT],
\r
4926 stream_.convertInfo[OUTPUT] );
\r
4928 // Convert callback buffer to stream sample rate and channel count
\r
4929 convertBufferWasapi( convBuffer,
\r
4930 stream_.deviceBuffer,
\r
4931 stream_.nUserChannels[OUTPUT],
\r
4932 stream_.nDeviceChannels[OUTPUT],
\r
4933 stream_.sampleRate,
\r
4934 renderFormat->nSamplesPerSec,
\r
4935 stream_.bufferSize,
\r
4937 stream_.deviceFormat[OUTPUT] );
\r
4940 // Convert callback buffer to stream sample rate and channel count
\r
4941 convertBufferWasapi( convBuffer,
\r
4942 stream_.userBuffer[OUTPUT],
\r
4943 stream_.nUserChannels[OUTPUT],
\r
4944 stream_.nDeviceChannels[OUTPUT],
\r
4945 stream_.sampleRate,
\r
4946 renderFormat->nSamplesPerSec,
\r
4947 stream_.bufferSize,
\r
4949 stream_.deviceFormat[OUTPUT] );
\r
4952 // Push callback buffer into outputBuffer
\r
4953 callbackPushed = renderBuffer.pushBuffer( convBuffer,
\r
4954 convBufferSize * stream_.nDeviceChannels[OUTPUT],
\r
4955 stream_.deviceFormat[OUTPUT] );
\r
4960 // 1. Get capture buffer from stream
\r
4961 // 2. Push capture buffer into inputBuffer
\r
4962 // 3. If 2. was successful: Release capture buffer
\r
4964 if ( captureAudioClient ) {
\r
4965 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
\r
4966 if ( !callbackPulled ) {
\r
4967 WaitForSingleObject( captureEvent, INFINITE );
\r
4970 // Get capture buffer from stream
\r
4971 hr = captureClient->GetBuffer( &streamBuffer,
\r
4972 &bufferFrameCount,
\r
4973 &captureFlags, NULL, NULL );
\r
4974 if ( FAILED( hr ) ) {
\r
4975 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
\r
4979 if ( bufferFrameCount != 0 ) {
\r
4980 // Push capture buffer into inputBuffer
\r
4981 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
\r
4982 bufferFrameCount * stream_.nDeviceChannels[INPUT],
\r
4983 stream_.deviceFormat[INPUT] ) )
\r
4985 // Release capture buffer
\r
4986 hr = captureClient->ReleaseBuffer( bufferFrameCount );
\r
4987 if ( FAILED( hr ) ) {
\r
4988 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
4994 // Inform WASAPI that capture was unsuccessful
\r
4995 hr = captureClient->ReleaseBuffer( 0 );
\r
4996 if ( FAILED( hr ) ) {
\r
4997 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5004 // Inform WASAPI that capture was unsuccessful
\r
5005 hr = captureClient->ReleaseBuffer( 0 );
\r
5006 if ( FAILED( hr ) ) {
\r
5007 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5015 // 1. Get render buffer from stream
\r
5016 // 2. Pull next buffer from outputBuffer
\r
5017 // 3. If 2. was successful: Fill render buffer with next buffer
\r
5018 // Release render buffer
\r
5020 if ( renderAudioClient ) {
\r
5021 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
\r
5022 if ( callbackPulled && !callbackPushed ) {
\r
5023 WaitForSingleObject( renderEvent, INFINITE );
\r
5026 // Get render buffer from stream
\r
5027 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
\r
5028 if ( FAILED( hr ) ) {
\r
5029 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
\r
5033 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
\r
5034 if ( FAILED( hr ) ) {
\r
5035 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
\r
5039 bufferFrameCount -= numFramesPadding;
\r
5041 if ( bufferFrameCount != 0 ) {
\r
5042 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
\r
5043 if ( FAILED( hr ) ) {
\r
5044 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
\r
5048 // Pull next buffer from outputBuffer
\r
5049 // Fill render buffer with next buffer
\r
5050 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
\r
5051 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
\r
5052 stream_.deviceFormat[OUTPUT] ) )
\r
5054 // Release render buffer
\r
5055 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
\r
5056 if ( FAILED( hr ) ) {
\r
5057 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5063 // Inform WASAPI that render was unsuccessful
\r
5064 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5065 if ( FAILED( hr ) ) {
\r
5066 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5073 // Inform WASAPI that render was unsuccessful
\r
5074 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5075 if ( FAILED( hr ) ) {
\r
5076 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5082 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
\r
5083 if ( callbackPushed ) {
\r
5084 callbackPulled = false;
\r
5087 // tick stream time
\r
5088 RtApi::tickStreamTime();
\r
5093 CoTaskMemFree( captureFormat );
\r
5094 CoTaskMemFree( renderFormat );
\r
5096 //delete convBuffer;
\r
5097 free ( convBuffer );
\r
5101 // update stream state
\r
5102 stream_.state = STREAM_STOPPED;
\r
5104 if ( errorText_.empty() )
\r
5107 error( errorType );
\r
5110 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5114 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5116 // Modified by Robin Davies, October 2005
\r
5117 // - Improvements to DirectX pointer chasing.
\r
5118 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5119 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5120 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5121 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5123 #include <dsound.h>
\r
5124 #include <assert.h>
\r
5125 #include <algorithm>
\r
5127 #if defined(__MINGW32__)
\r
5128 // missing from latest mingw winapi
\r
5129 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5130 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5131 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5132 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5135 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5137 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5138 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5141 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5143 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5144 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5145 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5146 return pointer >= earlierPointer && pointer < laterPointer;
\r
5149 // A structure to hold various information related to the DirectSound
\r
5150 // API implementation.
\r
5152 unsigned int drainCounter; // Tracks callback counts when draining
\r
5153 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
5157 UINT bufferPointer[2];
\r
5158 DWORD dsBufferSize[2];
\r
5159 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
5163 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5166 // Declarations for utility functions, callbacks, and structures
\r
5167 // specific to the DirectSound implementation.
\r
5168 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5169 LPCTSTR description,
\r
5171 LPVOID lpContext );
\r
5173 static const char* getErrorString( int code );
\r
5175 static unsigned __stdcall callbackHandler( void *ptr );
\r
5184 : found(false) { validId[0] = false; validId[1] = false; }
\r
5187 struct DsProbeData {
\r
5189 std::vector<struct DsDevice>* dsDevices;
\r
5192 RtApiDs :: RtApiDs()
\r
5194 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5195 // accept whatever the mainline chose for a threading model.
\r
5196 coInitialized_ = false;
\r
5197 HRESULT hr = CoInitialize( NULL );
\r
5198 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5201 RtApiDs :: ~RtApiDs()
\r
5203 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5204 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5207 // The DirectSound default output is always the first device.
\r
5208 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5213 // The DirectSound default input is always the first input device,
\r
5214 // which is the first capture device enumerated.
\r
5215 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
5220 unsigned int RtApiDs :: getDeviceCount( void )
\r
5222 // Set query flag for previously found devices to false, so that we
\r
5223 // can check for any devices that have disappeared.
\r
5224 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5225 dsDevices[i].found = false;
\r
5227 // Query DirectSound devices.
\r
5228 struct DsProbeData probeInfo;
\r
5229 probeInfo.isInput = false;
\r
5230 probeInfo.dsDevices = &dsDevices;
\r
5231 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5232 if ( FAILED( result ) ) {
\r
5233 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5234 errorText_ = errorStream_.str();
\r
5235 error( RtAudioError::WARNING );
\r
5238 // Query DirectSoundCapture devices.
\r
5239 probeInfo.isInput = true;
\r
5240 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5241 if ( FAILED( result ) ) {
\r
5242 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5243 errorText_ = errorStream_.str();
\r
5244 error( RtAudioError::WARNING );
\r
5247 // Clean out any devices that may have disappeared.
\r
5248 std::vector< int > indices;
\r
5249 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5250 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
5251 //unsigned int nErased = 0;
\r
5252 for ( unsigned int i=0; i<indices.size(); i++ )
\r
5253 dsDevices.erase( dsDevices.begin()+indices[i] );
\r
5254 //dsDevices.erase( dsDevices.begin()-nErased++ );
\r
5256 return static_cast<unsigned int>(dsDevices.size());
\r
5259 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5261 RtAudio::DeviceInfo info;
\r
5262 info.probed = false;
\r
5264 if ( dsDevices.size() == 0 ) {
\r
5265 // Force a query of all devices
\r
5267 if ( dsDevices.size() == 0 ) {
\r
5268 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5269 error( RtAudioError::INVALID_USE );
\r
5274 if ( device >= dsDevices.size() ) {
\r
5275 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5276 error( RtAudioError::INVALID_USE );
\r
5281 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5283 LPDIRECTSOUND output;
\r
5285 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5286 if ( FAILED( result ) ) {
\r
5287 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5288 errorText_ = errorStream_.str();
\r
5289 error( RtAudioError::WARNING );
\r
5293 outCaps.dwSize = sizeof( outCaps );
\r
5294 result = output->GetCaps( &outCaps );
\r
5295 if ( FAILED( result ) ) {
\r
5296 output->Release();
\r
5297 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5298 errorText_ = errorStream_.str();
\r
5299 error( RtAudioError::WARNING );
\r
5303 // Get output channel information.
\r
5304 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5306 // Get sample rate information.
\r
5307 info.sampleRates.clear();
\r
5308 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5309 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5310 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
\r
5311 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5314 // Get format information.
\r
5315 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5316 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5318 output->Release();
\r
5320 if ( getDefaultOutputDevice() == device )
\r
5321 info.isDefaultOutput = true;
\r
5323 if ( dsDevices[ device ].validId[1] == false ) {
\r
5324 info.name = dsDevices[ device ].name;
\r
5325 info.probed = true;
\r
5331 LPDIRECTSOUNDCAPTURE input;
\r
5332 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5333 if ( FAILED( result ) ) {
\r
5334 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5335 errorText_ = errorStream_.str();
\r
5336 error( RtAudioError::WARNING );
\r
5341 inCaps.dwSize = sizeof( inCaps );
\r
5342 result = input->GetCaps( &inCaps );
\r
5343 if ( FAILED( result ) ) {
\r
5345 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5346 errorText_ = errorStream_.str();
\r
5347 error( RtAudioError::WARNING );
\r
5351 // Get input channel information.
\r
5352 info.inputChannels = inCaps.dwChannels;
\r
5354 // Get sample rate and format information.
\r
5355 std::vector<unsigned int> rates;
\r
5356 if ( inCaps.dwChannels >= 2 ) {
\r
5357 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5358 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5359 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5360 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5361 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5362 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5363 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5364 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5366 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5367 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5368 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5369 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5370 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5372 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5373 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5374 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5375 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5376 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5379 else if ( inCaps.dwChannels == 1 ) {
\r
5380 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5381 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5382 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5383 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5384 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5385 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5386 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5387 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5389 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5390 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5391 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5392 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5393 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5395 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5396 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5397 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5398 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5399 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5402 else info.inputChannels = 0; // technically, this would be an error
\r
5406 if ( info.inputChannels == 0 ) return info;
\r
5408 // Copy the supported rates to the info structure but avoid duplication.
\r
5410 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5412 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5413 if ( rates[i] == info.sampleRates[j] ) {
\r
5418 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5420 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5422 // If device opens for both playback and capture, we determine the channels.
\r
5423 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5424 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5426 if ( device == 0 ) info.isDefaultInput = true;
\r
5428 // Copy name and return.
\r
5429 info.name = dsDevices[ device ].name;
\r
5430 info.probed = true;
\r
5434 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5435 unsigned int firstChannel, unsigned int sampleRate,
\r
5436 RtAudioFormat format, unsigned int *bufferSize,
\r
5437 RtAudio::StreamOptions *options )
\r
5439 if ( channels + firstChannel > 2 ) {
\r
5440 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
5444 size_t nDevices = dsDevices.size();
\r
5445 if ( nDevices == 0 ) {
\r
5446 // This should not happen because a check is made before this function is called.
\r
5447 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
5451 if ( device >= nDevices ) {
\r
5452 // This should not happen because a check is made before this function is called.
\r
5453 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
5457 if ( mode == OUTPUT ) {
\r
5458 if ( dsDevices[ device ].validId[0] == false ) {
\r
5459 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
5460 errorText_ = errorStream_.str();
\r
5464 else { // mode == INPUT
\r
5465 if ( dsDevices[ device ].validId[1] == false ) {
\r
5466 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
5467 errorText_ = errorStream_.str();
\r
5472 // According to a note in PortAudio, using GetDesktopWindow()
\r
5473 // instead of GetForegroundWindow() is supposed to avoid problems
\r
5474 // that occur when the application's window is not the foreground
\r
5475 // window. Also, if the application window closes before the
\r
5476 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
5477 // problems when using GetDesktopWindow() but it seems fine now
\r
5478 // (January 2010). I'll leave it commented here.
\r
5479 // HWND hWnd = GetForegroundWindow();
\r
5480 HWND hWnd = GetDesktopWindow();
\r
5482 // Check the numberOfBuffers parameter and limit the lowest value to
\r
5483 // two. This is a judgement call and a value of two is probably too
\r
5484 // low for capture, but it should work for playback.
\r
5486 if ( options ) nBuffers = options->numberOfBuffers;
\r
5487 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
5488 if ( nBuffers < 2 ) nBuffers = 3;
\r
5490 // Check the lower range of the user-specified buffer size and set
\r
5491 // (arbitrarily) to a lower bound of 32.
\r
5492 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
5494 // Create the wave format structure. The data format setting will
\r
5495 // be determined later.
\r
5496 WAVEFORMATEX waveFormat;
\r
5497 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
5498 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
5499 waveFormat.nChannels = channels + firstChannel;
\r
5500 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
5502 // Determine the device buffer size. By default, we'll use the value
\r
5503 // defined above (32K), but we will grow it to make allowances for
\r
5504 // very large software buffer sizes.
\r
5505 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
5506 DWORD dsPointerLeadTime = 0;
\r
5508 void *ohandle = 0, *bhandle = 0;
\r
5510 if ( mode == OUTPUT ) {
\r
5512 LPDIRECTSOUND output;
\r
5513 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5514 if ( FAILED( result ) ) {
\r
5515 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5516 errorText_ = errorStream_.str();
\r
5521 outCaps.dwSize = sizeof( outCaps );
\r
5522 result = output->GetCaps( &outCaps );
\r
5523 if ( FAILED( result ) ) {
\r
5524 output->Release();
\r
5525 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
5526 errorText_ = errorStream_.str();
\r
5530 // Check channel information.
\r
5531 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
5532 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
5533 errorText_ = errorStream_.str();
\r
5537 // Check format information. Use 16-bit format unless not
\r
5538 // supported or user requests 8-bit.
\r
5539 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
5540 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
5541 waveFormat.wBitsPerSample = 16;
\r
5542 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5545 waveFormat.wBitsPerSample = 8;
\r
5546 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5548 stream_.userFormat = format;
\r
5550 // Update wave format structure and buffer information.
\r
5551 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5552 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5553 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5555 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5556 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5557 dsBufferSize *= 2;
\r
5559 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
5560 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
5561 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
5562 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
5563 if ( FAILED( result ) ) {
\r
5564 output->Release();
\r
5565 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
5566 errorText_ = errorStream_.str();
\r
5570 // Even though we will write to the secondary buffer, we need to
\r
5571 // access the primary buffer to set the correct output format
\r
5572 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
5573 // buffer description.
\r
5574 DSBUFFERDESC bufferDescription;
\r
5575 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5576 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5577 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
5579 // Obtain the primary buffer
\r
5580 LPDIRECTSOUNDBUFFER buffer;
\r
5581 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5582 if ( FAILED( result ) ) {
\r
5583 output->Release();
\r
5584 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
5585 errorText_ = errorStream_.str();
\r
5589 // Set the primary DS buffer sound format.
\r
5590 result = buffer->SetFormat( &waveFormat );
\r
5591 if ( FAILED( result ) ) {
\r
5592 output->Release();
\r
5593 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
5594 errorText_ = errorStream_.str();
\r
5598 // Setup the secondary DS buffer description.
\r
5599 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5600 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5601 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5602 DSBCAPS_GLOBALFOCUS |
\r
5603 DSBCAPS_GETCURRENTPOSITION2 |
\r
5604 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
5605 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5606 bufferDescription.lpwfxFormat = &waveFormat;
\r
5608 // Try to create the secondary DS buffer. If that doesn't work,
\r
5609 // try to use software mixing. Otherwise, there's a problem.
\r
5610 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5611 if ( FAILED( result ) ) {
\r
5612 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5613 DSBCAPS_GLOBALFOCUS |
\r
5614 DSBCAPS_GETCURRENTPOSITION2 |
\r
5615 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
5616 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5617 if ( FAILED( result ) ) {
\r
5618 output->Release();
\r
5619 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
5620 errorText_ = errorStream_.str();
\r
5625 // Get the buffer size ... might be different from what we specified.
\r
5627 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
5628 result = buffer->GetCaps( &dsbcaps );
\r
5629 if ( FAILED( result ) ) {
\r
5630 output->Release();
\r
5631 buffer->Release();
\r
5632 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5633 errorText_ = errorStream_.str();
\r
5637 dsBufferSize = dsbcaps.dwBufferBytes;
\r
5639 // Lock the DS buffer
\r
5642 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5643 if ( FAILED( result ) ) {
\r
5644 output->Release();
\r
5645 buffer->Release();
\r
5646 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
5647 errorText_ = errorStream_.str();
\r
5651 // Zero the DS buffer
\r
5652 ZeroMemory( audioPtr, dataLen );
\r
5654 // Unlock the DS buffer
\r
5655 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5656 if ( FAILED( result ) ) {
\r
5657 output->Release();
\r
5658 buffer->Release();
\r
5659 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
5660 errorText_ = errorStream_.str();
\r
5664 ohandle = (void *) output;
\r
5665 bhandle = (void *) buffer;
\r
5668 if ( mode == INPUT ) {
\r
5670 LPDIRECTSOUNDCAPTURE input;
\r
5671 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5672 if ( FAILED( result ) ) {
\r
5673 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5674 errorText_ = errorStream_.str();
\r
5679 inCaps.dwSize = sizeof( inCaps );
\r
5680 result = input->GetCaps( &inCaps );
\r
5681 if ( FAILED( result ) ) {
\r
5683 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
5684 errorText_ = errorStream_.str();
\r
5688 // Check channel information.
\r
5689 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
5690 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
5694 // Check format information. Use 16-bit format unless user
\r
5695 // requests 8-bit.
\r
5696 DWORD deviceFormats;
\r
5697 if ( channels + firstChannel == 2 ) {
\r
5698 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
5699 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5700 waveFormat.wBitsPerSample = 8;
\r
5701 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5703 else { // assume 16-bit is supported
\r
5704 waveFormat.wBitsPerSample = 16;
\r
5705 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5708 else { // channel == 1
\r
5709 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
5710 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5711 waveFormat.wBitsPerSample = 8;
\r
5712 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5714 else { // assume 16-bit is supported
\r
5715 waveFormat.wBitsPerSample = 16;
\r
5716 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5719 stream_.userFormat = format;
\r
5721 // Update wave format structure and buffer information.
\r
5722 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5723 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5724 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5726 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5727 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5728 dsBufferSize *= 2;
\r
5730 // Setup the secondary DS buffer description.
\r
5731 DSCBUFFERDESC bufferDescription;
\r
5732 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
5733 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
5734 bufferDescription.dwFlags = 0;
\r
5735 bufferDescription.dwReserved = 0;
\r
5736 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5737 bufferDescription.lpwfxFormat = &waveFormat;
\r
5739 // Create the capture buffer.
\r
5740 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
5741 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
5742 if ( FAILED( result ) ) {
\r
5744 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
5745 errorText_ = errorStream_.str();
\r
5749 // Get the buffer size ... might be different from what we specified.
\r
5750 DSCBCAPS dscbcaps;
\r
5751 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
5752 result = buffer->GetCaps( &dscbcaps );
\r
5753 if ( FAILED( result ) ) {
\r
5755 buffer->Release();
\r
5756 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5757 errorText_ = errorStream_.str();
\r
5761 dsBufferSize = dscbcaps.dwBufferBytes;
\r
5763 // NOTE: We could have a problem here if this is a duplex stream
\r
5764 // and the play and capture hardware buffer sizes are different
\r
5765 // (I'm actually not sure if that is a problem or not).
\r
5766 // Currently, we are not verifying that.
\r
5768 // Lock the capture buffer
\r
5771 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5772 if ( FAILED( result ) ) {
\r
5774 buffer->Release();
\r
5775 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
5776 errorText_ = errorStream_.str();
\r
5780 // Zero the buffer
\r
5781 ZeroMemory( audioPtr, dataLen );
\r
5783 // Unlock the buffer
\r
5784 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5785 if ( FAILED( result ) ) {
\r
5787 buffer->Release();
\r
5788 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
5789 errorText_ = errorStream_.str();
\r
5793 ohandle = (void *) input;
\r
5794 bhandle = (void *) buffer;
\r
5797 // Set various stream parameters
\r
5798 DsHandle *handle = 0;
\r
5799 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
5800 stream_.nUserChannels[mode] = channels;
\r
5801 stream_.bufferSize = *bufferSize;
\r
5802 stream_.channelOffset[mode] = firstChannel;
\r
5803 stream_.deviceInterleaved[mode] = true;
\r
5804 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
5805 else stream_.userInterleaved = true;
\r
5807 // Set flag for buffer conversion
\r
5808 stream_.doConvertBuffer[mode] = false;
\r
5809 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
5810 stream_.doConvertBuffer[mode] = true;
\r
5811 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
5812 stream_.doConvertBuffer[mode] = true;
\r
5813 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5814 stream_.nUserChannels[mode] > 1 )
\r
5815 stream_.doConvertBuffer[mode] = true;
\r
5817 // Allocate necessary internal buffers
\r
5818 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5819 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5820 if ( stream_.userBuffer[mode] == NULL ) {
\r
5821 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
5825 if ( stream_.doConvertBuffer[mode] ) {
\r
5827 bool makeBuffer = true;
\r
5828 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5829 if ( mode == INPUT ) {
\r
5830 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5831 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5832 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
5836 if ( makeBuffer ) {
\r
5837 bufferBytes *= *bufferSize;
\r
5838 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5839 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5840 if ( stream_.deviceBuffer == NULL ) {
\r
5841 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
5847 // Allocate our DsHandle structures for the stream.
\r
5848 if ( stream_.apiHandle == 0 ) {
\r
5850 handle = new DsHandle;
\r
5852 catch ( std::bad_alloc& ) {
\r
5853 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
5857 // Create a manual-reset event.
\r
5858 handle->condition = CreateEvent( NULL, // no security
\r
5859 TRUE, // manual-reset
\r
5860 FALSE, // non-signaled initially
\r
5861 NULL ); // unnamed
\r
5862 stream_.apiHandle = (void *) handle;
\r
5865 handle = (DsHandle *) stream_.apiHandle;
\r
5866 handle->id[mode] = ohandle;
\r
5867 handle->buffer[mode] = bhandle;
\r
5868 handle->dsBufferSize[mode] = dsBufferSize;
\r
5869 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
5871 stream_.device[mode] = device;
\r
5872 stream_.state = STREAM_STOPPED;
\r
5873 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
5874 // We had already set up an output stream.
\r
5875 stream_.mode = DUPLEX;
\r
5877 stream_.mode = mode;
\r
5878 stream_.nBuffers = nBuffers;
\r
5879 stream_.sampleRate = sampleRate;
\r
5881 // Setup the buffer conversion information structure.
\r
5882 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5884 // Setup the callback thread.
\r
5885 if ( stream_.callbackInfo.isRunning == false ) {
\r
5886 unsigned threadId;
\r
5887 stream_.callbackInfo.isRunning = true;
\r
5888 stream_.callbackInfo.object = (void *) this;
\r
5889 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
5890 &stream_.callbackInfo, 0, &threadId );
\r
5891 if ( stream_.callbackInfo.thread == 0 ) {
\r
5892 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
5896 // Boost DS thread priority
\r
5897 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
5903 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5904 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5905 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5906 if ( buffer ) buffer->Release();
\r
5907 object->Release();
\r
5909 if ( handle->buffer[1] ) {
\r
5910 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5911 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5912 if ( buffer ) buffer->Release();
\r
5913 object->Release();
\r
5915 CloseHandle( handle->condition );
\r
5917 stream_.apiHandle = 0;
\r
5920 for ( int i=0; i<2; i++ ) {
\r
5921 if ( stream_.userBuffer[i] ) {
\r
5922 free( stream_.userBuffer[i] );
\r
5923 stream_.userBuffer[i] = 0;
\r
5927 if ( stream_.deviceBuffer ) {
\r
5928 free( stream_.deviceBuffer );
\r
5929 stream_.deviceBuffer = 0;
\r
5932 stream_.state = STREAM_CLOSED;
\r
5936 void RtApiDs :: closeStream()
\r
5938 if ( stream_.state == STREAM_CLOSED ) {
\r
5939 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
5940 error( RtAudioError::WARNING );
\r
5944 // Stop the callback thread.
\r
5945 stream_.callbackInfo.isRunning = false;
\r
5946 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
5947 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
5949 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
5951 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5952 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5953 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5956 buffer->Release();
\r
5958 object->Release();
\r
5960 if ( handle->buffer[1] ) {
\r
5961 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5962 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5965 buffer->Release();
\r
5967 object->Release();
\r
5969 CloseHandle( handle->condition );
\r
5971 stream_.apiHandle = 0;
\r
5974 for ( int i=0; i<2; i++ ) {
\r
5975 if ( stream_.userBuffer[i] ) {
\r
5976 free( stream_.userBuffer[i] );
\r
5977 stream_.userBuffer[i] = 0;
\r
5981 if ( stream_.deviceBuffer ) {
\r
5982 free( stream_.deviceBuffer );
\r
5983 stream_.deviceBuffer = 0;
\r
5986 stream_.mode = UNINITIALIZED;
\r
5987 stream_.state = STREAM_CLOSED;
\r
5990 void RtApiDs :: startStream()
\r
5993 if ( stream_.state == STREAM_RUNNING ) {
\r
5994 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
5995 error( RtAudioError::WARNING );
\r
5999 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6001 // Increase scheduler frequency on lesser windows (a side-effect of
\r
6002 // increasing timer accuracy). On greater windows (Win2K or later),
\r
6003 // this is already in effect.
\r
6004 timeBeginPeriod( 1 );
\r
6006 buffersRolling = false;
\r
6007 duplexPrerollBytes = 0;
\r
6009 if ( stream_.mode == DUPLEX ) {
\r
6010 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
6011 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
6014 HRESULT result = 0;
\r
6015 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6017 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6018 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
6019 if ( FAILED( result ) ) {
\r
6020 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
6021 errorText_ = errorStream_.str();
\r
6026 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6028 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6029 result = buffer->Start( DSCBSTART_LOOPING );
\r
6030 if ( FAILED( result ) ) {
\r
6031 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
6032 errorText_ = errorStream_.str();
\r
6037 handle->drainCounter = 0;
\r
6038 handle->internalDrain = false;
\r
6039 ResetEvent( handle->condition );
\r
6040 stream_.state = STREAM_RUNNING;
\r
6043 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6046 void RtApiDs :: stopStream()
\r
6049 if ( stream_.state == STREAM_STOPPED ) {
\r
6050 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
6051 error( RtAudioError::WARNING );
\r
6055 HRESULT result = 0;
\r
6058 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6059 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6060 if ( handle->drainCounter == 0 ) {
\r
6061 handle->drainCounter = 2;
\r
6062 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
6065 stream_.state = STREAM_STOPPED;
\r
6067 // Stop the buffer and clear memory
\r
6068 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6069 result = buffer->Stop();
\r
6070 if ( FAILED( result ) ) {
\r
6071 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
6072 errorText_ = errorStream_.str();
\r
6076 // Lock the buffer and clear it so that if we start to play again,
\r
6077 // we won't have old data playing.
\r
6078 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6079 if ( FAILED( result ) ) {
\r
6080 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
6081 errorText_ = errorStream_.str();
\r
6085 // Zero the DS buffer
\r
6086 ZeroMemory( audioPtr, dataLen );
\r
6088 // Unlock the DS buffer
\r
6089 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6090 if ( FAILED( result ) ) {
\r
6091 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
6092 errorText_ = errorStream_.str();
\r
6096 // If we start playing again, we must begin at beginning of buffer.
\r
6097 handle->bufferPointer[0] = 0;
\r
6100 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6101 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6105 stream_.state = STREAM_STOPPED;
\r
6107 result = buffer->Stop();
\r
6108 if ( FAILED( result ) ) {
\r
6109 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
6110 errorText_ = errorStream_.str();
\r
6114 // Lock the buffer and clear it so that if we start to play again,
\r
6115 // we won't have old data playing.
\r
6116 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6117 if ( FAILED( result ) ) {
\r
6118 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
6119 errorText_ = errorStream_.str();
\r
6123 // Zero the DS buffer
\r
6124 ZeroMemory( audioPtr, dataLen );
\r
6126 // Unlock the DS buffer
\r
6127 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6128 if ( FAILED( result ) ) {
\r
6129 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
6130 errorText_ = errorStream_.str();
\r
6134 // If we start recording again, we must begin at beginning of buffer.
\r
6135 handle->bufferPointer[1] = 0;
\r
6139 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
6140 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6143 void RtApiDs :: abortStream()
\r
6146 if ( stream_.state == STREAM_STOPPED ) {
\r
6147 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
6148 error( RtAudioError::WARNING );
\r
6152 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6153 handle->drainCounter = 2;
\r
6158 void RtApiDs :: callbackEvent()
\r
6160 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6161 Sleep( 50 ); // sleep 50 milliseconds
\r
6165 if ( stream_.state == STREAM_CLOSED ) {
\r
6166 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6167 error( RtAudioError::WARNING );
\r
6171 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6172 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6174 // Check if we were draining the stream and signal is finished.
\r
6175 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6177 stream_.state = STREAM_STOPPING;
\r
6178 if ( handle->internalDrain == false )
\r
6179 SetEvent( handle->condition );
\r
6185 // Invoke user callback to get fresh output data UNLESS we are
\r
6186 // draining stream.
\r
6187 if ( handle->drainCounter == 0 ) {
\r
6188 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6189 double streamTime = getStreamTime();
\r
6190 RtAudioStreamStatus status = 0;
\r
6191 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6192 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6193 handle->xrun[0] = false;
\r
6195 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6196 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6197 handle->xrun[1] = false;
\r
6199 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6200 stream_.bufferSize, streamTime, status, info->userData );
\r
6201 if ( cbReturnValue == 2 ) {
\r
6202 stream_.state = STREAM_STOPPING;
\r
6203 handle->drainCounter = 2;
\r
6207 else if ( cbReturnValue == 1 ) {
\r
6208 handle->drainCounter = 1;
\r
6209 handle->internalDrain = true;
\r
6214 DWORD currentWritePointer, safeWritePointer;
\r
6215 DWORD currentReadPointer, safeReadPointer;
\r
6216 UINT nextWritePointer;
\r
6218 LPVOID buffer1 = NULL;
\r
6219 LPVOID buffer2 = NULL;
\r
6220 DWORD bufferSize1 = 0;
\r
6221 DWORD bufferSize2 = 0;
\r
6226 if ( buffersRolling == false ) {
\r
6227 if ( stream_.mode == DUPLEX ) {
\r
6228 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6230 // It takes a while for the devices to get rolling. As a result,
\r
6231 // there's no guarantee that the capture and write device pointers
\r
6232 // will move in lockstep. Wait here for both devices to start
\r
6233 // rolling, and then set our buffer pointers accordingly.
\r
6234 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6235 // bytes later than the write buffer.
\r
6237 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6238 // take place between the two GetCurrentPosition calls... but I'm
\r
6239 // really not sure how to solve the problem. Temporarily boost to
\r
6240 // Realtime priority, maybe; but I'm not sure what priority the
\r
6241 // DirectSound service threads run at. We *should* be roughly
\r
6242 // within a ms or so of correct.
\r
6244 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6245 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6247 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6249 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6250 if ( FAILED( result ) ) {
\r
6251 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6252 errorText_ = errorStream_.str();
\r
6253 error( RtAudioError::SYSTEM_ERROR );
\r
6256 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6257 if ( FAILED( result ) ) {
\r
6258 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6259 errorText_ = errorStream_.str();
\r
6260 error( RtAudioError::SYSTEM_ERROR );
\r
6264 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6265 if ( FAILED( result ) ) {
\r
6266 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6267 errorText_ = errorStream_.str();
\r
6268 error( RtAudioError::SYSTEM_ERROR );
\r
6271 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6272 if ( FAILED( result ) ) {
\r
6273 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6274 errorText_ = errorStream_.str();
\r
6275 error( RtAudioError::SYSTEM_ERROR );
\r
6278 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6282 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6284 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6285 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6286 handle->bufferPointer[1] = safeReadPointer;
\r
6288 else if ( stream_.mode == OUTPUT ) {
\r
6290 // Set the proper nextWritePosition after initial startup.
\r
6291 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6292 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6293 if ( FAILED( result ) ) {
\r
6294 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6295 errorText_ = errorStream_.str();
\r
6296 error( RtAudioError::SYSTEM_ERROR );
\r
6299 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6300 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6303 buffersRolling = true;
\r
6306 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6308 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6310 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6311 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6312 bufferBytes *= formatBytes( stream_.userFormat );
\r
6313 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6316 // Setup parameters and do buffer conversion if necessary.
\r
6317 if ( stream_.doConvertBuffer[0] ) {
\r
6318 buffer = stream_.deviceBuffer;
\r
6319 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6320 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6321 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6324 buffer = stream_.userBuffer[0];
\r
6325 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6326 bufferBytes *= formatBytes( stream_.userFormat );
\r
6329 // No byte swapping necessary in DirectSound implementation.
\r
6331 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6332 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6334 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6335 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6337 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6338 nextWritePointer = handle->bufferPointer[0];
\r
6340 DWORD endWrite, leadPointer;
\r
6342 // Find out where the read and "safe write" pointers are.
\r
6343 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6344 if ( FAILED( result ) ) {
\r
6345 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6346 errorText_ = errorStream_.str();
\r
6347 error( RtAudioError::SYSTEM_ERROR );
\r
6351 // We will copy our output buffer into the region between
\r
6352 // safeWritePointer and leadPointer. If leadPointer is not
\r
6353 // beyond the next endWrite position, wait until it is.
\r
6354 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6355 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6356 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6357 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6358 endWrite = nextWritePointer + bufferBytes;
\r
6360 // Check whether the entire write region is behind the play pointer.
\r
6361 if ( leadPointer >= endWrite ) break;
\r
6363 // If we are here, then we must wait until the leadPointer advances
\r
6364 // beyond the end of our next write region. We use the
\r
6365 // Sleep() function to suspend operation until that happens.
\r
6366 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6367 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6368 if ( millis < 1.0 ) millis = 1.0;
\r
6369 Sleep( (DWORD) millis );
\r
6372 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6373 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6374 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6375 handle->xrun[0] = true;
\r
6376 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6377 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6378 handle->bufferPointer[0] = nextWritePointer;
\r
6379 endWrite = nextWritePointer + bufferBytes;
\r
6382 // Lock free space in the buffer
\r
6383 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6384 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6385 if ( FAILED( result ) ) {
\r
6386 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6387 errorText_ = errorStream_.str();
\r
6388 error( RtAudioError::SYSTEM_ERROR );
\r
6392 // Copy our buffer into the DS buffer
\r
6393 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6394 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6396 // Update our buffer offset and unlock sound buffer
\r
6397 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6398 if ( FAILED( result ) ) {
\r
6399 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6400 errorText_ = errorStream_.str();
\r
6401 error( RtAudioError::SYSTEM_ERROR );
\r
6404 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6405 handle->bufferPointer[0] = nextWritePointer;
\r
6407 if ( handle->drainCounter ) {
\r
6408 handle->drainCounter++;
\r
6413 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6415 // Setup parameters.
\r
6416 if ( stream_.doConvertBuffer[1] ) {
\r
6417 buffer = stream_.deviceBuffer;
\r
6418 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6419 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6422 buffer = stream_.userBuffer[1];
\r
6423 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6424 bufferBytes *= formatBytes( stream_.userFormat );
\r
6427 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6428 long nextReadPointer = handle->bufferPointer[1];
\r
6429 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6431 // Find out where the write and "safe read" pointers are.
\r
6432 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6433 if ( FAILED( result ) ) {
\r
6434 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6435 errorText_ = errorStream_.str();
\r
6436 error( RtAudioError::SYSTEM_ERROR );
\r
6440 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6441 DWORD endRead = nextReadPointer + bufferBytes;
\r
6443 // Handling depends on whether we are INPUT or DUPLEX.
\r
6444 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6445 // then a wait here will drag the write pointers into the forbidden zone.
\r
6447 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6448 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6449 // practical way to sync up the read and write pointers reliably, given the
\r
6450 // the very complex relationship between phase and increment of the read and write
\r
6453 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6454 // provide a pre-roll period of 0.5 seconds in which we return
\r
6455 // zeros from the read buffer while the pointers sync up.
\r
6457 if ( stream_.mode == DUPLEX ) {
\r
6458 if ( safeReadPointer < endRead ) {
\r
6459 if ( duplexPrerollBytes <= 0 ) {
\r
6460 // Pre-roll time over. Be more agressive.
\r
6461 int adjustment = endRead-safeReadPointer;
\r
6463 handle->xrun[1] = true;
\r
6465 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6466 // and perform fine adjustments later.
\r
6467 // - small adjustments: back off by twice as much.
\r
6468 if ( adjustment >= 2*bufferBytes )
\r
6469 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6471 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6473 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6477 // In pre=roll time. Just do it.
\r
6478 nextReadPointer = safeReadPointer - bufferBytes;
\r
6479 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6481 endRead = nextReadPointer + bufferBytes;
\r
6484 else { // mode == INPUT
\r
6485 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6486 // See comments for playback.
\r
6487 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6488 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6489 if ( millis < 1.0 ) millis = 1.0;
\r
6490 Sleep( (DWORD) millis );
\r
6492 // Wake up and find out where we are now.
\r
6493 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6494 if ( FAILED( result ) ) {
\r
6495 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6496 errorText_ = errorStream_.str();
\r
6497 error( RtAudioError::SYSTEM_ERROR );
\r
6501 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6505 // Lock free space in the buffer
\r
6506 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6507 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6508 if ( FAILED( result ) ) {
\r
6509 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6510 errorText_ = errorStream_.str();
\r
6511 error( RtAudioError::SYSTEM_ERROR );
\r
6515 if ( duplexPrerollBytes <= 0 ) {
\r
6516 // Copy our buffer into the DS buffer
\r
6517 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6518 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6521 memset( buffer, 0, bufferSize1 );
\r
6522 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6523 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6526 // Update our buffer offset and unlock sound buffer
\r
6527 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6528 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6529 if ( FAILED( result ) ) {
\r
6530 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6531 errorText_ = errorStream_.str();
\r
6532 error( RtAudioError::SYSTEM_ERROR );
\r
6535 handle->bufferPointer[1] = nextReadPointer;
\r
6537 // No byte swapping necessary in DirectSound implementation.
\r
6539 // If necessary, convert 8-bit data from unsigned to signed.
\r
6540 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6541 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6543 // Do buffer conversion if necessary.
\r
6544 if ( stream_.doConvertBuffer[1] )
\r
6545 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6549 RtApi::tickStreamTime();
\r
6552 // Definitions for utility functions and callbacks
\r
6553 // specific to the DirectSound implementation.
\r
6555 static unsigned __stdcall callbackHandler( void *ptr )
\r
6557 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6558 RtApiDs *object = (RtApiDs *) info->object;
\r
6559 bool* isRunning = &info->isRunning;
\r
6561 while ( *isRunning == true ) {
\r
6562 object->callbackEvent();
\r
6565 _endthreadex( 0 );
\r
6569 #include "tchar.h"
\r
6571 static std::string convertTChar( LPCTSTR name )
\r
6573 #if defined( UNICODE ) || defined( _UNICODE )
\r
6574 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
6575 std::string s( length-1, '\0' );
\r
6576 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);
\r
6578 std::string s( name );
\r
6584 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6585 LPCTSTR description,
\r
6586 LPCTSTR /*module*/,
\r
6587 LPVOID lpContext )
\r
6589 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6590 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6593 bool validDevice = false;
\r
6594 if ( probeInfo.isInput == true ) {
\r
6596 LPDIRECTSOUNDCAPTURE object;
\r
6598 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6599 if ( hr != DS_OK ) return TRUE;
\r
6601 caps.dwSize = sizeof(caps);
\r
6602 hr = object->GetCaps( &caps );
\r
6603 if ( hr == DS_OK ) {
\r
6604 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6605 validDevice = true;
\r
6607 object->Release();
\r
6611 LPDIRECTSOUND object;
\r
6612 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6613 if ( hr != DS_OK ) return TRUE;
\r
6615 caps.dwSize = sizeof(caps);
\r
6616 hr = object->GetCaps( &caps );
\r
6617 if ( hr == DS_OK ) {
\r
6618 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6619 validDevice = true;
\r
6621 object->Release();
\r
6624 // If good device, then save its name and guid.
\r
6625 std::string name = convertTChar( description );
\r
6626 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6627 if ( lpguid == NULL )
\r
6628 name = "Default Device";
\r
6629 if ( validDevice ) {
\r
6630 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6631 if ( dsDevices[i].name == name ) {
\r
6632 dsDevices[i].found = true;
\r
6633 if ( probeInfo.isInput ) {
\r
6634 dsDevices[i].id[1] = lpguid;
\r
6635 dsDevices[i].validId[1] = true;
\r
6638 dsDevices[i].id[0] = lpguid;
\r
6639 dsDevices[i].validId[0] = true;
\r
6646 device.name = name;
\r
6647 device.found = true;
\r
6648 if ( probeInfo.isInput ) {
\r
6649 device.id[1] = lpguid;
\r
6650 device.validId[1] = true;
\r
6653 device.id[0] = lpguid;
\r
6654 device.validId[0] = true;
\r
6656 dsDevices.push_back( device );
\r
6662 static const char* getErrorString( int code )
\r
6666 case DSERR_ALLOCATED:
\r
6667 return "Already allocated";
\r
6669 case DSERR_CONTROLUNAVAIL:
\r
6670 return "Control unavailable";
\r
6672 case DSERR_INVALIDPARAM:
\r
6673 return "Invalid parameter";
\r
6675 case DSERR_INVALIDCALL:
\r
6676 return "Invalid call";
\r
6678 case DSERR_GENERIC:
\r
6679 return "Generic error";
\r
6681 case DSERR_PRIOLEVELNEEDED:
\r
6682 return "Priority level needed";
\r
6684 case DSERR_OUTOFMEMORY:
\r
6685 return "Out of memory";
\r
6687 case DSERR_BADFORMAT:
\r
6688 return "The sample rate or the channel format is not supported";
\r
6690 case DSERR_UNSUPPORTED:
\r
6691 return "Not supported";
\r
6693 case DSERR_NODRIVER:
\r
6694 return "No driver";
\r
6696 case DSERR_ALREADYINITIALIZED:
\r
6697 return "Already initialized";
\r
6699 case DSERR_NOAGGREGATION:
\r
6700 return "No aggregation";
\r
6702 case DSERR_BUFFERLOST:
\r
6703 return "Buffer lost";
\r
6705 case DSERR_OTHERAPPHASPRIO:
\r
6706 return "Another application already has priority";
\r
6708 case DSERR_UNINITIALIZED:
\r
6709 return "Uninitialized";
\r
6712 return "DirectSound unknown error";
\r
6715 //******************** End of __WINDOWS_DS__ *********************//
\r
6719 #if defined(__LINUX_ALSA__)
\r
6721 #include <alsa/asoundlib.h>
\r
6722 #include <unistd.h>
\r
6724 // A structure to hold various information related to the ALSA API
\r
6725 // implementation.
\r
6726 struct AlsaHandle {
\r
6727 snd_pcm_t *handles[2];
\r
6728 bool synchronized;
\r
6730 pthread_cond_t runnable_cv;
\r
6734 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6737 static void *alsaCallbackHandler( void * ptr );
\r
6739 RtApiAlsa :: RtApiAlsa()
\r
6741 // Nothing to do here.
\r
6744 RtApiAlsa :: ~RtApiAlsa()
\r
6746 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6749 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6751 unsigned nDevices = 0;
\r
6752 int result, subdevice, card;
\r
6754 snd_ctl_t *handle;
\r
6756 // Count cards and devices
\r
6758 snd_card_next( &card );
\r
6759 while ( card >= 0 ) {
\r
6760 sprintf( name, "hw:%d", card );
\r
6761 result = snd_ctl_open( &handle, name, 0 );
\r
6762 if ( result < 0 ) {
\r
6763 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6764 errorText_ = errorStream_.str();
\r
6765 error( RtAudioError::WARNING );
\r
6770 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6771 if ( result < 0 ) {
\r
6772 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6773 errorText_ = errorStream_.str();
\r
6774 error( RtAudioError::WARNING );
\r
6777 if ( subdevice < 0 )
\r
6782 snd_ctl_close( handle );
\r
6783 snd_card_next( &card );
\r
6786 result = snd_ctl_open( &handle, "default", 0 );
\r
6787 if (result == 0) {
\r
6789 snd_ctl_close( handle );
\r
6795 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6797 RtAudio::DeviceInfo info;
\r
6798 info.probed = false;
\r
6800 unsigned nDevices = 0;
\r
6801 int result, subdevice, card;
\r
6803 snd_ctl_t *chandle;
\r
6805 // Count cards and devices
\r
6807 snd_card_next( &card );
\r
6808 while ( card >= 0 ) {
\r
6809 sprintf( name, "hw:%d", card );
\r
6810 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6811 if ( result < 0 ) {
\r
6812 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6813 errorText_ = errorStream_.str();
\r
6814 error( RtAudioError::WARNING );
\r
6819 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6820 if ( result < 0 ) {
\r
6821 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6822 errorText_ = errorStream_.str();
\r
6823 error( RtAudioError::WARNING );
\r
6826 if ( subdevice < 0 ) break;
\r
6827 if ( nDevices == device ) {
\r
6828 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6834 snd_ctl_close( chandle );
\r
6835 snd_card_next( &card );
\r
6838 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6839 if ( result == 0 ) {
\r
6840 if ( nDevices == device ) {
\r
6841 strcpy( name, "default" );
\r
6847 if ( nDevices == 0 ) {
\r
6848 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6849 error( RtAudioError::INVALID_USE );
\r
6853 if ( device >= nDevices ) {
\r
6854 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6855 error( RtAudioError::INVALID_USE );
\r
6861 // If a stream is already open, we cannot probe the stream devices.
\r
6862 // Thus, use the saved results.
\r
6863 if ( stream_.state != STREAM_CLOSED &&
\r
6864 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6865 snd_ctl_close( chandle );
\r
6866 if ( device >= devices_.size() ) {
\r
6867 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6868 error( RtAudioError::WARNING );
\r
6871 return devices_[ device ];
\r
6874 int openMode = SND_PCM_ASYNC;
\r
6875 snd_pcm_stream_t stream;
\r
6876 snd_pcm_info_t *pcminfo;
\r
6877 snd_pcm_info_alloca( &pcminfo );
\r
6878 snd_pcm_t *phandle;
\r
6879 snd_pcm_hw_params_t *params;
\r
6880 snd_pcm_hw_params_alloca( ¶ms );
\r
6882 // First try for playback unless default device (which has subdev -1)
\r
6883 stream = SND_PCM_STREAM_PLAYBACK;
\r
6884 snd_pcm_info_set_stream( pcminfo, stream );
\r
6885 if ( subdevice != -1 ) {
\r
6886 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6887 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6889 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6890 if ( result < 0 ) {
\r
6891 // Device probably doesn't support playback.
\r
6892 goto captureProbe;
\r
6896 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6897 if ( result < 0 ) {
\r
6898 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6899 errorText_ = errorStream_.str();
\r
6900 error( RtAudioError::WARNING );
\r
6901 goto captureProbe;
\r
6904 // The device is open ... fill the parameter structure.
\r
6905 result = snd_pcm_hw_params_any( phandle, params );
\r
6906 if ( result < 0 ) {
\r
6907 snd_pcm_close( phandle );
\r
6908 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6909 errorText_ = errorStream_.str();
\r
6910 error( RtAudioError::WARNING );
\r
6911 goto captureProbe;
\r
6914 // Get output channel information.
\r
6915 unsigned int value;
\r
6916 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6917 if ( result < 0 ) {
\r
6918 snd_pcm_close( phandle );
\r
6919 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
6920 errorText_ = errorStream_.str();
\r
6921 error( RtAudioError::WARNING );
\r
6922 goto captureProbe;
\r
6924 info.outputChannels = value;
\r
6925 snd_pcm_close( phandle );
\r
6928 stream = SND_PCM_STREAM_CAPTURE;
\r
6929 snd_pcm_info_set_stream( pcminfo, stream );
\r
6931 // Now try for capture unless default device (with subdev = -1)
\r
6932 if ( subdevice != -1 ) {
\r
6933 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6934 snd_ctl_close( chandle );
\r
6935 if ( result < 0 ) {
\r
6936 // Device probably doesn't support capture.
\r
6937 if ( info.outputChannels == 0 ) return info;
\r
6938 goto probeParameters;
\r
6942 snd_ctl_close( chandle );
\r
6944 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
6945 if ( result < 0 ) {
\r
6946 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6947 errorText_ = errorStream_.str();
\r
6948 error( RtAudioError::WARNING );
\r
6949 if ( info.outputChannels == 0 ) return info;
\r
6950 goto probeParameters;
\r
6953 // The device is open ... fill the parameter structure.
\r
6954 result = snd_pcm_hw_params_any( phandle, params );
\r
6955 if ( result < 0 ) {
\r
6956 snd_pcm_close( phandle );
\r
6957 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6958 errorText_ = errorStream_.str();
\r
6959 error( RtAudioError::WARNING );
\r
6960 if ( info.outputChannels == 0 ) return info;
\r
6961 goto probeParameters;
\r
6964 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6965 if ( result < 0 ) {
\r
6966 snd_pcm_close( phandle );
\r
6967 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
6968 errorText_ = errorStream_.str();
\r
6969 error( RtAudioError::WARNING );
\r
6970 if ( info.outputChannels == 0 ) return info;
\r
6971 goto probeParameters;
\r
6973 info.inputChannels = value;
\r
6974 snd_pcm_close( phandle );
\r
6976 // If device opens for both playback and capture, we determine the channels.
\r
6977 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
6978 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6980 // ALSA doesn't provide default devices so we'll use the first available one.
\r
6981 if ( device == 0 && info.outputChannels > 0 )
\r
6982 info.isDefaultOutput = true;
\r
6983 if ( device == 0 && info.inputChannels > 0 )
\r
6984 info.isDefaultInput = true;
\r
6987 // At this point, we just need to figure out the supported data
\r
6988 // formats and sample rates. We'll proceed by opening the device in
\r
6989 // the direction with the maximum number of channels, or playback if
\r
6990 // they are equal. This might limit our sample rate options, but so
\r
6993 if ( info.outputChannels >= info.inputChannels )
\r
6994 stream = SND_PCM_STREAM_PLAYBACK;
\r
6996 stream = SND_PCM_STREAM_CAPTURE;
\r
6997 snd_pcm_info_set_stream( pcminfo, stream );
\r
6999 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7000 if ( result < 0 ) {
\r
7001 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7002 errorText_ = errorStream_.str();
\r
7003 error( RtAudioError::WARNING );
\r
7007 // The device is open ... fill the parameter structure.
\r
7008 result = snd_pcm_hw_params_any( phandle, params );
\r
7009 if ( result < 0 ) {
\r
7010 snd_pcm_close( phandle );
\r
7011 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7012 errorText_ = errorStream_.str();
\r
7013 error( RtAudioError::WARNING );
\r
7017 // Test our discrete set of sample rate values.
\r
7018 info.sampleRates.clear();
\r
7019 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7020 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
7021 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7023 if ( info.sampleRates.size() == 0 ) {
\r
7024 snd_pcm_close( phandle );
\r
7025 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7026 errorText_ = errorStream_.str();
\r
7027 error( RtAudioError::WARNING );
\r
7031 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7032 snd_pcm_format_t format;
\r
7033 info.nativeFormats = 0;
\r
7034 format = SND_PCM_FORMAT_S8;
\r
7035 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7036 info.nativeFormats |= RTAUDIO_SINT8;
\r
7037 format = SND_PCM_FORMAT_S16;
\r
7038 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7039 info.nativeFormats |= RTAUDIO_SINT16;
\r
7040 format = SND_PCM_FORMAT_S24;
\r
7041 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7042 info.nativeFormats |= RTAUDIO_SINT24;
\r
7043 format = SND_PCM_FORMAT_S32;
\r
7044 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7045 info.nativeFormats |= RTAUDIO_SINT32;
\r
7046 format = SND_PCM_FORMAT_FLOAT;
\r
7047 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7048 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7049 format = SND_PCM_FORMAT_FLOAT64;
\r
7050 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7051 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7053 // Check that we have at least one supported format
\r
7054 if ( info.nativeFormats == 0 ) {
\r
7055 snd_pcm_close( phandle );
\r
7056 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7057 errorText_ = errorStream_.str();
\r
7058 error( RtAudioError::WARNING );
\r
7062 // Get the device name
\r
7064 result = snd_card_get_name( card, &cardname );
\r
7065 if ( result >= 0 ) {
\r
7066 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7071 // That's all ... close the device and return
\r
7072 snd_pcm_close( phandle );
\r
7073 info.probed = true;
\r
7077 void RtApiAlsa :: saveDeviceInfo( void )
\r
7081 unsigned int nDevices = getDeviceCount();
\r
7082 devices_.resize( nDevices );
\r
7083 for ( unsigned int i=0; i<nDevices; i++ )
\r
7084 devices_[i] = getDeviceInfo( i );
\r
7087 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7088 unsigned int firstChannel, unsigned int sampleRate,
\r
7089 RtAudioFormat format, unsigned int *bufferSize,
\r
7090 RtAudio::StreamOptions *options )
\r
7093 #if defined(__RTAUDIO_DEBUG__)
\r
7094 snd_output_t *out;
\r
7095 snd_output_stdio_attach(&out, stderr, 0);
\r
7098 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7100 unsigned nDevices = 0;
\r
7101 int result, subdevice, card;
\r
7103 snd_ctl_t *chandle;
\r
7105 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7106 snprintf(name, sizeof(name), "%s", "default");
\r
7108 // Count cards and devices
\r
7110 snd_card_next( &card );
\r
7111 while ( card >= 0 ) {
\r
7112 sprintf( name, "hw:%d", card );
\r
7113 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7114 if ( result < 0 ) {
\r
7115 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7116 errorText_ = errorStream_.str();
\r
7121 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7122 if ( result < 0 ) break;
\r
7123 if ( subdevice < 0 ) break;
\r
7124 if ( nDevices == device ) {
\r
7125 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7126 snd_ctl_close( chandle );
\r
7131 snd_ctl_close( chandle );
\r
7132 snd_card_next( &card );
\r
7135 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7136 if ( result == 0 ) {
\r
7137 if ( nDevices == device ) {
\r
7138 strcpy( name, "default" );
\r
7144 if ( nDevices == 0 ) {
\r
7145 // This should not happen because a check is made before this function is called.
\r
7146 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7150 if ( device >= nDevices ) {
\r
7151 // This should not happen because a check is made before this function is called.
\r
7152 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7159 // The getDeviceInfo() function will not work for a device that is
\r
7160 // already open. Thus, we'll probe the system before opening a
\r
7161 // stream and save the results for use by getDeviceInfo().
\r
7162 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7163 this->saveDeviceInfo();
\r
7165 snd_pcm_stream_t stream;
\r
7166 if ( mode == OUTPUT )
\r
7167 stream = SND_PCM_STREAM_PLAYBACK;
\r
7169 stream = SND_PCM_STREAM_CAPTURE;
\r
7171 snd_pcm_t *phandle;
\r
7172 int openMode = SND_PCM_ASYNC;
\r
7173 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7174 if ( result < 0 ) {
\r
7175 if ( mode == OUTPUT )
\r
7176 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7178 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7179 errorText_ = errorStream_.str();
\r
7183 // Fill the parameter structure.
\r
7184 snd_pcm_hw_params_t *hw_params;
\r
7185 snd_pcm_hw_params_alloca( &hw_params );
\r
7186 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7187 if ( result < 0 ) {
\r
7188 snd_pcm_close( phandle );
\r
7189 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7190 errorText_ = errorStream_.str();
\r
7194 #if defined(__RTAUDIO_DEBUG__)
\r
7195 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7196 snd_pcm_hw_params_dump( hw_params, out );
\r
7199 // Set access ... check user preference.
\r
7200 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7201 stream_.userInterleaved = false;
\r
7202 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7203 if ( result < 0 ) {
\r
7204 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7205 stream_.deviceInterleaved[mode] = true;
\r
7208 stream_.deviceInterleaved[mode] = false;
\r
7211 stream_.userInterleaved = true;
\r
7212 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7213 if ( result < 0 ) {
\r
7214 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7215 stream_.deviceInterleaved[mode] = false;
\r
7218 stream_.deviceInterleaved[mode] = true;
\r
7221 if ( result < 0 ) {
\r
7222 snd_pcm_close( phandle );
\r
7223 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7224 errorText_ = errorStream_.str();
\r
7228 // Determine how to set the device format.
\r
7229 stream_.userFormat = format;
\r
7230 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7232 if ( format == RTAUDIO_SINT8 )
\r
7233 deviceFormat = SND_PCM_FORMAT_S8;
\r
7234 else if ( format == RTAUDIO_SINT16 )
\r
7235 deviceFormat = SND_PCM_FORMAT_S16;
\r
7236 else if ( format == RTAUDIO_SINT24 )
\r
7237 deviceFormat = SND_PCM_FORMAT_S24;
\r
7238 else if ( format == RTAUDIO_SINT32 )
\r
7239 deviceFormat = SND_PCM_FORMAT_S32;
\r
7240 else if ( format == RTAUDIO_FLOAT32 )
\r
7241 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7242 else if ( format == RTAUDIO_FLOAT64 )
\r
7243 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7245 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7246 stream_.deviceFormat[mode] = format;
\r
7250 // The user requested format is not natively supported by the device.
\r
7251 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7252 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7253 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7257 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7258 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7259 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7263 deviceFormat = SND_PCM_FORMAT_S32;
\r
7264 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7265 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7269 deviceFormat = SND_PCM_FORMAT_S24;
\r
7270 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7271 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7275 deviceFormat = SND_PCM_FORMAT_S16;
\r
7276 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7277 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7281 deviceFormat = SND_PCM_FORMAT_S8;
\r
7282 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7283 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7287 // If we get here, no supported format was found.
\r
7288 snd_pcm_close( phandle );
\r
7289 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7290 errorText_ = errorStream_.str();
\r
7294 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7295 if ( result < 0 ) {
\r
7296 snd_pcm_close( phandle );
\r
7297 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7298 errorText_ = errorStream_.str();
\r
7302 // Determine whether byte-swaping is necessary.
\r
7303 stream_.doByteSwap[mode] = false;
\r
7304 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7305 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7306 if ( result == 0 )
\r
7307 stream_.doByteSwap[mode] = true;
\r
7308 else if (result < 0) {
\r
7309 snd_pcm_close( phandle );
\r
7310 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7311 errorText_ = errorStream_.str();
\r
7316 // Set the sample rate.
\r
7317 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7318 if ( result < 0 ) {
\r
7319 snd_pcm_close( phandle );
\r
7320 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7321 errorText_ = errorStream_.str();
\r
7325 // Determine the number of channels for this device. We support a possible
\r
7326 // minimum device channel number > than the value requested by the user.
\r
7327 stream_.nUserChannels[mode] = channels;
\r
7328 unsigned int value;
\r
7329 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7330 unsigned int deviceChannels = value;
\r
7331 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7332 snd_pcm_close( phandle );
\r
7333 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7334 errorText_ = errorStream_.str();
\r
7338 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7339 if ( result < 0 ) {
\r
7340 snd_pcm_close( phandle );
\r
7341 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7342 errorText_ = errorStream_.str();
\r
7345 deviceChannels = value;
\r
7346 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7347 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7349 // Set the device channels.
\r
7350 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7351 if ( result < 0 ) {
\r
7352 snd_pcm_close( phandle );
\r
7353 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7354 errorText_ = errorStream_.str();
\r
7358 // Set the buffer (or period) size.
\r
7360 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7361 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7362 if ( result < 0 ) {
\r
7363 snd_pcm_close( phandle );
\r
7364 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7365 errorText_ = errorStream_.str();
\r
7368 *bufferSize = periodSize;
\r
7370 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7371 unsigned int periods = 0;
\r
7372 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7373 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7374 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7375 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7376 if ( result < 0 ) {
\r
7377 snd_pcm_close( phandle );
\r
7378 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7379 errorText_ = errorStream_.str();
\r
7383 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7384 // MUST be the same in both directions!
\r
7385 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7386 snd_pcm_close( phandle );
\r
7387 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7388 errorText_ = errorStream_.str();
\r
7392 stream_.bufferSize = *bufferSize;
\r
7394 // Install the hardware configuration
\r
7395 result = snd_pcm_hw_params( phandle, hw_params );
\r
7396 if ( result < 0 ) {
\r
7397 snd_pcm_close( phandle );
\r
7398 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7399 errorText_ = errorStream_.str();
\r
7403 #if defined(__RTAUDIO_DEBUG__)
\r
7404 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7405 snd_pcm_hw_params_dump( hw_params, out );
\r
7408 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7409 snd_pcm_sw_params_t *sw_params = NULL;
\r
7410 snd_pcm_sw_params_alloca( &sw_params );
\r
7411 snd_pcm_sw_params_current( phandle, sw_params );
\r
7412 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7413 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7414 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7416 // The following two settings were suggested by Theo Veenker
\r
7417 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7418 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7420 // here are two options for a fix
\r
7421 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7422 snd_pcm_uframes_t val;
\r
7423 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7424 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7426 result = snd_pcm_sw_params( phandle, sw_params );
\r
7427 if ( result < 0 ) {
\r
7428 snd_pcm_close( phandle );
\r
7429 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7430 errorText_ = errorStream_.str();
\r
7434 #if defined(__RTAUDIO_DEBUG__)
\r
7435 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7436 snd_pcm_sw_params_dump( sw_params, out );
\r
7439 // Set flags for buffer conversion
\r
7440 stream_.doConvertBuffer[mode] = false;
\r
7441 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7442 stream_.doConvertBuffer[mode] = true;
\r
7443 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7444 stream_.doConvertBuffer[mode] = true;
\r
7445 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7446 stream_.nUserChannels[mode] > 1 )
\r
7447 stream_.doConvertBuffer[mode] = true;
\r
7449 // Allocate the ApiHandle if necessary and then save.
\r
7450 AlsaHandle *apiInfo = 0;
\r
7451 if ( stream_.apiHandle == 0 ) {
\r
7453 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7455 catch ( std::bad_alloc& ) {
\r
7456 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7460 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7461 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7465 stream_.apiHandle = (void *) apiInfo;
\r
7466 apiInfo->handles[0] = 0;
\r
7467 apiInfo->handles[1] = 0;
\r
7470 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7472 apiInfo->handles[mode] = phandle;
\r
7475 // Allocate necessary internal buffers.
\r
7476 unsigned long bufferBytes;
\r
7477 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7478 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7479 if ( stream_.userBuffer[mode] == NULL ) {
\r
7480 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7484 if ( stream_.doConvertBuffer[mode] ) {
\r
7486 bool makeBuffer = true;
\r
7487 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7488 if ( mode == INPUT ) {
\r
7489 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7490 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7491 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7495 if ( makeBuffer ) {
\r
7496 bufferBytes *= *bufferSize;
\r
7497 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7498 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7499 if ( stream_.deviceBuffer == NULL ) {
\r
7500 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7506 stream_.sampleRate = sampleRate;
\r
7507 stream_.nBuffers = periods;
\r
7508 stream_.device[mode] = device;
\r
7509 stream_.state = STREAM_STOPPED;
\r
7511 // Setup the buffer conversion information structure.
\r
7512 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7514 // Setup thread if necessary.
\r
7515 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7516 // We had already set up an output stream.
\r
7517 stream_.mode = DUPLEX;
\r
7518 // Link the streams if possible.
\r
7519 apiInfo->synchronized = false;
\r
7520 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7521 apiInfo->synchronized = true;
\r
7523 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7524 error( RtAudioError::WARNING );
\r
7528 stream_.mode = mode;
\r
7530 // Setup callback thread.
\r
7531 stream_.callbackInfo.object = (void *) this;
\r
7533 // Set the thread attributes for joinable and realtime scheduling
\r
7534 // priority (optional). The higher priority will only take affect
\r
7535 // if the program is run as root or suid. Note, under Linux
\r
7536 // processes with CAP_SYS_NICE privilege, a user can change
\r
7537 // scheduling policy and priority (thus need not be root). See
\r
7538 // POSIX "capabilities".
\r
7539 pthread_attr_t attr;
\r
7540 pthread_attr_init( &attr );
\r
7541 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7543 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7544 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7545 // We previously attempted to increase the audio callback priority
\r
7546 // to SCHED_RR here via the attributes. However, while no errors
\r
7547 // were reported in doing so, it did not work. So, now this is
\r
7548 // done in the alsaCallbackHandler function.
\r
7549 stream_.callbackInfo.doRealtime = true;
\r
7550 int priority = options->priority;
\r
7551 int min = sched_get_priority_min( SCHED_RR );
\r
7552 int max = sched_get_priority_max( SCHED_RR );
\r
7553 if ( priority < min ) priority = min;
\r
7554 else if ( priority > max ) priority = max;
\r
7555 stream_.callbackInfo.priority = priority;
\r
7559 stream_.callbackInfo.isRunning = true;
\r
7560 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7561 pthread_attr_destroy( &attr );
\r
7563 stream_.callbackInfo.isRunning = false;
\r
7564 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7573 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7574 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7575 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7577 stream_.apiHandle = 0;
\r
7580 if ( phandle) snd_pcm_close( phandle );
\r
7582 for ( int i=0; i<2; i++ ) {
\r
7583 if ( stream_.userBuffer[i] ) {
\r
7584 free( stream_.userBuffer[i] );
\r
7585 stream_.userBuffer[i] = 0;
\r
7589 if ( stream_.deviceBuffer ) {
\r
7590 free( stream_.deviceBuffer );
\r
7591 stream_.deviceBuffer = 0;
\r
7594 stream_.state = STREAM_CLOSED;
\r
// Close the ALSA stream: stop the callback thread, drop any in-flight PCM
// data, close both PCM handles, free the AlsaHandle and all internal
// buffers, and return the stream object to the CLOSED state.
// NOTE(review): this extraction appears to have dropped lines (the original
// line numbers jump, e.g. 7602 -> 7606); missing pieces are likely the early
// "return;" after the warning and several closing braces. Verify against
// the canonical RtAudio source before editing.
7598 void RtApiAlsa :: closeStream()
\r
7600 if ( stream_.state == STREAM_CLOSED ) {
\r
7601 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7602 error( RtAudioError::WARNING );
\r
7606 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
// Clear the run flag first so the callback thread's loop exits, then wake
// it if it is parked on the condition variable (STOPPED state) so that
// pthread_join below cannot deadlock.
7607 stream_.callbackInfo.isRunning = false;
\r
7608 MUTEX_LOCK( &stream_.mutex );
\r
7609 if ( stream_.state == STREAM_STOPPED ) {
\r
7610 apiInfo->runnable = true;
\r
7611 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7613 MUTEX_UNLOCK( &stream_.mutex );
\r
7614 pthread_join( stream_.callbackInfo.thread, NULL );
\r
// If still running, drop (abort) the PCM streams immediately rather than
// draining: handles[0] is playback, handles[1] is capture.
7616 if ( stream_.state == STREAM_RUNNING ) {
\r
7617 stream_.state = STREAM_STOPPED;
\r
7618 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7619 snd_pcm_drop( apiInfo->handles[0] );
\r
7620 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7621 snd_pcm_drop( apiInfo->handles[1] );
\r
// Tear down the per-API handle: condition variable, then both PCM devices.
7625 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7626 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7627 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7629 stream_.apiHandle = 0;
\r
// Free the user buffers for both directions (0 = output, 1 = input).
7632 for ( int i=0; i<2; i++ ) {
\r
7633 if ( stream_.userBuffer[i] ) {
\r
7634 free( stream_.userBuffer[i] );
\r
7635 stream_.userBuffer[i] = 0;
\r
7639 if ( stream_.deviceBuffer ) {
\r
7640 free( stream_.deviceBuffer );
\r
7641 stream_.deviceBuffer = 0;
\r
7644 stream_.mode = UNINITIALIZED;
\r
7645 stream_.state = STREAM_CLOSED;
\r
// Start (or restart) a stopped ALSA stream: prepare each PCM device that is
// not already in the PREPARED state, mark the stream RUNNING, and wake the
// callback thread waiting on runnable_cv.
// NOTE(review): extraction dropped lines here (jumps such as 7655 -> 7659,
// 7671 -> 7677 in the original numbering) — likely an early "return;", a
// "goto unlock;" error path, and closing braces. Confirm against upstream.
7648 void RtApiAlsa :: startStream()
\r
7650 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7653 if ( stream_.state == STREAM_RUNNING ) {
\r
7654 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7655 error( RtAudioError::WARNING );
\r
7659 MUTEX_LOCK( &stream_.mutex );
\r
7662 snd_pcm_state_t state;
\r
7663 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7664 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
// Playback side: handle[0].
7665 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7666 state = snd_pcm_state( handle[0] );
\r
7667 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7668 result = snd_pcm_prepare( handle[0] );
\r
7669 if ( result < 0 ) {
\r
7670 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7671 errorText_ = errorStream_.str();
\r
// Capture side: handle[1]. Skipped when the two devices are linked
// (synchronized), since starting playback then starts capture too.
7677 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7678 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7679 state = snd_pcm_state( handle[1] );
\r
7680 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7681 result = snd_pcm_prepare( handle[1] );
\r
7682 if ( result < 0 ) {
\r
7683 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7684 errorText_ = errorStream_.str();
\r
7690 stream_.state = STREAM_RUNNING;
\r
// Release the callback thread blocked in callbackEvent().
7693 apiInfo->runnable = true;
\r
7694 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7695 MUTEX_UNLOCK( &stream_.mutex );
\r
7697 if ( result >= 0 ) return;
\r
7698 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream gracefully: drain queued playback samples (snd_pcm_drain)
// unless the devices are linked, in which case drop is used; capture is
// always dropped. Contrast with abortStream(), which drops playback too.
// NOTE(review): extraction dropped lines (e.g. 7706 -> 7710, 7723 -> 7728
// in the original numbering) — likely an early "return;", "goto unlock;"
// error paths, and closing braces.
7701 void RtApiAlsa :: stopStream()
\r
7704 if ( stream_.state == STREAM_STOPPED ) {
\r
7705 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7706 error( RtAudioError::WARNING );
\r
// State is flipped BEFORE taking the mutex so the callback thread parks
// itself on the next iteration.
7710 stream_.state = STREAM_STOPPED;
\r
7711 MUTEX_LOCK( &stream_.mutex );
\r
7714 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7715 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7716 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7717 if ( apiInfo->synchronized )
\r
7718 result = snd_pcm_drop( handle[0] );
\r
7720 result = snd_pcm_drain( handle[0] );
\r
7721 if ( result < 0 ) {
\r
7722 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7723 errorText_ = errorStream_.str();
\r
7728 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7729 result = snd_pcm_drop( handle[1] );
\r
7730 if ( result < 0 ) {
\r
7731 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7732 errorText_ = errorStream_.str();
\r
7738 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7739 MUTEX_UNLOCK( &stream_.mutex );
\r
7741 if ( result >= 0 ) return;
\r
7742 error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the stream immediately: snd_pcm_drop on both directions, discarding
// any samples still queued (unlike stopStream(), which drains playback).
// NOTE(review): extraction dropped lines (original numbering jumps, e.g.
// 7750 -> 7754) — likely an early "return;", error-path "goto unlock;"
// statements, and closing braces.
7745 void RtApiAlsa :: abortStream()
\r
7748 if ( stream_.state == STREAM_STOPPED ) {
\r
7749 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7750 error( RtAudioError::WARNING );
\r
7754 stream_.state = STREAM_STOPPED;
\r
7755 MUTEX_LOCK( &stream_.mutex );
\r
7758 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7759 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7760 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7761 result = snd_pcm_drop( handle[0] );
\r
7762 if ( result < 0 ) {
\r
7763 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7764 errorText_ = errorStream_.str();
\r
// Capture handle needs its own drop only when not linked to playback.
7769 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7770 result = snd_pcm_drop( handle[1] );
\r
7771 if ( result < 0 ) {
\r
7772 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7773 errorText_ = errorStream_.str();
\r
7779 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7780 MUTEX_UNLOCK( &stream_.mutex );
\r
7782 if ( result >= 0 ) return;
\r
7783 error( RtAudioError::SYSTEM_ERROR );
\r
// One iteration of the audio callback: park on runnable_cv while stopped,
// invoke the user callback, then read from the capture device and/or write
// to the playback device, handling xruns (-EPIPE) by re-preparing the PCM.
// Index convention throughout: [0] = playback/OUTPUT, [1] = capture/INPUT.
// NOTE(review): extraction dropped many lines here (returns, abortStream
// call for doStopStream == 2, "goto unlock/tryOutput" labels, braces) —
// confirm control flow against the canonical RtAudio source before editing.
7786 void RtApiAlsa :: callbackEvent()
\r
7788 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
// While STOPPED, block on the condition variable until startStream() or
// closeStream() sets apiInfo->runnable.
7789 if ( stream_.state == STREAM_STOPPED ) {
\r
7790 MUTEX_LOCK( &stream_.mutex );
\r
7791 while ( !apiInfo->runnable )
\r
7792 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7794 if ( stream_.state != STREAM_RUNNING ) {
\r
7795 MUTEX_UNLOCK( &stream_.mutex );
\r
7798 MUTEX_UNLOCK( &stream_.mutex );
\r
7801 if ( stream_.state == STREAM_CLOSED ) {
\r
7802 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7803 error( RtAudioError::WARNING );
\r
// Report any xrun flagged by a previous iteration to the user callback via
// the status argument, then clear the flag.
7807 int doStopStream = 0;
\r
7808 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7809 double streamTime = getStreamTime();
\r
7810 RtAudioStreamStatus status = 0;
\r
7811 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7812 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7813 apiInfo->xrun[0] = false;
\r
7815 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7816 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7817 apiInfo->xrun[1] = false;
\r
7819 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7820 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
// A return of 2 requests an immediate abort (handling dropped by
// extraction); 1 requests a graceful stop, handled at the bottom.
7822 if ( doStopStream == 2 ) {
\r
7827 MUTEX_LOCK( &stream_.mutex );
\r
7829 // The state might change while waiting on a mutex.
\r
7830 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7835 snd_pcm_t **handle;
\r
7836 snd_pcm_sframes_t frames;
\r
7837 RtAudioFormat format;
\r
7838 handle = (snd_pcm_t **) apiInfo->handles;
\r
// ---- Capture side ----
7840 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7842 // Setup parameters.
\r
// When conversion is needed, read into the intermediate deviceBuffer
// using the device's channel count/format; otherwise read straight into
// the user buffer.
7843 if ( stream_.doConvertBuffer[1] ) {
\r
7844 buffer = stream_.deviceBuffer;
\r
7845 channels = stream_.nDeviceChannels[1];
\r
7846 format = stream_.deviceFormat[1];
\r
7849 buffer = stream_.userBuffer[1];
\r
7850 channels = stream_.nUserChannels[1];
\r
7851 format = stream_.userFormat;
\r
7854 // Read samples from device in interleaved/non-interleaved format.
\r
7855 if ( stream_.deviceInterleaved[1] )
\r
7856 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
// Non-interleaved: build one pointer per channel into the flat buffer.
7858 void *bufs[channels];
\r
7859 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7860 for ( int i=0; i<channels; i++ )
\r
7861 bufs[i] = (void *) (buffer + (i * offset));
\r
7862 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
// A short read means an error or overrun; -EPIPE in the XRUN state is
// recovered by re-preparing the device and flagging xrun[1] for the next
// callback's status word.
7865 if ( result < (int) stream_.bufferSize ) {
\r
7866 // Either an error or overrun occured.
\r
7867 if ( result == -EPIPE ) {
\r
7868 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7869 if ( state == SND_PCM_STATE_XRUN ) {
\r
7870 apiInfo->xrun[1] = true;
\r
7871 result = snd_pcm_prepare( handle[1] );
\r
7872 if ( result < 0 ) {
\r
7873 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7874 errorText_ = errorStream_.str();
\r
7878 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7879 errorText_ = errorStream_.str();
\r
7883 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7884 errorText_ = errorStream_.str();
\r
7886 error( RtAudioError::WARNING );
\r
7890 // Do byte swapping if necessary.
\r
7891 if ( stream_.doByteSwap[1] )
\r
7892 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
7894 // Do buffer conversion if necessary.
\r
7895 if ( stream_.doConvertBuffer[1] )
\r
7896 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7898 // Check stream latency
\r
7899 result = snd_pcm_delay( handle[1], &frames );
\r
7900 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
// ---- Playback side ----
7905 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7907 // Setup parameters and do buffer conversion if necessary.
\r
7908 if ( stream_.doConvertBuffer[0] ) {
\r
7909 buffer = stream_.deviceBuffer;
\r
7910 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7911 channels = stream_.nDeviceChannels[0];
\r
7912 format = stream_.deviceFormat[0];
\r
7915 buffer = stream_.userBuffer[0];
\r
7916 channels = stream_.nUserChannels[0];
\r
7917 format = stream_.userFormat;
\r
7920 // Do byte swapping if necessary.
\r
7921 if ( stream_.doByteSwap[0] )
\r
7922 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
7924 // Write samples to device in interleaved/non-interleaved format.
\r
7925 if ( stream_.deviceInterleaved[0] )
\r
7926 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
7928 void *bufs[channels];
\r
7929 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7930 for ( int i=0; i<channels; i++ )
\r
7931 bufs[i] = (void *) (buffer + (i * offset));
\r
7932 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
// Short write: underrun recovery mirrors the capture path above.
7935 if ( result < (int) stream_.bufferSize ) {
\r
7936 // Either an error or underrun occured.
\r
7937 if ( result == -EPIPE ) {
\r
7938 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
7939 if ( state == SND_PCM_STATE_XRUN ) {
\r
7940 apiInfo->xrun[0] = true;
\r
7941 result = snd_pcm_prepare( handle[0] );
\r
7942 if ( result < 0 ) {
\r
7943 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
7944 errorText_ = errorStream_.str();
\r
7948 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7949 errorText_ = errorStream_.str();
\r
7953 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
7954 errorText_ = errorStream_.str();
\r
7956 error( RtAudioError::WARNING );
\r
7960 // Check stream latency
\r
7961 result = snd_pcm_delay( handle[0], &frames );
\r
7962 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
7966 MUTEX_UNLOCK( &stream_.mutex );
\r
7968 RtApi::tickStreamTime();
\r
7969 if ( doStopStream == 1 ) this->stopStream();
\r
7972 static void *alsaCallbackHandler( void *ptr )
\r
7974 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7975 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
7976 bool *isRunning = &info->isRunning;
\r
7978 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7979 if ( &info->doRealtime ) {
\r
7980 pthread_t tID = pthread_self(); // ID of this thread
\r
7981 sched_param prio = { info->priority }; // scheduling priority of thread
\r
7982 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
7986 while ( *isRunning == true ) {
\r
7987 pthread_testcancel();
\r
7988 object->callbackEvent();
\r
7991 pthread_exit( NULL );
\r
7994 //******************** End of __LINUX_ALSA__ *********************//
\r
7997 #if defined(__LINUX_PULSE__)
\r
7999 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8000 // and Tristan Matthews.
\r
8002 #include <pulse/error.h>
\r
8003 #include <pulse/simple.h>
\r
// Capability tables for the PulseAudio backend.
// SUPPORTED_SAMPLERATES is a 0-terminated list of rates advertised by
// getDeviceInfo().
8006 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
\r
8007 44100, 48000, 96000, 0};
\r
// Maps each RtAudio sample format onto the corresponding PulseAudio
// pa_sample_format_t. NOTE(review): extraction dropped the struct's closing
// "};" line (original numbering jumps 8011 -> 8014).
8009 struct rtaudio_pa_format_mapping_t {
\r
8010 RtAudioFormat rtaudio_format;
\r
8011 pa_sample_format_t pa_format;
\r
// Sentinel-terminated table: the {0, PA_SAMPLE_INVALID} entry marks the end.
8014 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8015 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8016 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8017 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8018 {0, PA_SAMPLE_INVALID}};
\r
// Per-stream state for the PulseAudio backend: the simple-API playback and
// record connections plus the runnable flag/condvar used to park the
// callback thread while the stream is stopped.
// NOTE(review): extraction dropped member lines here (original numbering
// jumps 8021 -> 8024 -> 8026); the constructor's initializer list shows the
// missing members are at least "pa_simple *s_rec" and "bool runnable" — a
// pthread_t thread member is also likely, given pah->thread usage below.
8020 struct PulseAudioHandle {
\r
8021 pa_simple *s_play;
\r
8024 pthread_cond_t runnable_cv;
\r
8026 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
// Destructor: closes the stream if it is still open.
// NOTE(review): the call on the line following the condition (presumably
// "closeStream();") was dropped by extraction — verify against upstream.
8029 RtApiPulse::~RtApiPulse()
\r
8031 if ( stream_.state != STREAM_CLOSED )
\r
// Device queries for the PulseAudio backend. The simple API exposes no
// device enumeration, so a single synthetic "PulseAudio" device is reported
// with fixed stereo capabilities and the static capability tables above.
// NOTE(review): getDeviceCount's body was dropped by extraction (original
// numbering jumps 8035 -> 8040); presumably it returns 1.
8035 unsigned int RtApiPulse::getDeviceCount( void )
\r
8040 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8042 RtAudio::DeviceInfo info;
\r
8043 info.probed = true;
\r
8044 info.name = "PulseAudio";
\r
8045 info.outputChannels = 2;
\r
8046 info.inputChannels = 2;
\r
8047 info.duplexChannels = 2;
\r
8048 info.isDefaultOutput = true;
\r
8049 info.isDefaultInput = true;
\r
// Copy the 0-terminated static rate list into the info structure.
8051 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8052 info.sampleRates.push_back( *sr );
\r
8054 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
// Thread entry point for the PulseAudio callback thread: loops invoking the
// stream's callbackEvent() until CallbackInfo::isRunning is cleared by
// closeStream(). pthread_testcancel() provides a cancellation point since
// the blocking pa_simple calls are not ones.
8059 static void *pulseaudio_callback( void * user )
\r
8061 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8062 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8063 volatile bool *isRunning = &cbi->isRunning;
\r
8065 while ( *isRunning ) {
\r
8066 pthread_testcancel();
\r
8067 context->callbackEvent();
\r
8070 pthread_exit( NULL );
\r
// Close the PulseAudio stream: stop and join the callback thread, flush and
// free the playback/record connections, release the handle and user
// buffers, and return the stream object to the CLOSED state.
// NOTE(review): extraction dropped lines (e.g. the "if ( pah->s_rec )"
// guard before 8092, "delete pah", and closing braces) — original numbering
// jumps 8089 -> 8092 and 8094 -> 8096.
8073 void RtApiPulse::closeStream( void )
\r
8075 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
// Clear the run flag, then wake the callback thread if it is parked on the
// condition variable so the join below cannot deadlock.
8077 stream_.callbackInfo.isRunning = false;
\r
8079 MUTEX_LOCK( &stream_.mutex );
\r
8080 if ( stream_.state == STREAM_STOPPED ) {
\r
8081 pah->runnable = true;
\r
8082 pthread_cond_signal( &pah->runnable_cv );
\r
8084 MUTEX_UNLOCK( &stream_.mutex );
\r
8086 pthread_join( pah->thread, 0 );
\r
8087 if ( pah->s_play ) {
\r
8088 pa_simple_flush( pah->s_play, NULL );
\r
8089 pa_simple_free( pah->s_play );
\r
8092 pa_simple_free( pah->s_rec );
\r
8094 pthread_cond_destroy( &pah->runnable_cv );
\r
8096 stream_.apiHandle = 0;
\r
// Free user buffers for both directions (0 = output, 1 = input).
8099 if ( stream_.userBuffer[0] ) {
\r
8100 free( stream_.userBuffer[0] );
\r
8101 stream_.userBuffer[0] = 0;
\r
8103 if ( stream_.userBuffer[1] ) {
\r
8104 free( stream_.userBuffer[1] );
\r
8105 stream_.userBuffer[1] = 0;
\r
8108 stream_.state = STREAM_CLOSED;
\r
8109 stream_.mode = UNINITIALIZED;
\r
// One iteration of the PulseAudio callback: park on runnable_cv while
// stopped, invoke the user callback, then blocking-write the output buffer
// and/or blocking-read the input buffer via the pa_simple API, converting
// between user and device formats when required.
// NOTE(review): extraction dropped lines (returns, the doStopStream == 2
// abort path, declarations of "bytes"/"pa_error", goto label, braces) —
// verify control flow against the canonical RtAudio source before editing.
8112 void RtApiPulse::callbackEvent( void )
\r
8114 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
// While STOPPED, block until startStream() or closeStream() sets runnable.
8116 if ( stream_.state == STREAM_STOPPED ) {
\r
8117 MUTEX_LOCK( &stream_.mutex );
\r
8118 while ( !pah->runnable )
\r
8119 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8121 if ( stream_.state != STREAM_RUNNING ) {
\r
8122 MUTEX_UNLOCK( &stream_.mutex );
\r
8125 MUTEX_UNLOCK( &stream_.mutex );
\r
8128 if ( stream_.state == STREAM_CLOSED ) {
\r
8129 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8130 "this shouldn't happen!";
\r
8131 error( RtAudioError::WARNING );
\r
// Invoke the user callback with both user buffers; its return value selects
// continue (0), stop (1) or abort (2).
8135 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8136 double streamTime = getStreamTime();
\r
8137 RtAudioStreamStatus status = 0;
\r
8138 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8139 stream_.bufferSize, streamTime, status,
\r
8140 stream_.callbackInfo.userData );
\r
8142 if ( doStopStream == 2 ) {
\r
8147 MUTEX_LOCK( &stream_.mutex );
\r
// The pa_simple calls use the intermediate deviceBuffer when a user/device
// format conversion is needed, otherwise the user buffer directly.
8148 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8149 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8151 if ( stream_.state != STREAM_RUNNING )
\r
// ---- Playback: convert (if needed), then blocking write. ----
8156 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8157 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8158 convertBuffer( stream_.deviceBuffer,
\r
8159 stream_.userBuffer[OUTPUT],
\r
8160 stream_.convertInfo[OUTPUT] );
\r
8161 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8162 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8164 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8165 formatBytes( stream_.userFormat );
\r
8167 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8168 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8169 pa_strerror( pa_error ) << ".";
\r
8170 errorText_ = errorStream_.str();
\r
8171 error( RtAudioError::WARNING );
\r
// ---- Capture: blocking read, then convert (if needed). ----
8175 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8176 if ( stream_.doConvertBuffer[INPUT] )
\r
8177 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8178 formatBytes( stream_.deviceFormat[INPUT] );
\r
8180 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8181 formatBytes( stream_.userFormat );
\r
8183 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8184 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8185 pa_strerror( pa_error ) << ".";
\r
8186 errorText_ = errorStream_.str();
\r
8187 error( RtAudioError::WARNING );
\r
8189 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8190 convertBuffer( stream_.userBuffer[INPUT],
\r
8191 stream_.deviceBuffer,
\r
8192 stream_.convertInfo[INPUT] );
\r
8197 MUTEX_UNLOCK( &stream_.mutex );
\r
8198 RtApi::tickStreamTime();
\r
// doStopStream == 1: graceful stop requested by the user callback
// (the stopStream() call on the following original line was dropped
// by extraction).
8200 if ( doStopStream == 1 )
\r
// Start a stopped PulseAudio stream: validate state, mark it RUNNING and
// wake the callback thread parked on runnable_cv.
// NOTE(review): extraction dropped the "return;" lines after the two error
// reports (original numbering jumps 8210 -> 8213 and 8215 -> 8219).
8204 void RtApiPulse::startStream( void )
\r
8206 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8208 if ( stream_.state == STREAM_CLOSED ) {
\r
8209 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8210 error( RtAudioError::INVALID_USE );
\r
8213 if ( stream_.state == STREAM_RUNNING ) {
\r
8214 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8215 error( RtAudioError::WARNING );
\r
8219 MUTEX_LOCK( &stream_.mutex );
\r
8221 stream_.state = STREAM_RUNNING;
\r
// Release the callback thread blocked in callbackEvent().
8223 pah->runnable = true;
\r
8224 pthread_cond_signal( &pah->runnable_cv );
\r
8225 MUTEX_UNLOCK( &stream_.mutex );
\r
8228 void RtApiPulse::stopStream( void )
\r
8230 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8232 if ( stream_.state == STREAM_CLOSED ) {
\r
8233 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8234 error( RtAudioError::INVALID_USE );
\r
8237 if ( stream_.state == STREAM_STOPPED ) {
\r
8238 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8239 error( RtAudioError::WARNING );
\r
8243 stream_.state = STREAM_STOPPED;
\r
8244 MUTEX_LOCK( &stream_.mutex );
\r
8246 if ( pah && pah->s_play ) {
\r
8248 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8249 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8250 pa_strerror( pa_error ) << ".";
\r
8251 errorText_ = errorStream_.str();
\r
8252 MUTEX_UNLOCK( &stream_.mutex );
\r
8253 error( RtAudioError::SYSTEM_ERROR );
\r
8258 stream_.state = STREAM_STOPPED;
\r
8259 MUTEX_UNLOCK( &stream_.mutex );
\r
8262 void RtApiPulse::abortStream( void )
\r
8264 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8266 if ( stream_.state == STREAM_CLOSED ) {
\r
8267 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8268 error( RtAudioError::INVALID_USE );
\r
8271 if ( stream_.state == STREAM_STOPPED ) {
\r
8272 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8273 error( RtAudioError::WARNING );
\r
8277 stream_.state = STREAM_STOPPED;
\r
8278 MUTEX_LOCK( &stream_.mutex );
\r
8280 if ( pah && pah->s_play ) {
\r
8282 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8283 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8284 pa_strerror( pa_error ) << ".";
\r
8285 errorText_ = errorStream_.str();
\r
8286 MUTEX_UNLOCK( &stream_.mutex );
\r
8287 error( RtAudioError::SYSTEM_ERROR );
\r
8292 stream_.state = STREAM_STOPPED;
\r
8293 MUTEX_UNLOCK( &stream_.mutex );
\r
8296 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8297 unsigned int channels, unsigned int firstChannel,
\r
8298 unsigned int sampleRate, RtAudioFormat format,
\r
8299 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8301 PulseAudioHandle *pah = 0;
\r
8302 unsigned long bufferBytes = 0;
\r
8303 pa_sample_spec ss;
\r
8305 if ( device != 0 ) return false;
\r
8306 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8307 if ( channels != 1 && channels != 2 ) {
\r
8308 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8311 ss.channels = channels;
\r
8313 if ( firstChannel != 0 ) return false;
\r
8315 bool sr_found = false;
\r
8316 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8317 if ( sampleRate == *sr ) {
\r
8319 stream_.sampleRate = sampleRate;
\r
8320 ss.rate = sampleRate;
\r
8324 if ( !sr_found ) {
\r
8325 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8329 bool sf_found = 0;
\r
8330 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8331 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8332 if ( format == sf->rtaudio_format ) {
\r
8334 stream_.userFormat = sf->rtaudio_format;
\r
8335 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8336 ss.format = sf->pa_format;
\r
8340 if ( !sf_found ) { // Use internal data format conversion.
\r
8341 stream_.userFormat = format;
\r
8342 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8343 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8346 // Set other stream parameters.
\r
8347 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8348 else stream_.userInterleaved = true;
\r
8349 stream_.deviceInterleaved[mode] = true;
\r
8350 stream_.nBuffers = 1;
\r
8351 stream_.doByteSwap[mode] = false;
\r
8352 stream_.nUserChannels[mode] = channels;
\r
8353 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8354 stream_.channelOffset[mode] = 0;
\r
8355 std::string streamName = "RtAudio";
\r
8357 // Set flags for buffer conversion.
\r
8358 stream_.doConvertBuffer[mode] = false;
\r
8359 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8360 stream_.doConvertBuffer[mode] = true;
\r
8361 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8362 stream_.doConvertBuffer[mode] = true;
\r
8364 // Allocate necessary internal buffers.
\r
8365 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8366 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8367 if ( stream_.userBuffer[mode] == NULL ) {
\r
8368 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8371 stream_.bufferSize = *bufferSize;
\r
8373 if ( stream_.doConvertBuffer[mode] ) {
\r
8375 bool makeBuffer = true;
\r
8376 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8377 if ( mode == INPUT ) {
\r
8378 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8379 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8380 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8384 if ( makeBuffer ) {
\r
8385 bufferBytes *= *bufferSize;
\r
8386 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8387 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8388 if ( stream_.deviceBuffer == NULL ) {
\r
8389 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8395 stream_.device[mode] = device;
\r
8397 // Setup the buffer conversion information structure.
\r
8398 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8400 if ( !stream_.apiHandle ) {
\r
8401 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8403 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8407 stream_.apiHandle = pah;
\r
8408 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8409 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8413 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8416 if ( !options->streamName.empty() ) streamName = options->streamName;
\r
8419 pa_buffer_attr buffer_attr;
\r
8420 buffer_attr.fragsize = bufferBytes;
\r
8421 buffer_attr.maxlength = -1;
\r
8423 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8424 if ( !pah->s_rec ) {
\r
8425 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8430 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8431 if ( !pah->s_play ) {
\r
8432 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8440 if ( stream_.mode == UNINITIALIZED )
\r
8441 stream_.mode = mode;
\r
8442 else if ( stream_.mode == mode )
\r
8445 stream_.mode = DUPLEX;
\r
8447 if ( !stream_.callbackInfo.isRunning ) {
\r
8448 stream_.callbackInfo.object = this;
\r
8449 stream_.callbackInfo.isRunning = true;
\r
8450 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8451 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8456 stream_.state = STREAM_STOPPED;
\r
8460 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8461 pthread_cond_destroy( &pah->runnable_cv );
\r
8463 stream_.apiHandle = 0;
\r
8466 for ( int i=0; i<2; i++ ) {
\r
8467 if ( stream_.userBuffer[i] ) {
\r
8468 free( stream_.userBuffer[i] );
\r
8469 stream_.userBuffer[i] = 0;
\r
8473 if ( stream_.deviceBuffer ) {
\r
8474 free( stream_.deviceBuffer );
\r
8475 stream_.deviceBuffer = 0;
\r
8481 //******************** End of __LINUX_PULSE__ *********************//
\r
8484 #if defined(__LINUX_OSS__)
\r
8486 #include <unistd.h>
\r
8487 #include <sys/ioctl.h>
\r
8488 #include <unistd.h>
\r
8489 #include <fcntl.h>
\r
8490 #include <sys/soundcard.h>
\r
8491 #include <errno.h>
\r
8494 static void *ossCallbackHandler(void * ptr);
\r
8496 // A structure to hold various information related to the OSS API
\r
8497 // implementation.
\r
struct OssHandle {
  int id[2];    // device ids (0 = playback, 1 = record)
  bool xrun[2];          // over/underflow flags, consumed by callbackEvent()
  bool triggered;        // output trigger state, cleared on stop/abort
  pthread_cond_t runnable;  // signaled when the stream is (re)started or closed

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
8508 RtApiOss :: RtApiOss()
\r
8510 // Nothing to do here.
\r
8513 RtApiOss :: ~RtApiOss()
\r
8515 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8518 unsigned int RtApiOss :: getDeviceCount( void )
\r
8520 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8521 if ( mixerfd == -1 ) {
\r
8522 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8523 error( RtAudioError::WARNING );
\r
8527 oss_sysinfo sysinfo;
\r
8528 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8530 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8531 error( RtAudioError::WARNING );
\r
8536 return sysinfo.numaudios;
\r
8539 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8541 RtAudio::DeviceInfo info;
\r
8542 info.probed = false;
\r
8544 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8545 if ( mixerfd == -1 ) {
\r
8546 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8547 error( RtAudioError::WARNING );
\r
8551 oss_sysinfo sysinfo;
\r
8552 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8553 if ( result == -1 ) {
\r
8555 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8556 error( RtAudioError::WARNING );
\r
8560 unsigned nDevices = sysinfo.numaudios;
\r
8561 if ( nDevices == 0 ) {
\r
8563 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8564 error( RtAudioError::INVALID_USE );
\r
8568 if ( device >= nDevices ) {
\r
8570 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8571 error( RtAudioError::INVALID_USE );
\r
8575 oss_audioinfo ainfo;
\r
8576 ainfo.dev = device;
\r
8577 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8579 if ( result == -1 ) {
\r
8580 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8581 errorText_ = errorStream_.str();
\r
8582 error( RtAudioError::WARNING );
\r
8587 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8588 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8589 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8590 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8591 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8594 // Probe data formats ... do for input
\r
8595 unsigned long mask = ainfo.iformats;
\r
8596 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8597 info.nativeFormats |= RTAUDIO_SINT16;
\r
8598 if ( mask & AFMT_S8 )
\r
8599 info.nativeFormats |= RTAUDIO_SINT8;
\r
8600 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8601 info.nativeFormats |= RTAUDIO_SINT32;
\r
8602 if ( mask & AFMT_FLOAT )
\r
8603 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8604 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8605 info.nativeFormats |= RTAUDIO_SINT24;
\r
8607 // Check that we have at least one supported format
\r
8608 if ( info.nativeFormats == 0 ) {
\r
8609 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8610 errorText_ = errorStream_.str();
\r
8611 error( RtAudioError::WARNING );
\r
8615 // Probe the supported sample rates.
\r
8616 info.sampleRates.clear();
\r
8617 if ( ainfo.nrates ) {
\r
8618 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8619 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8620 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8621 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8628 // Check min and max rate values;
\r
8629 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8630 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
8631 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8635 if ( info.sampleRates.size() == 0 ) {
\r
8636 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8637 errorText_ = errorStream_.str();
\r
8638 error( RtAudioError::WARNING );
\r
8641 info.probed = true;
\r
8642 info.name = ainfo.name;
\r
8649 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8650 unsigned int firstChannel, unsigned int sampleRate,
\r
8651 RtAudioFormat format, unsigned int *bufferSize,
\r
8652 RtAudio::StreamOptions *options )
\r
8654 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8655 if ( mixerfd == -1 ) {
\r
8656 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8660 oss_sysinfo sysinfo;
\r
8661 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8662 if ( result == -1 ) {
\r
8664 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8668 unsigned nDevices = sysinfo.numaudios;
\r
8669 if ( nDevices == 0 ) {
\r
8670 // This should not happen because a check is made before this function is called.
\r
8672 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8676 if ( device >= nDevices ) {
\r
8677 // This should not happen because a check is made before this function is called.
\r
8679 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8683 oss_audioinfo ainfo;
\r
8684 ainfo.dev = device;
\r
8685 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8687 if ( result == -1 ) {
\r
8688 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8689 errorText_ = errorStream_.str();
\r
8693 // Check if device supports input or output
\r
8694 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8695 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8696 if ( mode == OUTPUT )
\r
8697 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8699 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8700 errorText_ = errorStream_.str();
\r
8705 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8706 if ( mode == OUTPUT )
\r
8707 flags |= O_WRONLY;
\r
8708 else { // mode == INPUT
\r
8709 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8710 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8711 close( handle->id[0] );
\r
8712 handle->id[0] = 0;
\r
8713 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8714 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8715 errorText_ = errorStream_.str();
\r
8718 // Check that the number previously set channels is the same.
\r
8719 if ( stream_.nUserChannels[0] != channels ) {
\r
8720 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8721 errorText_ = errorStream_.str();
\r
8727 flags |= O_RDONLY;
\r
8730 // Set exclusive access if specified.
\r
8731 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8733 // Try to open the device.
\r
8735 fd = open( ainfo.devnode, flags, 0 );
\r
8737 if ( errno == EBUSY )
\r
8738 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8740 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8741 errorText_ = errorStream_.str();
\r
8745 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8747 if ( flags | O_RDWR ) {
\r
8748 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8749 if ( result == -1) {
\r
8750 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8751 errorText_ = errorStream_.str();
\r
8757 // Check the device channel support.
\r
8758 stream_.nUserChannels[mode] = channels;
\r
8759 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8761 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8762 errorText_ = errorStream_.str();
\r
8766 // Set the number of channels.
\r
8767 int deviceChannels = channels + firstChannel;
\r
8768 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8769 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8771 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8772 errorText_ = errorStream_.str();
\r
8775 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8777 // Get the data format mask
\r
8779 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8780 if ( result == -1 ) {
\r
8782 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8783 errorText_ = errorStream_.str();
\r
8787 // Determine how to set the device format.
\r
8788 stream_.userFormat = format;
\r
8789 int deviceFormat = -1;
\r
8790 stream_.doByteSwap[mode] = false;
\r
8791 if ( format == RTAUDIO_SINT8 ) {
\r
8792 if ( mask & AFMT_S8 ) {
\r
8793 deviceFormat = AFMT_S8;
\r
8794 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8797 else if ( format == RTAUDIO_SINT16 ) {
\r
8798 if ( mask & AFMT_S16_NE ) {
\r
8799 deviceFormat = AFMT_S16_NE;
\r
8800 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8802 else if ( mask & AFMT_S16_OE ) {
\r
8803 deviceFormat = AFMT_S16_OE;
\r
8804 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8805 stream_.doByteSwap[mode] = true;
\r
8808 else if ( format == RTAUDIO_SINT24 ) {
\r
8809 if ( mask & AFMT_S24_NE ) {
\r
8810 deviceFormat = AFMT_S24_NE;
\r
8811 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8813 else if ( mask & AFMT_S24_OE ) {
\r
8814 deviceFormat = AFMT_S24_OE;
\r
8815 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8816 stream_.doByteSwap[mode] = true;
\r
8819 else if ( format == RTAUDIO_SINT32 ) {
\r
8820 if ( mask & AFMT_S32_NE ) {
\r
8821 deviceFormat = AFMT_S32_NE;
\r
8822 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8824 else if ( mask & AFMT_S32_OE ) {
\r
8825 deviceFormat = AFMT_S32_OE;
\r
8826 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8827 stream_.doByteSwap[mode] = true;
\r
8831 if ( deviceFormat == -1 ) {
\r
8832 // The user requested format is not natively supported by the device.
\r
8833 if ( mask & AFMT_S16_NE ) {
\r
8834 deviceFormat = AFMT_S16_NE;
\r
8835 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8837 else if ( mask & AFMT_S32_NE ) {
\r
8838 deviceFormat = AFMT_S32_NE;
\r
8839 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8841 else if ( mask & AFMT_S24_NE ) {
\r
8842 deviceFormat = AFMT_S24_NE;
\r
8843 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8845 else if ( mask & AFMT_S16_OE ) {
\r
8846 deviceFormat = AFMT_S16_OE;
\r
8847 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8848 stream_.doByteSwap[mode] = true;
\r
8850 else if ( mask & AFMT_S32_OE ) {
\r
8851 deviceFormat = AFMT_S32_OE;
\r
8852 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8853 stream_.doByteSwap[mode] = true;
\r
8855 else if ( mask & AFMT_S24_OE ) {
\r
8856 deviceFormat = AFMT_S24_OE;
\r
8857 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8858 stream_.doByteSwap[mode] = true;
\r
8860 else if ( mask & AFMT_S8) {
\r
8861 deviceFormat = AFMT_S8;
\r
8862 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8866 if ( stream_.deviceFormat[mode] == 0 ) {
\r
8867 // This really shouldn't happen ...
\r
8869 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8870 errorText_ = errorStream_.str();
\r
8874 // Set the data format.
\r
8875 int temp = deviceFormat;
\r
8876 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
8877 if ( result == -1 || deviceFormat != temp ) {
\r
8879 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
8880 errorText_ = errorStream_.str();
\r
8884 // Attempt to set the buffer size. According to OSS, the minimum
\r
8885 // number of buffers is two. The supposed minimum buffer size is 16
\r
8886 // bytes, so that will be our lower bound. The argument to this
\r
8887 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
8888 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
8889 // We'll check the actual value used near the end of the setup
\r
8891 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
8892 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
8894 if ( options ) buffers = options->numberOfBuffers;
\r
8895 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
8896 if ( buffers < 2 ) buffers = 3;
\r
8897 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
8898 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
8899 if ( result == -1 ) {
\r
8901 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
8902 errorText_ = errorStream_.str();
\r
8905 stream_.nBuffers = buffers;
\r
8907 // Save buffer size (in sample frames).
\r
8908 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
8909 stream_.bufferSize = *bufferSize;
\r
8911 // Set the sample rate.
\r
8912 int srate = sampleRate;
\r
8913 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
8914 if ( result == -1 ) {
\r
8916 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
8917 errorText_ = errorStream_.str();
\r
8921 // Verify the sample rate setup worked.
\r
8922 if ( abs( srate - sampleRate ) > 100 ) {
\r
8924 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
8925 errorText_ = errorStream_.str();
\r
8928 stream_.sampleRate = sampleRate;
\r
8930 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8931 // We're doing duplex setup here.
\r
8932 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
8933 stream_.nDeviceChannels[0] = deviceChannels;
\r
8936 // Set interleaving parameters.
\r
8937 stream_.userInterleaved = true;
\r
8938 stream_.deviceInterleaved[mode] = true;
\r
8939 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
8940 stream_.userInterleaved = false;
\r
8942 // Set flags for buffer conversion
\r
8943 stream_.doConvertBuffer[mode] = false;
\r
8944 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8945 stream_.doConvertBuffer[mode] = true;
\r
8946 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8947 stream_.doConvertBuffer[mode] = true;
\r
8948 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
8949 stream_.nUserChannels[mode] > 1 )
\r
8950 stream_.doConvertBuffer[mode] = true;
\r
8952 // Allocate the stream handles if necessary and then save.
\r
8953 if ( stream_.apiHandle == 0 ) {
\r
8955 handle = new OssHandle;
\r
8957 catch ( std::bad_alloc& ) {
\r
8958 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
8962 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
8963 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
8967 stream_.apiHandle = (void *) handle;
\r
8970 handle = (OssHandle *) stream_.apiHandle;
\r
8972 handle->id[mode] = fd;
\r
8974 // Allocate necessary internal buffers.
\r
8975 unsigned long bufferBytes;
\r
8976 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8977 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8978 if ( stream_.userBuffer[mode] == NULL ) {
\r
8979 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
8983 if ( stream_.doConvertBuffer[mode] ) {
\r
8985 bool makeBuffer = true;
\r
8986 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8987 if ( mode == INPUT ) {
\r
8988 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8989 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8990 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8994 if ( makeBuffer ) {
\r
8995 bufferBytes *= *bufferSize;
\r
8996 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8997 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8998 if ( stream_.deviceBuffer == NULL ) {
\r
8999 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9005 stream_.device[mode] = device;
\r
9006 stream_.state = STREAM_STOPPED;
\r
9008 // Setup the buffer conversion information structure.
\r
9009 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9011 // Setup thread if necessary.
\r
9012 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9013 // We had already set up an output stream.
\r
9014 stream_.mode = DUPLEX;
\r
9015 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9018 stream_.mode = mode;
\r
9020 // Setup callback thread.
\r
9021 stream_.callbackInfo.object = (void *) this;
\r
9023 // Set the thread attributes for joinable and realtime scheduling
\r
9024 // priority. The higher priority will only take affect if the
\r
9025 // program is run as root or suid.
\r
9026 pthread_attr_t attr;
\r
9027 pthread_attr_init( &attr );
\r
9028 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9029 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9030 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9031 struct sched_param param;
\r
9032 int priority = options->priority;
\r
9033 int min = sched_get_priority_min( SCHED_RR );
\r
9034 int max = sched_get_priority_max( SCHED_RR );
\r
9035 if ( priority < min ) priority = min;
\r
9036 else if ( priority > max ) priority = max;
\r
9037 param.sched_priority = priority;
\r
9038 pthread_attr_setschedparam( &attr, ¶m );
\r
9039 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9042 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9044 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9047 stream_.callbackInfo.isRunning = true;
\r
9048 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9049 pthread_attr_destroy( &attr );
\r
9051 stream_.callbackInfo.isRunning = false;
\r
9052 errorText_ = "RtApiOss::error creating callback thread!";
\r
9061 pthread_cond_destroy( &handle->runnable );
\r
9062 if ( handle->id[0] ) close( handle->id[0] );
\r
9063 if ( handle->id[1] ) close( handle->id[1] );
\r
9065 stream_.apiHandle = 0;
\r
9068 for ( int i=0; i<2; i++ ) {
\r
9069 if ( stream_.userBuffer[i] ) {
\r
9070 free( stream_.userBuffer[i] );
\r
9071 stream_.userBuffer[i] = 0;
\r
9075 if ( stream_.deviceBuffer ) {
\r
9076 free( stream_.deviceBuffer );
\r
9077 stream_.deviceBuffer = 0;
\r
9083 void RtApiOss :: closeStream()
\r
9085 if ( stream_.state == STREAM_CLOSED ) {
\r
9086 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9087 error( RtAudioError::WARNING );
\r
9091 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9092 stream_.callbackInfo.isRunning = false;
\r
9093 MUTEX_LOCK( &stream_.mutex );
\r
9094 if ( stream_.state == STREAM_STOPPED )
\r
9095 pthread_cond_signal( &handle->runnable );
\r
9096 MUTEX_UNLOCK( &stream_.mutex );
\r
9097 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9099 if ( stream_.state == STREAM_RUNNING ) {
\r
9100 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9101 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9103 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9104 stream_.state = STREAM_STOPPED;
\r
9108 pthread_cond_destroy( &handle->runnable );
\r
9109 if ( handle->id[0] ) close( handle->id[0] );
\r
9110 if ( handle->id[1] ) close( handle->id[1] );
\r
9112 stream_.apiHandle = 0;
\r
9115 for ( int i=0; i<2; i++ ) {
\r
9116 if ( stream_.userBuffer[i] ) {
\r
9117 free( stream_.userBuffer[i] );
\r
9118 stream_.userBuffer[i] = 0;
\r
9122 if ( stream_.deviceBuffer ) {
\r
9123 free( stream_.deviceBuffer );
\r
9124 stream_.deviceBuffer = 0;
\r
9127 stream_.mode = UNINITIALIZED;
\r
9128 stream_.state = STREAM_CLOSED;
\r
9131 void RtApiOss :: startStream()
\r
9134 if ( stream_.state == STREAM_RUNNING ) {
\r
9135 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9136 error( RtAudioError::WARNING );
\r
9140 MUTEX_LOCK( &stream_.mutex );
\r
9142 stream_.state = STREAM_RUNNING;
\r
9144 // No need to do anything else here ... OSS automatically starts
\r
9145 // when fed samples.
\r
9147 MUTEX_UNLOCK( &stream_.mutex );
\r
9149 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9150 pthread_cond_signal( &handle->runnable );
\r
9153 void RtApiOss :: stopStream()
\r
9156 if ( stream_.state == STREAM_STOPPED ) {
\r
9157 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9158 error( RtAudioError::WARNING );
\r
9162 MUTEX_LOCK( &stream_.mutex );
\r
9164 // The state might change while waiting on a mutex.
\r
9165 if ( stream_.state == STREAM_STOPPED ) {
\r
9166 MUTEX_UNLOCK( &stream_.mutex );
\r
9171 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9172 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9174 // Flush the output with zeros a few times.
\r
9177 RtAudioFormat format;
\r
9179 if ( stream_.doConvertBuffer[0] ) {
\r
9180 buffer = stream_.deviceBuffer;
\r
9181 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9182 format = stream_.deviceFormat[0];
\r
9185 buffer = stream_.userBuffer[0];
\r
9186 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9187 format = stream_.userFormat;
\r
9190 memset( buffer, 0, samples * formatBytes(format) );
\r
9191 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9192 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9193 if ( result == -1 ) {
\r
9194 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9195 error( RtAudioError::WARNING );
\r
9199 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9200 if ( result == -1 ) {
\r
9201 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9202 errorText_ = errorStream_.str();
\r
9205 handle->triggered = false;
\r
9208 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9209 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9210 if ( result == -1 ) {
\r
9211 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9212 errorText_ = errorStream_.str();
\r
9218 stream_.state = STREAM_STOPPED;
\r
9219 MUTEX_UNLOCK( &stream_.mutex );
\r
9221 if ( result != -1 ) return;
\r
9222 error( RtAudioError::SYSTEM_ERROR );
\r
9225 void RtApiOss :: abortStream()
\r
9228 if ( stream_.state == STREAM_STOPPED ) {
\r
9229 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9230 error( RtAudioError::WARNING );
\r
9234 MUTEX_LOCK( &stream_.mutex );
\r
9236 // The state might change while waiting on a mutex.
\r
9237 if ( stream_.state == STREAM_STOPPED ) {
\r
9238 MUTEX_UNLOCK( &stream_.mutex );
\r
9243 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9244 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9245 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9246 if ( result == -1 ) {
\r
9247 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9248 errorText_ = errorStream_.str();
\r
9251 handle->triggered = false;
\r
9254 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9255 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9256 if ( result == -1 ) {
\r
9257 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9258 errorText_ = errorStream_.str();
\r
9264 stream_.state = STREAM_STOPPED;
\r
9265 MUTEX_UNLOCK( &stream_.mutex );
\r
9267 if ( result != -1 ) return;
\r
9268 error( RtAudioError::SYSTEM_ERROR );
\r
9271 void RtApiOss :: callbackEvent()
\r
9273 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9274 if ( stream_.state == STREAM_STOPPED ) {
\r
9275 MUTEX_LOCK( &stream_.mutex );
\r
9276 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9277 if ( stream_.state != STREAM_RUNNING ) {
\r
9278 MUTEX_UNLOCK( &stream_.mutex );
\r
9281 MUTEX_UNLOCK( &stream_.mutex );
\r
9284 if ( stream_.state == STREAM_CLOSED ) {
\r
9285 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9286 error( RtAudioError::WARNING );
\r
9290 // Invoke user callback to get fresh output data.
\r
9291 int doStopStream = 0;
\r
9292 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9293 double streamTime = getStreamTime();
\r
9294 RtAudioStreamStatus status = 0;
\r
9295 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9296 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9297 handle->xrun[0] = false;
\r
9299 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9300 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9301 handle->xrun[1] = false;
\r
9303 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9304 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9305 if ( doStopStream == 2 ) {
\r
9306 this->abortStream();
\r
9310 MUTEX_LOCK( &stream_.mutex );
\r
9312 // The state might change while waiting on a mutex.
\r
9313 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9318 RtAudioFormat format;
\r
9320 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9322 // Setup parameters and do buffer conversion if necessary.
\r
9323 if ( stream_.doConvertBuffer[0] ) {
\r
9324 buffer = stream_.deviceBuffer;
\r
9325 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9326 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9327 format = stream_.deviceFormat[0];
\r
9330 buffer = stream_.userBuffer[0];
\r
9331 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9332 format = stream_.userFormat;
\r
9335 // Do byte swapping if necessary.
\r
9336 if ( stream_.doByteSwap[0] )
\r
9337 byteSwapBuffer( buffer, samples, format );
\r
9339 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9341 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9342 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9343 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9344 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9345 handle->triggered = true;
\r
9348 // Write samples to device.
\r
9349 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9351 if ( result == -1 ) {
\r
9352 // We'll assume this is an underrun, though there isn't a
\r
9353 // specific means for determining that.
\r
9354 handle->xrun[0] = true;
\r
9355 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9356 error( RtAudioError::WARNING );
\r
9357 // Continue on to input section.
\r
9361 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9363 // Setup parameters.
\r
9364 if ( stream_.doConvertBuffer[1] ) {
\r
9365 buffer = stream_.deviceBuffer;
\r
9366 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9367 format = stream_.deviceFormat[1];
\r
9370 buffer = stream_.userBuffer[1];
\r
9371 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9372 format = stream_.userFormat;
\r
9375 // Read samples from device.
\r
9376 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9378 if ( result == -1 ) {
\r
9379 // We'll assume this is an overrun, though there isn't a
\r
9380 // specific means for determining that.
\r
9381 handle->xrun[1] = true;
\r
9382 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9383 error( RtAudioError::WARNING );
\r
9387 // Do byte swapping if necessary.
\r
9388 if ( stream_.doByteSwap[1] )
\r
9389 byteSwapBuffer( buffer, samples, format );
\r
9391 // Do buffer conversion if necessary.
\r
9392 if ( stream_.doConvertBuffer[1] )
\r
9393 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9397 MUTEX_UNLOCK( &stream_.mutex );
\r
9399 RtApi::tickStreamTime();
\r
9400 if ( doStopStream == 1 ) this->stopStream();
\r
9403 static void *ossCallbackHandler( void *ptr )
\r
9405 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9406 RtApiOss *object = (RtApiOss *) info->object;
\r
9407 bool *isRunning = &info->isRunning;
\r
9409 while ( *isRunning == true ) {
\r
9410 pthread_testcancel();
\r
9411 object->callbackEvent();
\r
9414 pthread_exit( NULL );
\r
9417 //******************** End of __LINUX_OSS__ *********************//
\r
9421 // *************************************************** //
\r
9423 // Protected common (OS-independent) RtAudio methods.
\r
9425 // *************************************************** //
\r
9427 // This method can be modified to control the behavior of error
\r
9428 // message printing.
\r
9429 void RtApi :: error( RtAudioError::Type type )
\r
9431 errorStream_.str(""); // clear the ostringstream
\r
9433 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9434 if ( errorCallback ) {
\r
9435 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9437 if ( firstErrorOccurred_ )
\r
9440 firstErrorOccurred_ = true;
\r
9441 const std::string errorMessage = errorText_;
\r
9443 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9444 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9448 errorCallback( type, errorMessage );
\r
9449 firstErrorOccurred_ = false;
\r
9453 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9454 std::cerr << '\n' << errorText_ << "\n\n";
\r
9455 else if ( type != RtAudioError::WARNING )
\r
9456 throw( RtAudioError( errorText_, type ) );
\r
9459 void RtApi :: verifyStream()
\r
9461 if ( stream_.state == STREAM_CLOSED ) {
\r
9462 errorText_ = "RtApi:: a stream is not open!";
\r
9463 error( RtAudioError::INVALID_USE );
\r
9467 void RtApi :: clearStreamInfo()
\r
9469 stream_.mode = UNINITIALIZED;
\r
9470 stream_.state = STREAM_CLOSED;
\r
9471 stream_.sampleRate = 0;
\r
9472 stream_.bufferSize = 0;
\r
9473 stream_.nBuffers = 0;
\r
9474 stream_.userFormat = 0;
\r
9475 stream_.userInterleaved = true;
\r
9476 stream_.streamTime = 0.0;
\r
9477 stream_.apiHandle = 0;
\r
9478 stream_.deviceBuffer = 0;
\r
9479 stream_.callbackInfo.callback = 0;
\r
9480 stream_.callbackInfo.userData = 0;
\r
9481 stream_.callbackInfo.isRunning = false;
\r
9482 stream_.callbackInfo.errorCallback = 0;
\r
9483 for ( int i=0; i<2; i++ ) {
\r
9484 stream_.device[i] = 11111;
\r
9485 stream_.doConvertBuffer[i] = false;
\r
9486 stream_.deviceInterleaved[i] = true;
\r
9487 stream_.doByteSwap[i] = false;
\r
9488 stream_.nUserChannels[i] = 0;
\r
9489 stream_.nDeviceChannels[i] = 0;
\r
9490 stream_.channelOffset[i] = 0;
\r
9491 stream_.deviceFormat[i] = 0;
\r
9492 stream_.latency[i] = 0;
\r
9493 stream_.userBuffer[i] = 0;
\r
9494 stream_.convertInfo[i].channels = 0;
\r
9495 stream_.convertInfo[i].inJump = 0;
\r
9496 stream_.convertInfo[i].outJump = 0;
\r
9497 stream_.convertInfo[i].inFormat = 0;
\r
9498 stream_.convertInfo[i].outFormat = 0;
\r
9499 stream_.convertInfo[i].inOffset.clear();
\r
9500 stream_.convertInfo[i].outOffset.clear();
\r
9504 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9506 if ( format == RTAUDIO_SINT16 )
\r
9508 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9510 else if ( format == RTAUDIO_FLOAT64 )
\r
9512 else if ( format == RTAUDIO_SINT24 )
\r
9514 else if ( format == RTAUDIO_SINT8 )
\r
9517 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9518 error( RtAudioError::WARNING );
\r
9523 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9525 if ( mode == INPUT ) { // convert device to user buffer
\r
9526 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9527 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9528 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9529 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9531 else { // convert user to device buffer
\r
9532 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9533 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9534 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9535 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9538 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9539 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9541 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9543 // Set up the interleave/deinterleave offsets.
\r
9544 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9545 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9546 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9547 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9548 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9549 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9550 stream_.convertInfo[mode].inJump = 1;
\r
9554 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9555 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9556 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9557 stream_.convertInfo[mode].outJump = 1;
\r
9561 else { // no (de)interleaving
\r
9562 if ( stream_.userInterleaved ) {
\r
9563 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9564 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9565 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9569 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9570 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9571 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9572 stream_.convertInfo[mode].inJump = 1;
\r
9573 stream_.convertInfo[mode].outJump = 1;
\r
9578 // Add channel offset.
\r
9579 if ( firstChannel > 0 ) {
\r
9580 if ( stream_.deviceInterleaved[mode] ) {
\r
9581 if ( mode == OUTPUT ) {
\r
9582 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9583 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9586 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9587 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9591 if ( mode == OUTPUT ) {
\r
9592 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9593 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9596 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9597 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9603 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9605 // This function does format conversion, input/output channel compensation, and
\r
9606 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9607 // the lower three bytes of a 32-bit integer.
\r
9609 // Clear our device buffer when in/out duplex device channels are different
\r
9610 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9611 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9612 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9615 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9617 Float64 *out = (Float64 *)outBuffer;
\r
9619 if (info.inFormat == RTAUDIO_SINT8) {
\r
9620 signed char *in = (signed char *)inBuffer;
\r
9621 scale = 1.0 / 127.5;
\r
9622 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9623 for (j=0; j<info.channels; j++) {
\r
9624 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9625 out[info.outOffset[j]] += 0.5;
\r
9626 out[info.outOffset[j]] *= scale;
\r
9628 in += info.inJump;
\r
9629 out += info.outJump;
\r
9632 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9633 Int16 *in = (Int16 *)inBuffer;
\r
9634 scale = 1.0 / 32767.5;
\r
9635 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9636 for (j=0; j<info.channels; j++) {
\r
9637 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9638 out[info.outOffset[j]] += 0.5;
\r
9639 out[info.outOffset[j]] *= scale;
\r
9641 in += info.inJump;
\r
9642 out += info.outJump;
\r
9645 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9646 Int24 *in = (Int24 *)inBuffer;
\r
9647 scale = 1.0 / 8388607.5;
\r
9648 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9649 for (j=0; j<info.channels; j++) {
\r
9650 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9651 out[info.outOffset[j]] += 0.5;
\r
9652 out[info.outOffset[j]] *= scale;
\r
9654 in += info.inJump;
\r
9655 out += info.outJump;
\r
9658 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9659 Int32 *in = (Int32 *)inBuffer;
\r
9660 scale = 1.0 / 2147483647.5;
\r
9661 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9662 for (j=0; j<info.channels; j++) {
\r
9663 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9664 out[info.outOffset[j]] += 0.5;
\r
9665 out[info.outOffset[j]] *= scale;
\r
9667 in += info.inJump;
\r
9668 out += info.outJump;
\r
9671 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9672 Float32 *in = (Float32 *)inBuffer;
\r
9673 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9674 for (j=0; j<info.channels; j++) {
\r
9675 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9677 in += info.inJump;
\r
9678 out += info.outJump;
\r
9681 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9682 // Channel compensation and/or (de)interleaving only.
\r
9683 Float64 *in = (Float64 *)inBuffer;
\r
9684 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9685 for (j=0; j<info.channels; j++) {
\r
9686 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9688 in += info.inJump;
\r
9689 out += info.outJump;
\r
9693 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9695 Float32 *out = (Float32 *)outBuffer;
\r
9697 if (info.inFormat == RTAUDIO_SINT8) {
\r
9698 signed char *in = (signed char *)inBuffer;
\r
9699 scale = (Float32) ( 1.0 / 127.5 );
\r
9700 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9701 for (j=0; j<info.channels; j++) {
\r
9702 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9703 out[info.outOffset[j]] += 0.5;
\r
9704 out[info.outOffset[j]] *= scale;
\r
9706 in += info.inJump;
\r
9707 out += info.outJump;
\r
9710 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9711 Int16 *in = (Int16 *)inBuffer;
\r
9712 scale = (Float32) ( 1.0 / 32767.5 );
\r
9713 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9714 for (j=0; j<info.channels; j++) {
\r
9715 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9716 out[info.outOffset[j]] += 0.5;
\r
9717 out[info.outOffset[j]] *= scale;
\r
9719 in += info.inJump;
\r
9720 out += info.outJump;
\r
9723 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9724 Int24 *in = (Int24 *)inBuffer;
\r
9725 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9726 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9727 for (j=0; j<info.channels; j++) {
\r
9728 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9729 out[info.outOffset[j]] += 0.5;
\r
9730 out[info.outOffset[j]] *= scale;
\r
9732 in += info.inJump;
\r
9733 out += info.outJump;
\r
9736 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9737 Int32 *in = (Int32 *)inBuffer;
\r
9738 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9739 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9740 for (j=0; j<info.channels; j++) {
\r
9741 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9742 out[info.outOffset[j]] += 0.5;
\r
9743 out[info.outOffset[j]] *= scale;
\r
9745 in += info.inJump;
\r
9746 out += info.outJump;
\r
9749 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9750 // Channel compensation and/or (de)interleaving only.
\r
9751 Float32 *in = (Float32 *)inBuffer;
\r
9752 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9753 for (j=0; j<info.channels; j++) {
\r
9754 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9756 in += info.inJump;
\r
9757 out += info.outJump;
\r
9760 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9761 Float64 *in = (Float64 *)inBuffer;
\r
9762 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9763 for (j=0; j<info.channels; j++) {
\r
9764 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9766 in += info.inJump;
\r
9767 out += info.outJump;
\r
9771 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9772 Int32 *out = (Int32 *)outBuffer;
\r
9773 if (info.inFormat == RTAUDIO_SINT8) {
\r
9774 signed char *in = (signed char *)inBuffer;
\r
9775 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9776 for (j=0; j<info.channels; j++) {
\r
9777 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9778 out[info.outOffset[j]] <<= 24;
\r
9780 in += info.inJump;
\r
9781 out += info.outJump;
\r
9784 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9785 Int16 *in = (Int16 *)inBuffer;
\r
9786 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9787 for (j=0; j<info.channels; j++) {
\r
9788 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9789 out[info.outOffset[j]] <<= 16;
\r
9791 in += info.inJump;
\r
9792 out += info.outJump;
\r
9795 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9796 Int24 *in = (Int24 *)inBuffer;
\r
9797 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9798 for (j=0; j<info.channels; j++) {
\r
9799 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9800 out[info.outOffset[j]] <<= 8;
\r
9802 in += info.inJump;
\r
9803 out += info.outJump;
\r
9806 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9807 // Channel compensation and/or (de)interleaving only.
\r
9808 Int32 *in = (Int32 *)inBuffer;
\r
9809 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9810 for (j=0; j<info.channels; j++) {
\r
9811 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9813 in += info.inJump;
\r
9814 out += info.outJump;
\r
9817 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9818 Float32 *in = (Float32 *)inBuffer;
\r
9819 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9820 for (j=0; j<info.channels; j++) {
\r
9821 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9823 in += info.inJump;
\r
9824 out += info.outJump;
\r
9827 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9828 Float64 *in = (Float64 *)inBuffer;
\r
9829 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9830 for (j=0; j<info.channels; j++) {
\r
9831 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9833 in += info.inJump;
\r
9834 out += info.outJump;
\r
9838 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9839 Int24 *out = (Int24 *)outBuffer;
\r
9840 if (info.inFormat == RTAUDIO_SINT8) {
\r
9841 signed char *in = (signed char *)inBuffer;
\r
9842 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9843 for (j=0; j<info.channels; j++) {
\r
9844 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9845 //out[info.outOffset[j]] <<= 16;
\r
9847 in += info.inJump;
\r
9848 out += info.outJump;
\r
9851 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9852 Int16 *in = (Int16 *)inBuffer;
\r
9853 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9854 for (j=0; j<info.channels; j++) {
\r
9855 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9856 //out[info.outOffset[j]] <<= 8;
\r
9858 in += info.inJump;
\r
9859 out += info.outJump;
\r
9862 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9863 // Channel compensation and/or (de)interleaving only.
\r
9864 Int24 *in = (Int24 *)inBuffer;
\r
9865 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9866 for (j=0; j<info.channels; j++) {
\r
9867 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9869 in += info.inJump;
\r
9870 out += info.outJump;
\r
9873 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9874 Int32 *in = (Int32 *)inBuffer;
\r
9875 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9876 for (j=0; j<info.channels; j++) {
\r
9877 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9878 //out[info.outOffset[j]] >>= 8;
\r
9880 in += info.inJump;
\r
9881 out += info.outJump;
\r
9884 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9885 Float32 *in = (Float32 *)inBuffer;
\r
9886 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9887 for (j=0; j<info.channels; j++) {
\r
9888 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9890 in += info.inJump;
\r
9891 out += info.outJump;
\r
9894 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9895 Float64 *in = (Float64 *)inBuffer;
\r
9896 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9897 for (j=0; j<info.channels; j++) {
\r
9898 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9900 in += info.inJump;
\r
9901 out += info.outJump;
\r
9905 else if (info.outFormat == RTAUDIO_SINT16) {
\r
9906 Int16 *out = (Int16 *)outBuffer;
\r
9907 if (info.inFormat == RTAUDIO_SINT8) {
\r
9908 signed char *in = (signed char *)inBuffer;
\r
9909 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9910 for (j=0; j<info.channels; j++) {
\r
9911 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
9912 out[info.outOffset[j]] <<= 8;
\r
9914 in += info.inJump;
\r
9915 out += info.outJump;
\r
9918 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9919 // Channel compensation and/or (de)interleaving only.
\r
9920 Int16 *in = (Int16 *)inBuffer;
\r
9921 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9922 for (j=0; j<info.channels; j++) {
\r
9923 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9925 in += info.inJump;
\r
9926 out += info.outJump;
\r
9929 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9930 Int24 *in = (Int24 *)inBuffer;
\r
9931 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9932 for (j=0; j<info.channels; j++) {
\r
9933 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
9935 in += info.inJump;
\r
9936 out += info.outJump;
\r
9939 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9940 Int32 *in = (Int32 *)inBuffer;
\r
9941 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9942 for (j=0; j<info.channels; j++) {
\r
9943 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
9945 in += info.inJump;
\r
9946 out += info.outJump;
\r
9949 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9950 Float32 *in = (Float32 *)inBuffer;
\r
9951 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9952 for (j=0; j<info.channels; j++) {
\r
9953 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9955 in += info.inJump;
\r
9956 out += info.outJump;
\r
9959 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9960 Float64 *in = (Float64 *)inBuffer;
\r
9961 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9962 for (j=0; j<info.channels; j++) {
\r
9963 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9965 in += info.inJump;
\r
9966 out += info.outJump;
\r
9970 else if (info.outFormat == RTAUDIO_SINT8) {
\r
9971 signed char *out = (signed char *)outBuffer;
\r
9972 if (info.inFormat == RTAUDIO_SINT8) {
\r
9973 // Channel compensation and/or (de)interleaving only.
\r
9974 signed char *in = (signed char *)inBuffer;
\r
9975 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9976 for (j=0; j<info.channels; j++) {
\r
9977 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9979 in += info.inJump;
\r
9980 out += info.outJump;
\r
9983 if (info.inFormat == RTAUDIO_SINT16) {
\r
9984 Int16 *in = (Int16 *)inBuffer;
\r
9985 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9986 for (j=0; j<info.channels; j++) {
\r
9987 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
9989 in += info.inJump;
\r
9990 out += info.outJump;
\r
9993 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9994 Int24 *in = (Int24 *)inBuffer;
\r
9995 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9996 for (j=0; j<info.channels; j++) {
\r
9997 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
9999 in += info.inJump;
\r
10000 out += info.outJump;
\r
10003 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10004 Int32 *in = (Int32 *)inBuffer;
\r
10005 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10006 for (j=0; j<info.channels; j++) {
\r
10007 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10009 in += info.inJump;
\r
10010 out += info.outJump;
\r
10013 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10014 Float32 *in = (Float32 *)inBuffer;
\r
10015 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10016 for (j=0; j<info.channels; j++) {
\r
10017 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10019 in += info.inJump;
\r
10020 out += info.outJump;
\r
10023 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10024 Float64 *in = (Float64 *)inBuffer;
\r
10025 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10026 for (j=0; j<info.channels; j++) {
\r
10027 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10029 in += info.inJump;
\r
10030 out += info.outJump;
\r
10036 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10037 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10038 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10040 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10042 register char val;
\r
10043 register char *ptr;
\r
10046 if ( format == RTAUDIO_SINT16 ) {
\r
10047 for ( unsigned int i=0; i<samples; i++ ) {
\r
10048 // Swap 1st and 2nd bytes.
\r
10050 *(ptr) = *(ptr+1);
\r
10053 // Increment 2 bytes.
\r
10057 else if ( format == RTAUDIO_SINT32 ||
\r
10058 format == RTAUDIO_FLOAT32 ) {
\r
10059 for ( unsigned int i=0; i<samples; i++ ) {
\r
10060 // Swap 1st and 4th bytes.
\r
10062 *(ptr) = *(ptr+3);
\r
10065 // Swap 2nd and 3rd bytes.
\r
10068 *(ptr) = *(ptr+1);
\r
10071 // Increment 3 more bytes.
\r
10075 else if ( format == RTAUDIO_SINT24 ) {
\r
10076 for ( unsigned int i=0; i<samples; i++ ) {
\r
10077 // Swap 1st and 3rd bytes.
\r
10079 *(ptr) = *(ptr+2);
\r
10082 // Increment 2 more bytes.
\r
10086 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10087 for ( unsigned int i=0; i<samples; i++ ) {
\r
10088 // Swap 1st and 8th bytes
\r
10090 *(ptr) = *(ptr+7);
\r
10093 // Swap 2nd and 7th bytes
\r
10096 *(ptr) = *(ptr+5);
\r
10099 // Swap 3rd and 6th bytes
\r
10102 *(ptr) = *(ptr+3);
\r
10105 // Swap 4th and 5th bytes
\r
10108 *(ptr) = *(ptr+1);
\r
10111 // Increment 5 more bytes.
\r
10117 // Indentation settings for Vim and Emacs
\r
10119 // Local Variables:
\r
10120 // c-basic-offset: 2
\r
10121 // indent-tabs-mode: nil
\r
10124 // vim: et sts=2 sw=2
\r