1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound, ASIO and WASAPI) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2014 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.1.1pre
\r
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-specific mutex wrappers: Windows critical sections, POSIX
// pthread mutexes, or no-op dummies when no API backend is compiled in.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 std::string RtAudio :: getVersion( void ) throw()
\r
80 return RTAUDIO_VERSION;
\r
83 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
87 // The order here will control the order of RtAudio's API search in
\r
89 #if defined(__UNIX_JACK__)
\r
90 apis.push_back( UNIX_JACK );
\r
92 #if defined(__LINUX_ALSA__)
\r
93 apis.push_back( LINUX_ALSA );
\r
95 #if defined(__LINUX_PULSE__)
\r
96 apis.push_back( LINUX_PULSE );
\r
98 #if defined(__LINUX_OSS__)
\r
99 apis.push_back( LINUX_OSS );
\r
101 #if defined(__WINDOWS_ASIO__)
\r
102 apis.push_back( WINDOWS_ASIO );
\r
104 #if defined(__WINDOWS_WASAPI__)
\r
105 apis.push_back( WINDOWS_WASAPI );
\r
107 #if defined(__WINDOWS_DS__)
\r
108 apis.push_back( WINDOWS_DS );
\r
110 #if defined(__MACOSX_CORE__)
\r
111 apis.push_back( MACOSX_CORE );
\r
113 #if defined(__RTAUDIO_DUMMY__)
\r
114 apis.push_back( RTAUDIO_DUMMY );
\r
118 void RtAudio :: openRtApi( RtAudio::Api api )
\r
124 #if defined(__UNIX_JACK__)
\r
125 if ( api == UNIX_JACK )
\r
126 rtapi_ = new RtApiJack();
\r
128 #if defined(__LINUX_ALSA__)
\r
129 if ( api == LINUX_ALSA )
\r
130 rtapi_ = new RtApiAlsa();
\r
132 #if defined(__LINUX_PULSE__)
\r
133 if ( api == LINUX_PULSE )
\r
134 rtapi_ = new RtApiPulse();
\r
136 #if defined(__LINUX_OSS__)
\r
137 if ( api == LINUX_OSS )
\r
138 rtapi_ = new RtApiOss();
\r
140 #if defined(__WINDOWS_ASIO__)
\r
141 if ( api == WINDOWS_ASIO )
\r
142 rtapi_ = new RtApiAsio();
\r
144 #if defined(__WINDOWS_WASAPI__)
\r
145 if ( api == WINDOWS_WASAPI )
\r
146 rtapi_ = new RtApiWasapi();
\r
148 #if defined(__WINDOWS_DS__)
\r
149 if ( api == WINDOWS_DS )
\r
150 rtapi_ = new RtApiDs();
\r
152 #if defined(__MACOSX_CORE__)
\r
153 if ( api == MACOSX_CORE )
\r
154 rtapi_ = new RtApiCore();
\r
156 #if defined(__RTAUDIO_DUMMY__)
\r
157 if ( api == RTAUDIO_DUMMY )
\r
158 rtapi_ = new RtApiDummy();
\r
162 RtAudio :: RtAudio( RtAudio::Api api )
\r
166 if ( api != UNSPECIFIED ) {
\r
167 // Attempt to open the specified API.
\r
169 if ( rtapi_ ) return;
\r
171 // No compiled support for specified API value. Issue a debug
\r
172 // warning and continue as if no API was specified.
\r
173 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
176 // Iterate through the compiled APIs and return as soon as we find
\r
177 // one with at least one device or we reach the end of the list.
\r
178 std::vector< RtAudio::Api > apis;
\r
179 getCompiledApi( apis );
\r
180 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
181 openRtApi( apis[i] );
\r
182 if ( rtapi_->getDeviceCount() ) break;
\r
185 if ( rtapi_ ) return;
\r
187 // It should not be possible to get here because the preprocessor
\r
188 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
189 // API-specific definitions are passed to the compiler. But just in
\r
190 // case something weird happens, we'll thow an error.
\r
191 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
192 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
195 RtAudio :: ~RtAudio() throw()
\r
201 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
202 RtAudio::StreamParameters *inputParameters,
\r
203 RtAudioFormat format, unsigned int sampleRate,
\r
204 unsigned int *bufferFrames,
\r
205 RtAudioCallback callback, void *userData,
\r
206 RtAudio::StreamOptions *options,
\r
207 RtAudioErrorCallback errorCallback )
\r
209 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
210 sampleRate, bufferFrames, callback,
\r
211 userData, options, errorCallback );
\r
214 // *************************************************** //
\r
216 // Public RtApi definitions (see end of file for
\r
217 // private or protected utility functions).
\r
219 // *************************************************** //
\r
223 stream_.state = STREAM_CLOSED;
\r
224 stream_.mode = UNINITIALIZED;
\r
225 stream_.apiHandle = 0;
\r
226 stream_.userBuffer[0] = 0;
\r
227 stream_.userBuffer[1] = 0;
\r
228 MUTEX_INITIALIZE( &stream_.mutex );
\r
229 showWarnings_ = true;
\r
230 firstErrorOccurred_ = false;
\r
235 MUTEX_DESTROY( &stream_.mutex );
\r
238 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
239 RtAudio::StreamParameters *iParams,
\r
240 RtAudioFormat format, unsigned int sampleRate,
\r
241 unsigned int *bufferFrames,
\r
242 RtAudioCallback callback, void *userData,
\r
243 RtAudio::StreamOptions *options,
\r
244 RtAudioErrorCallback errorCallback )
\r
246 if ( stream_.state != STREAM_CLOSED ) {
\r
247 errorText_ = "RtApi::openStream: a stream is already open!";
\r
248 error( RtAudioError::INVALID_USE );
\r
252 // Clear stream information potentially left from a previously open stream.
\r
255 if ( oParams && oParams->nChannels < 1 ) {
\r
256 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
257 error( RtAudioError::INVALID_USE );
\r
261 if ( iParams && iParams->nChannels < 1 ) {
\r
262 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
263 error( RtAudioError::INVALID_USE );
\r
267 if ( oParams == NULL && iParams == NULL ) {
\r
268 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
269 error( RtAudioError::INVALID_USE );
\r
273 if ( formatBytes(format) == 0 ) {
\r
274 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
275 error( RtAudioError::INVALID_USE );
\r
279 unsigned int nDevices = getDeviceCount();
\r
280 unsigned int oChannels = 0;
\r
282 oChannels = oParams->nChannels;
\r
283 if ( oParams->deviceId >= nDevices ) {
\r
284 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
285 error( RtAudioError::INVALID_USE );
\r
290 unsigned int iChannels = 0;
\r
292 iChannels = iParams->nChannels;
\r
293 if ( iParams->deviceId >= nDevices ) {
\r
294 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
295 error( RtAudioError::INVALID_USE );
\r
302 if ( oChannels > 0 ) {
\r
304 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
305 sampleRate, format, bufferFrames, options );
\r
306 if ( result == false ) {
\r
307 error( RtAudioError::SYSTEM_ERROR );
\r
312 if ( iChannels > 0 ) {
\r
314 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
315 sampleRate, format, bufferFrames, options );
\r
316 if ( result == false ) {
\r
317 if ( oChannels > 0 ) closeStream();
\r
318 error( RtAudioError::SYSTEM_ERROR );
\r
323 stream_.callbackInfo.callback = (void *) callback;
\r
324 stream_.callbackInfo.userData = userData;
\r
325 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
327 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
328 stream_.state = STREAM_STOPPED;
\r
331 unsigned int RtApi :: getDefaultInputDevice( void )
\r
333 // Should be implemented in subclasses if possible.
\r
337 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
339 // Should be implemented in subclasses if possible.
\r
343 void RtApi :: closeStream( void )
\r
345 // MUST be implemented in subclasses!
\r
349 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
350 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
351 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
352 RtAudio::StreamOptions * /*options*/ )
\r
354 // MUST be implemented in subclasses!
\r
358 void RtApi :: tickStreamTime( void )
\r
360 // Subclasses that do not provide their own implementation of
\r
361 // getStreamTime should call this function once per buffer I/O to
\r
362 // provide basic stream time support.
\r
364 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
366 #if defined( HAVE_GETTIMEOFDAY )
\r
367 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
371 long RtApi :: getStreamLatency( void )
\r
375 long totalLatency = 0;
\r
376 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
377 totalLatency = stream_.latency[0];
\r
378 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
379 totalLatency += stream_.latency[1];
\r
381 return totalLatency;
\r
384 double RtApi :: getStreamTime( void )
\r
388 #if defined( HAVE_GETTIMEOFDAY )
\r
389 // Return a very accurate estimate of the stream time by
\r
390 // adding in the elapsed time since the last tick.
\r
391 struct timeval then;
\r
392 struct timeval now;
\r
394 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
395 return stream_.streamTime;
\r
397 gettimeofday( &now, NULL );
\r
398 then = stream_.lastTickTimestamp;
\r
399 return stream_.streamTime +
\r
400 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
401 (then.tv_sec + 0.000001 * then.tv_usec));
\r
403 return stream_.streamTime;
\r
407 unsigned int RtApi :: getStreamSampleRate( void )
\r
411 return stream_.sampleRate;
\r
415 // *************************************************** //
\r
417 // OS/API-specific methods.
\r
419 // *************************************************** //
\r
421 #if defined(__MACOSX_CORE__)
\r
423 // The OS X CoreAudio API is designed to use a separate callback
\r
424 // procedure for each of its audio devices. A single RtAudio duplex
\r
425 // stream using two different devices is supported here, though it
\r
426 // cannot be guaranteed to always behave correctly because we cannot
\r
427 // synchronize these two callbacks.
\r
429 // A property listener is installed for over/underrun information.
\r
430 // However, no functionality is currently provided to allow property
\r
431 // listeners to trigger user handlers because it is unclear what could
\r
432 // be done if a critical stream parameter (buffer size, sample rate,
\r
433 // device disconnect) notification arrived. The listeners entail
\r
434 // quite a bit of extra code and most likely, a user program wouldn't
\r
435 // be prepared for the result anyway. However, we do provide a flag
\r
436 // to the client callback function to inform of an over/underrun.
\r
438 // A structure to hold various information related to the CoreAudio API
\r
440 struct CoreHandle {
\r
441 AudioDeviceID id[2]; // device ids
\r
442 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
443 AudioDeviceIOProcID procId[2];
\r
445 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
446 UInt32 nStreams[2]; // number of streams to use
\r
448 char *deviceBuffer;
\r
449 pthread_cond_t condition;
\r
450 int drainCounter; // Tracks callback counts when draining
\r
451 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
454 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
457 RtApiCore:: RtApiCore()
\r
459 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
460 // This is a largely undocumented but absolutely necessary
\r
461 // requirement starting with OS-X 10.6. If not called, queries and
\r
462 // updates to various audio device properties are not handled
\r
464 CFRunLoopRef theRunLoop = NULL;
\r
465 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
466 kAudioObjectPropertyScopeGlobal,
\r
467 kAudioObjectPropertyElementMaster };
\r
468 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
469 if ( result != noErr ) {
\r
470 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
471 error( RtAudioError::WARNING );
\r
476 RtApiCore :: ~RtApiCore()
\r
478 // The subclass destructor gets called before the base class
\r
479 // destructor, so close an existing stream before deallocating
\r
480 // apiDeviceId memory.
\r
481 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
484 unsigned int RtApiCore :: getDeviceCount( void )
\r
486 // Find out how many audio devices there are, if any.
\r
488 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
489 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
490 if ( result != noErr ) {
\r
491 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
492 error( RtAudioError::WARNING );
\r
496 return dataSize / sizeof( AudioDeviceID );
\r
499 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
501 unsigned int nDevices = getDeviceCount();
\r
502 if ( nDevices <= 1 ) return 0;
\r
505 UInt32 dataSize = sizeof( AudioDeviceID );
\r
506 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
507 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
508 if ( result != noErr ) {
\r
509 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
510 error( RtAudioError::WARNING );
\r
514 dataSize *= nDevices;
\r
515 AudioDeviceID deviceList[ nDevices ];
\r
516 property.mSelector = kAudioHardwarePropertyDevices;
\r
517 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
518 if ( result != noErr ) {
\r
519 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
520 error( RtAudioError::WARNING );
\r
524 for ( unsigned int i=0; i<nDevices; i++ )
\r
525 if ( id == deviceList[i] ) return i;
\r
527 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
528 error( RtAudioError::WARNING );
\r
532 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
534 unsigned int nDevices = getDeviceCount();
\r
535 if ( nDevices <= 1 ) return 0;
\r
538 UInt32 dataSize = sizeof( AudioDeviceID );
\r
539 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
540 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
541 if ( result != noErr ) {
\r
542 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
543 error( RtAudioError::WARNING );
\r
547 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
548 AudioDeviceID deviceList[ nDevices ];
\r
549 property.mSelector = kAudioHardwarePropertyDevices;
\r
550 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
551 if ( result != noErr ) {
\r
552 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
553 error( RtAudioError::WARNING );
\r
557 for ( unsigned int i=0; i<nDevices; i++ )
\r
558 if ( id == deviceList[i] ) return i;
\r
560 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
561 error( RtAudioError::WARNING );
\r
565 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
567 RtAudio::DeviceInfo info;
\r
568 info.probed = false;
\r
571 unsigned int nDevices = getDeviceCount();
\r
572 if ( nDevices == 0 ) {
\r
573 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
574 error( RtAudioError::INVALID_USE );
\r
578 if ( device >= nDevices ) {
\r
579 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
580 error( RtAudioError::INVALID_USE );
\r
584 AudioDeviceID deviceList[ nDevices ];
\r
585 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
586 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
587 kAudioObjectPropertyScopeGlobal,
\r
588 kAudioObjectPropertyElementMaster };
\r
589 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
590 0, NULL, &dataSize, (void *) &deviceList );
\r
591 if ( result != noErr ) {
\r
592 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
593 error( RtAudioError::WARNING );
\r
597 AudioDeviceID id = deviceList[ device ];
\r
599 // Get the device name.
\r
601 CFStringRef cfname;
\r
602 dataSize = sizeof( CFStringRef );
\r
603 property.mSelector = kAudioObjectPropertyManufacturer;
\r
604 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
605 if ( result != noErr ) {
\r
606 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
607 errorText_ = errorStream_.str();
\r
608 error( RtAudioError::WARNING );
\r
612 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
613 int length = CFStringGetLength(cfname);
\r
614 char *mname = (char *)malloc(length * 3 + 1);
\r
615 #if defined( UNICODE ) || defined( _UNICODE )
\r
616 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
618 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
620 info.name.append( (const char *)mname, strlen(mname) );
\r
621 info.name.append( ": " );
\r
622 CFRelease( cfname );
\r
625 property.mSelector = kAudioObjectPropertyName;
\r
626 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
627 if ( result != noErr ) {
\r
628 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
629 errorText_ = errorStream_.str();
\r
630 error( RtAudioError::WARNING );
\r
634 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
635 length = CFStringGetLength(cfname);
\r
636 char *name = (char *)malloc(length * 3 + 1);
\r
637 #if defined( UNICODE ) || defined( _UNICODE )
\r
638 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
640 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
642 info.name.append( (const char *)name, strlen(name) );
\r
643 CFRelease( cfname );
\r
646 // Get the output stream "configuration".
\r
647 AudioBufferList *bufferList = nil;
\r
648 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
649 property.mScope = kAudioDevicePropertyScopeOutput;
\r
650 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
652 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
653 if ( result != noErr || dataSize == 0 ) {
\r
654 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
655 errorText_ = errorStream_.str();
\r
656 error( RtAudioError::WARNING );
\r
660 // Allocate the AudioBufferList.
\r
661 bufferList = (AudioBufferList *) malloc( dataSize );
\r
662 if ( bufferList == NULL ) {
\r
663 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
664 error( RtAudioError::WARNING );
\r
668 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
669 if ( result != noErr || dataSize == 0 ) {
\r
670 free( bufferList );
\r
671 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
672 errorText_ = errorStream_.str();
\r
673 error( RtAudioError::WARNING );
\r
677 // Get output channel information.
\r
678 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
679 for ( i=0; i<nStreams; i++ )
\r
680 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
681 free( bufferList );
\r
683 // Get the input stream "configuration".
\r
684 property.mScope = kAudioDevicePropertyScopeInput;
\r
685 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
686 if ( result != noErr || dataSize == 0 ) {
\r
687 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
688 errorText_ = errorStream_.str();
\r
689 error( RtAudioError::WARNING );
\r
693 // Allocate the AudioBufferList.
\r
694 bufferList = (AudioBufferList *) malloc( dataSize );
\r
695 if ( bufferList == NULL ) {
\r
696 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
697 error( RtAudioError::WARNING );
\r
701 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
702 if (result != noErr || dataSize == 0) {
\r
703 free( bufferList );
\r
704 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
705 errorText_ = errorStream_.str();
\r
706 error( RtAudioError::WARNING );
\r
710 // Get input channel information.
\r
711 nStreams = bufferList->mNumberBuffers;
\r
712 for ( i=0; i<nStreams; i++ )
\r
713 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
714 free( bufferList );
\r
716 // If device opens for both playback and capture, we determine the channels.
\r
717 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
718 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
720 // Probe the device sample rates.
\r
721 bool isInput = false;
\r
722 if ( info.outputChannels == 0 ) isInput = true;
\r
724 // Determine the supported sample rates.
\r
725 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
726 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
727 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
728 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
736 AudioValueRange rangeList[ nRanges ];
\r
737 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
738 if ( result != kAudioHardwareNoError ) {
\r
739 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
740 errorText_ = errorStream_.str();
\r
741 error( RtAudioError::WARNING );
\r
745 // The sample rate reporting mechanism is a bit of a mystery. It
\r
746 // seems that it can either return individual rates or a range of
\r
747 // rates. I assume that if the min / max range values are the same,
\r
748 // then that represents a single supported rate and if the min / max
\r
749 // range values are different, the device supports an arbitrary
\r
750 // range of values (though there might be multiple ranges, so we'll
\r
751 // use the most conservative range).
\r
752 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
753 bool haveValueRange = false;
\r
754 info.sampleRates.clear();
\r
755 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
756 if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
\r
757 info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
\r
759 haveValueRange = true;
\r
760 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
761 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
765 if ( haveValueRange ) {
\r
766 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
767 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
768 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
772 // Sort and remove any redundant values
\r
773 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
774 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
776 if ( info.sampleRates.size() == 0 ) {
\r
777 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
778 errorText_ = errorStream_.str();
\r
779 error( RtAudioError::WARNING );
\r
783 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
784 // Thus, any other "physical" formats supported by the device are of
\r
785 // no interest to the client.
\r
786 info.nativeFormats = RTAUDIO_FLOAT32;
\r
788 if ( info.outputChannels > 0 )
\r
789 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
790 if ( info.inputChannels > 0 )
\r
791 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
793 info.probed = true;
\r
797 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
798 const AudioTimeStamp* /*inNow*/,
\r
799 const AudioBufferList* inInputData,
\r
800 const AudioTimeStamp* /*inInputTime*/,
\r
801 AudioBufferList* outOutputData,
\r
802 const AudioTimeStamp* /*inOutputTime*/,
\r
803 void* infoPointer )
\r
805 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
807 RtApiCore *object = (RtApiCore *) info->object;
\r
808 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
809 return kAudioHardwareUnspecifiedError;
\r
811 return kAudioHardwareNoError;
\r
814 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
816 const AudioObjectPropertyAddress properties[],
\r
817 void* handlePointer )
\r
819 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
820 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
821 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
822 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
823 handle->xrun[1] = true;
\r
825 handle->xrun[0] = true;
\r
829 return kAudioHardwareNoError;
\r
832 static OSStatus rateListener( AudioObjectID inDevice,
\r
833 UInt32 /*nAddresses*/,
\r
834 const AudioObjectPropertyAddress /*properties*/[],
\r
835 void* ratePointer )
\r
837 Float64 *rate = (Float64 *) ratePointer;
\r
838 UInt32 dataSize = sizeof( Float64 );
\r
839 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
840 kAudioObjectPropertyScopeGlobal,
\r
841 kAudioObjectPropertyElementMaster };
\r
842 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
843 return kAudioHardwareNoError;
\r
846 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
847 unsigned int firstChannel, unsigned int sampleRate,
\r
848 RtAudioFormat format, unsigned int *bufferSize,
\r
849 RtAudio::StreamOptions *options )
\r
852 unsigned int nDevices = getDeviceCount();
\r
853 if ( nDevices == 0 ) {
\r
854 // This should not happen because a check is made before this function is called.
\r
855 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
859 if ( device >= nDevices ) {
\r
860 // This should not happen because a check is made before this function is called.
\r
861 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
865 AudioDeviceID deviceList[ nDevices ];
\r
866 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
867 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
868 kAudioObjectPropertyScopeGlobal,
\r
869 kAudioObjectPropertyElementMaster };
\r
870 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
871 0, NULL, &dataSize, (void *) &deviceList );
\r
872 if ( result != noErr ) {
\r
873 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
877 AudioDeviceID id = deviceList[ device ];
\r
879 // Setup for stream mode.
\r
880 bool isInput = false;
\r
881 if ( mode == INPUT ) {
\r
883 property.mScope = kAudioDevicePropertyScopeInput;
\r
886 property.mScope = kAudioDevicePropertyScopeOutput;
\r
888 // Get the stream "configuration".
\r
889 AudioBufferList *bufferList = nil;
\r
891 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
892 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
893 if ( result != noErr || dataSize == 0 ) {
\r
894 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
895 errorText_ = errorStream_.str();
\r
899 // Allocate the AudioBufferList.
\r
900 bufferList = (AudioBufferList *) malloc( dataSize );
\r
901 if ( bufferList == NULL ) {
\r
902 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
906 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
907 if (result != noErr || dataSize == 0) {
\r
908 free( bufferList );
\r
909 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
910 errorText_ = errorStream_.str();
\r
914 // Search for one or more streams that contain the desired number of
\r
915 // channels. CoreAudio devices can have an arbitrary number of
\r
916 // streams and each stream can have an arbitrary number of channels.
\r
917 // For each stream, a single buffer of interleaved samples is
\r
918 // provided. RtAudio prefers the use of one stream of interleaved
\r
919 // data or multiple consecutive single-channel streams. However, we
\r
920 // now support multiple consecutive multi-channel streams of
\r
921 // interleaved data as well.
\r
922 UInt32 iStream, offsetCounter = firstChannel;
\r
923 UInt32 nStreams = bufferList->mNumberBuffers;
\r
924 bool monoMode = false;
\r
925 bool foundStream = false;
\r
927 // First check that the device supports the requested number of
\r
929 UInt32 deviceChannels = 0;
\r
930 for ( iStream=0; iStream<nStreams; iStream++ )
\r
931 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
933 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
934 free( bufferList );
\r
935 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
936 errorText_ = errorStream_.str();
\r
940 // Look for a single stream meeting our needs.
\r
941 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
942 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
943 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
944 if ( streamChannels >= channels + offsetCounter ) {
\r
945 firstStream = iStream;
\r
946 channelOffset = offsetCounter;
\r
947 foundStream = true;
\r
950 if ( streamChannels > offsetCounter ) break;
\r
951 offsetCounter -= streamChannels;
\r
954 // If we didn't find a single stream above, then we should be able
\r
955 // to meet the channel specification with multiple streams.
\r
956 if ( foundStream == false ) {
\r
958 offsetCounter = firstChannel;
\r
959 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
960 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
961 if ( streamChannels > offsetCounter ) break;
\r
962 offsetCounter -= streamChannels;
\r
965 firstStream = iStream;
\r
966 channelOffset = offsetCounter;
\r
967 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
969 if ( streamChannels > 1 ) monoMode = false;
\r
970 while ( channelCounter > 0 ) {
\r
971 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
972 if ( streamChannels > 1 ) monoMode = false;
\r
973 channelCounter -= streamChannels;
\r
978 free( bufferList );
\r
980 // Determine the buffer size.
\r
981 AudioValueRange bufferRange;
\r
982 dataSize = sizeof( AudioValueRange );
\r
983 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
984 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
986 if ( result != noErr ) {
\r
987 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
988 errorText_ = errorStream_.str();
\r
992 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
993 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
994 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
996 // Set the buffer size. For multiple streams, I'm assuming we only
\r
997 // need to make this setting for the master channel.
\r
998 UInt32 theSize = (UInt32) *bufferSize;
\r
999 dataSize = sizeof( UInt32 );
\r
1000 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1001 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1003 if ( result != noErr ) {
\r
1004 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1005 errorText_ = errorStream_.str();
\r
1009 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1010 // MUST be the same in both directions!
\r
1011 *bufferSize = theSize;
\r
1012 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1013 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1014 errorText_ = errorStream_.str();
\r
1018 stream_.bufferSize = *bufferSize;
\r
1019 stream_.nBuffers = 1;
\r
1021 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1022 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1024 dataSize = sizeof( hog_pid );
\r
1025 property.mSelector = kAudioDevicePropertyHogMode;
\r
1026 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1027 if ( result != noErr ) {
\r
1028 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1029 errorText_ = errorStream_.str();
\r
1033 if ( hog_pid != getpid() ) {
\r
1034 hog_pid = getpid();
\r
1035 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1036 if ( result != noErr ) {
\r
1037 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1038 errorText_ = errorStream_.str();
\r
1044 // Check and if necessary, change the sample rate for the device.
\r
1045 Float64 nominalRate;
\r
1046 dataSize = sizeof( Float64 );
\r
1047 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1048 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1049 if ( result != noErr ) {
\r
1050 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1051 errorText_ = errorStream_.str();
\r
1055 // Only change the sample rate if off by more than 1 Hz.
\r
1056 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1058 // Set a property listener for the sample rate change
\r
1059 Float64 reportedRate = 0.0;
\r
1060 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1061 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1062 if ( result != noErr ) {
\r
1063 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1064 errorText_ = errorStream_.str();
\r
1068 nominalRate = (Float64) sampleRate;
\r
1069 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1070 if ( result != noErr ) {
\r
1071 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1072 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1073 errorText_ = errorStream_.str();
\r
1077 // Now wait until the reported nominal rate is what we just set.
\r
1078 UInt32 microCounter = 0;
\r
1079 while ( reportedRate != nominalRate ) {
\r
1080 microCounter += 5000;
\r
1081 if ( microCounter > 5000000 ) break;
\r
1085 // Remove the property listener.
\r
1086 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1088 if ( microCounter > 5000000 ) {
\r
1089 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1090 errorText_ = errorStream_.str();
\r
1095 // Now set the stream format for all streams. Also, check the
\r
1096 // physical format of the device and change that if necessary.
\r
1097 AudioStreamBasicDescription description;
\r
1098 dataSize = sizeof( AudioStreamBasicDescription );
\r
1099 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1100 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1101 if ( result != noErr ) {
\r
1102 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1103 errorText_ = errorStream_.str();
\r
1107 // Set the sample rate and data format id. However, only make the
\r
1108 // change if the sample rate is not within 1.0 of the desired
\r
1109 // rate and the format is not linear pcm.
\r
1110 bool updateFormat = false;
\r
1111 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1112 description.mSampleRate = (Float64) sampleRate;
\r
1113 updateFormat = true;
\r
1116 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1117 description.mFormatID = kAudioFormatLinearPCM;
\r
1118 updateFormat = true;
\r
1121 if ( updateFormat ) {
\r
1122 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1123 if ( result != noErr ) {
\r
1124 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1125 errorText_ = errorStream_.str();
\r
1130 // Now check the physical format.
\r
1131 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1132 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1133 if ( result != noErr ) {
\r
1134 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1135 errorText_ = errorStream_.str();
\r
1139 //std::cout << "Current physical stream format:" << std::endl;
\r
1140 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1141 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1142 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1143 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1145 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1146 description.mFormatID = kAudioFormatLinearPCM;
\r
1147 //description.mSampleRate = (Float64) sampleRate;
\r
1148 AudioStreamBasicDescription testDescription = description;
\r
1149 UInt32 formatFlags;
\r
1151 // We'll try higher bit rates first and then work our way down.
\r
1152 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1153 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1154 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1155 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1156 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1157 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1158 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1159 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1160 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1161 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1162 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1163 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1164 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1166 bool setPhysicalFormat = false;
\r
1167 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1168 testDescription = description;
\r
1169 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1170 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1171 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1172 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1174 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1175 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1176 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1177 if ( result == noErr ) {
\r
1178 setPhysicalFormat = true;
\r
1179 //std::cout << "Updated physical stream format:" << std::endl;
\r
1180 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1181 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1182 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1183 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1188 if ( !setPhysicalFormat ) {
\r
1189 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1190 errorText_ = errorStream_.str();
\r
1193 } // done setting virtual/physical formats.
\r
1195 // Get the stream / device latency.
\r
1197 dataSize = sizeof( UInt32 );
\r
1198 property.mSelector = kAudioDevicePropertyLatency;
\r
1199 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1200 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1201 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1203 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1204 errorText_ = errorStream_.str();
\r
1205 error( RtAudioError::WARNING );
\r
1209 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1210 // always be presented in native-endian format, so we should never
\r
1211 // need to byte swap.
\r
1212 stream_.doByteSwap[mode] = false;
\r
1214 // From the CoreAudio documentation, PCM data must be supplied as
\r
1216 stream_.userFormat = format;
\r
1217 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1219 if ( streamCount == 1 )
\r
1220 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1221 else // multiple streams
\r
1222 stream_.nDeviceChannels[mode] = channels;
\r
1223 stream_.nUserChannels[mode] = channels;
\r
1224 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1225 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1226 else stream_.userInterleaved = true;
\r
1227 stream_.deviceInterleaved[mode] = true;
\r
1228 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1230 // Set flags for buffer conversion.
\r
1231 stream_.doConvertBuffer[mode] = false;
\r
1232 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1233 stream_.doConvertBuffer[mode] = true;
\r
1234 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1235 stream_.doConvertBuffer[mode] = true;
\r
1236 if ( streamCount == 1 ) {
\r
1237 if ( stream_.nUserChannels[mode] > 1 &&
\r
1238 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1239 stream_.doConvertBuffer[mode] = true;
\r
1241 else if ( monoMode && stream_.userInterleaved )
\r
1242 stream_.doConvertBuffer[mode] = true;
\r
1244 // Allocate our CoreHandle structure for the stream.
\r
1245 CoreHandle *handle = 0;
\r
1246 if ( stream_.apiHandle == 0 ) {
\r
1248 handle = new CoreHandle;
\r
1250 catch ( std::bad_alloc& ) {
\r
1251 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1255 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1256 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1259 stream_.apiHandle = (void *) handle;
\r
1262 handle = (CoreHandle *) stream_.apiHandle;
\r
1263 handle->iStream[mode] = firstStream;
\r
1264 handle->nStreams[mode] = streamCount;
\r
1265 handle->id[mode] = id;
\r
1267 // Allocate necessary internal buffers.
\r
1268 unsigned long bufferBytes;
\r
1269 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1270 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1271 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1272 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1273 if ( stream_.userBuffer[mode] == NULL ) {
\r
1274 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1278 // If possible, we will make use of the CoreAudio stream buffers as
\r
1279 // "device buffers". However, we can't do this if using multiple
\r
1281 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1283 bool makeBuffer = true;
\r
1284 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1285 if ( mode == INPUT ) {
\r
1286 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1287 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1288 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1292 if ( makeBuffer ) {
\r
1293 bufferBytes *= *bufferSize;
\r
1294 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1295 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1296 if ( stream_.deviceBuffer == NULL ) {
\r
1297 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1303 stream_.sampleRate = sampleRate;
\r
1304 stream_.device[mode] = device;
\r
1305 stream_.state = STREAM_STOPPED;
\r
1306 stream_.callbackInfo.object = (void *) this;
\r
1308 // Setup the buffer conversion information structure.
\r
1309 if ( stream_.doConvertBuffer[mode] ) {
\r
1310 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1311 else setConvertInfo( mode, channelOffset );
\r
1314 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1315 // Only one callback procedure per device.
\r
1316 stream_.mode = DUPLEX;
\r
1318 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1319 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1321 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1322 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1324 if ( result != noErr ) {
\r
1325 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1326 errorText_ = errorStream_.str();
\r
1329 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1330 stream_.mode = DUPLEX;
\r
1332 stream_.mode = mode;
\r
1335 // Setup the device property listener for over/underload.
\r
1336 property.mSelector = kAudioDeviceProcessorOverload;
\r
1337 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1338 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1344 pthread_cond_destroy( &handle->condition );
\r
1346 stream_.apiHandle = 0;
\r
1349 for ( int i=0; i<2; i++ ) {
\r
1350 if ( stream_.userBuffer[i] ) {
\r
1351 free( stream_.userBuffer[i] );
\r
1352 stream_.userBuffer[i] = 0;
\r
1356 if ( stream_.deviceBuffer ) {
\r
1357 free( stream_.deviceBuffer );
\r
1358 stream_.deviceBuffer = 0;
\r
1361 stream_.state = STREAM_CLOSED;
\r
1365 void RtApiCore :: closeStream( void )
\r
1367 if ( stream_.state == STREAM_CLOSED ) {
\r
1368 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1369 error( RtAudioError::WARNING );
\r
1373 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1374 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1375 if ( stream_.state == STREAM_RUNNING )
\r
1376 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1377 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1378 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1380 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1381 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1385 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1386 if ( stream_.state == STREAM_RUNNING )
\r
1387 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1388 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1389 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1391 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1392 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1396 for ( int i=0; i<2; i++ ) {
\r
1397 if ( stream_.userBuffer[i] ) {
\r
1398 free( stream_.userBuffer[i] );
\r
1399 stream_.userBuffer[i] = 0;
\r
1403 if ( stream_.deviceBuffer ) {
\r
1404 free( stream_.deviceBuffer );
\r
1405 stream_.deviceBuffer = 0;
\r
1408 // Destroy pthread condition variable.
\r
1409 pthread_cond_destroy( &handle->condition );
\r
1411 stream_.apiHandle = 0;
\r
1413 stream_.mode = UNINITIALIZED;
\r
1414 stream_.state = STREAM_CLOSED;
\r
1417 void RtApiCore :: startStream( void )
\r
1420 if ( stream_.state == STREAM_RUNNING ) {
\r
1421 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1422 error( RtAudioError::WARNING );
\r
1426 OSStatus result = noErr;
\r
1427 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1428 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1430 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1431 if ( result != noErr ) {
\r
1432 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1433 errorText_ = errorStream_.str();
\r
1438 if ( stream_.mode == INPUT ||
\r
1439 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1441 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1442 if ( result != noErr ) {
\r
1443 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1444 errorText_ = errorStream_.str();
\r
1449 handle->drainCounter = 0;
\r
1450 handle->internalDrain = false;
\r
1451 stream_.state = STREAM_RUNNING;
\r
1454 if ( result == noErr ) return;
\r
1455 error( RtAudioError::SYSTEM_ERROR );
\r
1458 void RtApiCore :: stopStream( void )
\r
1461 if ( stream_.state == STREAM_STOPPED ) {
\r
1462 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1463 error( RtAudioError::WARNING );
\r
1467 OSStatus result = noErr;
\r
1468 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1469 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1471 if ( handle->drainCounter == 0 ) {
\r
1472 handle->drainCounter = 2;
\r
1473 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1476 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1477 if ( result != noErr ) {
\r
1478 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1479 errorText_ = errorStream_.str();
\r
1484 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1486 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1487 if ( result != noErr ) {
\r
1488 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1489 errorText_ = errorStream_.str();
\r
1494 stream_.state = STREAM_STOPPED;
\r
1497 if ( result == noErr ) return;
\r
1498 error( RtAudioError::SYSTEM_ERROR );
\r
1501 void RtApiCore :: abortStream( void )
\r
1504 if ( stream_.state == STREAM_STOPPED ) {
\r
1505 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1506 error( RtAudioError::WARNING );
\r
1510 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1511 handle->drainCounter = 2;
\r
1516 // This function will be called by a spawned thread when the user
\r
1517 // callback function signals that the stream should be stopped or
\r
1518 // aborted. It is better to handle it this way because the
\r
1519 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1520 // function is called.
\r
1521 static void *coreStopStream( void *ptr )
\r
1523 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1524 RtApiCore *object = (RtApiCore *) info->object;
\r
1526 object->stopStream();
\r
1527 pthread_exit( NULL );
\r
1530 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1531 const AudioBufferList *inBufferList,
\r
1532 const AudioBufferList *outBufferList )
\r
1534 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1535 if ( stream_.state == STREAM_CLOSED ) {
\r
1536 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1537 error( RtAudioError::WARNING );
\r
1541 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1542 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1544 // Check if we were draining the stream and signal is finished.
\r
1545 if ( handle->drainCounter > 3 ) {
\r
1546 ThreadHandle threadId;
\r
1548 stream_.state = STREAM_STOPPING;
\r
1549 if ( handle->internalDrain == true )
\r
1550 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1551 else // external call to stopStream()
\r
1552 pthread_cond_signal( &handle->condition );
\r
1556 AudioDeviceID outputDevice = handle->id[0];
\r
1558 // Invoke user callback to get fresh output data UNLESS we are
\r
1559 // draining stream or duplex mode AND the input/output devices are
\r
1560 // different AND this function is called for the input device.
\r
1561 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1562 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1563 double streamTime = getStreamTime();
\r
1564 RtAudioStreamStatus status = 0;
\r
1565 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1566 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1567 handle->xrun[0] = false;
\r
1569 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1570 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1571 handle->xrun[1] = false;
\r
1574 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1575 stream_.bufferSize, streamTime, status, info->userData );
\r
1576 if ( cbReturnValue == 2 ) {
\r
1577 stream_.state = STREAM_STOPPING;
\r
1578 handle->drainCounter = 2;
\r
1582 else if ( cbReturnValue == 1 ) {
\r
1583 handle->drainCounter = 1;
\r
1584 handle->internalDrain = true;
\r
1588 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1590 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1592 if ( handle->nStreams[0] == 1 ) {
\r
1593 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1595 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1597 else { // fill multiple streams with zeros
\r
1598 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1599 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1601 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1605 else if ( handle->nStreams[0] == 1 ) {
\r
1606 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1607 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1608 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1610 else { // copy from user buffer
\r
1611 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1612 stream_.userBuffer[0],
\r
1613 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1616 else { // fill multiple streams
\r
1617 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1618 if ( stream_.doConvertBuffer[0] ) {
\r
1619 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1620 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1623 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1624 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1625 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1626 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1627 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1630 else { // fill multiple multi-channel streams with interleaved data
\r
1631 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1632 Float32 *out, *in;
\r
1634 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1635 UInt32 inChannels = stream_.nUserChannels[0];
\r
1636 if ( stream_.doConvertBuffer[0] ) {
\r
1637 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1638 inChannels = stream_.nDeviceChannels[0];
\r
1641 if ( inInterleaved ) inOffset = 1;
\r
1642 else inOffset = stream_.bufferSize;
\r
1644 channelsLeft = inChannels;
\r
1645 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1647 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1648 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1651 // Account for possible channel offset in first stream
\r
1652 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1653 streamChannels -= stream_.channelOffset[0];
\r
1654 outJump = stream_.channelOffset[0];
\r
1658 // Account for possible unfilled channels at end of the last stream
\r
1659 if ( streamChannels > channelsLeft ) {
\r
1660 outJump = streamChannels - channelsLeft;
\r
1661 streamChannels = channelsLeft;
\r
1664 // Determine input buffer offsets and skips
\r
1665 if ( inInterleaved ) {
\r
1666 inJump = inChannels;
\r
1667 in += inChannels - channelsLeft;
\r
1671 in += (inChannels - channelsLeft) * inOffset;
\r
1674 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1675 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1676 *out++ = in[j*inOffset];
\r
1681 channelsLeft -= streamChannels;
\r
1687 // Don't bother draining input
\r
1688 if ( handle->drainCounter ) {
\r
1689 handle->drainCounter++;
\r
1693 AudioDeviceID inputDevice;
\r
1694 inputDevice = handle->id[1];
\r
1695 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1697 if ( handle->nStreams[1] == 1 ) {
\r
1698 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1699 convertBuffer( stream_.userBuffer[1],
\r
1700 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1701 stream_.convertInfo[1] );
\r
1703 else { // copy to user buffer
\r
1704 memcpy( stream_.userBuffer[1],
\r
1705 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1706 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1709 else { // read from multiple streams
\r
1710 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1711 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1713 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1714 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1715 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1716 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1717 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1720 else { // read from multiple multi-channel streams
\r
1721 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1722 Float32 *out, *in;
\r
1724 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1725 UInt32 outChannels = stream_.nUserChannels[1];
\r
1726 if ( stream_.doConvertBuffer[1] ) {
\r
1727 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1728 outChannels = stream_.nDeviceChannels[1];
\r
1731 if ( outInterleaved ) outOffset = 1;
\r
1732 else outOffset = stream_.bufferSize;
\r
1734 channelsLeft = outChannels;
\r
1735 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1737 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1738 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1741 // Account for possible channel offset in first stream
\r
1742 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1743 streamChannels -= stream_.channelOffset[1];
\r
1744 inJump = stream_.channelOffset[1];
\r
1748 // Account for possible unread channels at end of the last stream
\r
1749 if ( streamChannels > channelsLeft ) {
\r
1750 inJump = streamChannels - channelsLeft;
\r
1751 streamChannels = channelsLeft;
\r
1754 // Determine output buffer offsets and skips
\r
1755 if ( outInterleaved ) {
\r
1756 outJump = outChannels;
\r
1757 out += outChannels - channelsLeft;
\r
1761 out += (outChannels - channelsLeft) * outOffset;
\r
1764 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1765 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1766 out[j*outOffset] = *in++;
\r
1771 channelsLeft -= streamChannels;
\r
1775 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1776 convertBuffer( stream_.userBuffer[1],
\r
1777 stream_.deviceBuffer,
\r
1778 stream_.convertInfo[1] );
\r
1784 //MUTEX_UNLOCK( &stream_.mutex );
\r
1786 RtApi::tickStreamTime();
\r
1790 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1794 case kAudioHardwareNotRunningError:
\r
1795 return "kAudioHardwareNotRunningError";
\r
1797 case kAudioHardwareUnspecifiedError:
\r
1798 return "kAudioHardwareUnspecifiedError";
\r
1800 case kAudioHardwareUnknownPropertyError:
\r
1801 return "kAudioHardwareUnknownPropertyError";
\r
1803 case kAudioHardwareBadPropertySizeError:
\r
1804 return "kAudioHardwareBadPropertySizeError";
\r
1806 case kAudioHardwareIllegalOperationError:
\r
1807 return "kAudioHardwareIllegalOperationError";
\r
1809 case kAudioHardwareBadObjectError:
\r
1810 return "kAudioHardwareBadObjectError";
\r
1812 case kAudioHardwareBadDeviceError:
\r
1813 return "kAudioHardwareBadDeviceError";
\r
1815 case kAudioHardwareBadStreamError:
\r
1816 return "kAudioHardwareBadStreamError";
\r
1818 case kAudioHardwareUnsupportedOperationError:
\r
1819 return "kAudioHardwareUnsupportedOperationError";
\r
1821 case kAudioDeviceUnsupportedFormatError:
\r
1822 return "kAudioDeviceUnsupportedFormatError";
\r
1824 case kAudioDevicePermissionsError:
\r
1825 return "kAudioDevicePermissionsError";
\r
1828 return "CoreAudio unknown error";
\r
1832 //******************** End of __MACOSX_CORE__ *********************//
\r
1835 #if defined(__UNIX_JACK__)
\r
1837 // JACK is a low-latency audio server, originally written for the
\r
1838 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1839 // connect a number of different applications to an audio device, as
\r
1840 // well as allowing them to share audio between themselves.
\r
1842 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1843 // have ports connected to the server. The JACK server is typically
\r
1844 // started in a terminal as follows:
\r
1846 // .jackd -d alsa -d hw:0
\r
1848 // or through an interface program such as qjackctl. Many of the
\r
1849 // parameters normally set for a stream are fixed by the JACK server
\r
1850 // and can be specified when the JACK server is started. In
\r
1853 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1855 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1856 // frames, and number of buffers = 4. Once the server is running, it
\r
1857 // is not possible to override these values. If the values are not
\r
1858 // specified in the command-line, the JACK server uses default values.
\r
1860 // The JACK server does not have to be running when an instance of
\r
1861 // RtApiJack is created, though the function getDeviceCount() will
\r
1862 // report 0 devices found until JACK has been started. When no
\r
1863 // devices are available (i.e., the JACK server is not running), a
\r
1864 // stream cannot be opened.
\r
1866 #include <jack/jack.h>
\r
1867 #include <unistd.h>
\r
1870 // A structure to hold various information related to the Jack API
\r
1871 // implementation.
\r
1872 struct JackHandle {
\r
1873 jack_client_t *client;
\r
1874 jack_port_t **ports[2];
\r
1875 std::string deviceName[2];
\r
1877 pthread_cond_t condition;
\r
1878 int drainCounter; // Tracks callback counts when draining
\r
1879 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1882 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op error handler installed via jack_set_error_function() to
// silence JACK's internal stderr reporting in non-debug builds.
static void jackSilentError( const char * ) {}
\r
1887 RtApiJack :: RtApiJack()
\r
1889 // Nothing to do here.
\r
1890 #if !defined(__RTAUDIO_DEBUG__)
\r
1891 // Turn off Jack's internal error reporting.
\r
1892 jack_set_error_function( &jackSilentError );
\r
1896 RtApiJack :: ~RtApiJack()
\r
1898 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1901 unsigned int RtApiJack :: getDeviceCount( void )
\r
1903 // See if we can become a jack client.
\r
1904 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1905 jack_status_t *status = NULL;
\r
1906 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1907 if ( client == 0 ) return 0;
\r
1909 const char **ports;
\r
1910 std::string port, previousPort;
\r
1911 unsigned int nChannels = 0, nDevices = 0;
\r
1912 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1914 // Parse the port names up to the first colon (:).
\r
1915 size_t iColon = 0;
\r
1917 port = (char *) ports[ nChannels ];
\r
1918 iColon = port.find(":");
\r
1919 if ( iColon != std::string::npos ) {
\r
1920 port = port.substr( 0, iColon + 1 );
\r
1921 if ( port != previousPort ) {
\r
1923 previousPort = port;
\r
1926 } while ( ports[++nChannels] );
\r
1930 jack_client_close( client );
\r
1934 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1936 RtAudio::DeviceInfo info;
\r
1937 info.probed = false;
\r
1939 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1940 jack_status_t *status = NULL;
\r
1941 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1942 if ( client == 0 ) {
\r
1943 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1944 error( RtAudioError::WARNING );
\r
1948 const char **ports;
\r
1949 std::string port, previousPort;
\r
1950 unsigned int nPorts = 0, nDevices = 0;
\r
1951 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1953 // Parse the port names up to the first colon (:).
\r
1954 size_t iColon = 0;
\r
1956 port = (char *) ports[ nPorts ];
\r
1957 iColon = port.find(":");
\r
1958 if ( iColon != std::string::npos ) {
\r
1959 port = port.substr( 0, iColon );
\r
1960 if ( port != previousPort ) {
\r
1961 if ( nDevices == device ) info.name = port;
\r
1963 previousPort = port;
\r
1966 } while ( ports[++nPorts] );
\r
1970 if ( device >= nDevices ) {
\r
1971 jack_client_close( client );
\r
1972 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1973 error( RtAudioError::INVALID_USE );
\r
1977 // Get the current jack server sample rate.
\r
1978 info.sampleRates.clear();
\r
1979 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1981 // Count the available ports containing the client name as device
\r
1982 // channels. Jack "input ports" equal RtAudio output channels.
\r
1983 unsigned int nChannels = 0;
\r
1984 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1986 while ( ports[ nChannels ] ) nChannels++;
\r
1988 info.outputChannels = nChannels;
\r
1991 // Jack "output ports" equal RtAudio input channels.
\r
1993 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1995 while ( ports[ nChannels ] ) nChannels++;
\r
1997 info.inputChannels = nChannels;
\r
2000 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2001 jack_client_close(client);
\r
2002 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2003 error( RtAudioError::WARNING );
\r
2007 // If device opens for both playback and capture, we determine the channels.
\r
2008 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2009 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2011 // Jack always uses 32-bit floats.
\r
2012 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2014 // Jack doesn't provide default devices so we'll use the first available one.
\r
2015 if ( device == 0 && info.outputChannels > 0 )
\r
2016 info.isDefaultOutput = true;
\r
2017 if ( device == 0 && info.inputChannels > 0 )
\r
2018 info.isDefaultInput = true;
\r
2020 jack_client_close(client);
\r
2021 info.probed = true;
\r
2025 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2027 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2029 RtApiJack *object = (RtApiJack *) info->object;
\r
2030 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2035 // This function will be called by a spawned thread when the Jack
\r
2036 // server signals that it is shutting down. It is necessary to handle
\r
2037 // it this way because the jackShutdown() function must return before
\r
2038 // the jack_deactivate() function (in closeStream()) will return.
\r
2039 static void *jackCloseStream( void *ptr )
\r
2041 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2042 RtApiJack *object = (RtApiJack *) info->object;
\r
2044 object->closeStream();
\r
2046 pthread_exit( NULL );
\r
2048 static void jackShutdown( void *infoPointer )
\r
2050 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2051 RtApiJack *object = (RtApiJack *) info->object;
\r
2053 // Check current stream state. If stopped, then we'll assume this
\r
2054 // was called as a result of a call to RtApiJack::stopStream (the
\r
2055 // deactivation of a client handle causes this function to be called).
\r
2056 // If not, we'll assume the Jack server is shutting down or some
\r
2057 // other problem occurred and we should close the stream.
\r
2058 if ( object->isStreamRunning() == false ) return;
\r
2060 ThreadHandle threadId;
\r
2061 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2062 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2065 static int jackXrun( void *infoPointer )
\r
2067 JackHandle *handle = (JackHandle *) infoPointer;
\r
2069 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2070 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2075 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2076 unsigned int firstChannel, unsigned int sampleRate,
\r
2077 RtAudioFormat format, unsigned int *bufferSize,
\r
2078 RtAudio::StreamOptions *options )
\r
2080 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2082 // Look for jack server and try to become a client (only do once per stream).
\r
2083 jack_client_t *client = 0;
\r
2084 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2085 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2086 jack_status_t *status = NULL;
\r
2087 if ( options && !options->streamName.empty() )
\r
2088 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2090 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2091 if ( client == 0 ) {
\r
2092 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2093 error( RtAudioError::WARNING );
\r
2098 // The handle must have been created on an earlier pass.
\r
2099 client = handle->client;
\r
2102 const char **ports;
\r
2103 std::string port, previousPort, deviceName;
\r
2104 unsigned int nPorts = 0, nDevices = 0;
\r
2105 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2107 // Parse the port names up to the first colon (:).
\r
2108 size_t iColon = 0;
\r
2110 port = (char *) ports[ nPorts ];
\r
2111 iColon = port.find(":");
\r
2112 if ( iColon != std::string::npos ) {
\r
2113 port = port.substr( 0, iColon );
\r
2114 if ( port != previousPort ) {
\r
2115 if ( nDevices == device ) deviceName = port;
\r
2117 previousPort = port;
\r
2120 } while ( ports[++nPorts] );
\r
2124 if ( device >= nDevices ) {
\r
2125 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2129 // Count the available ports containing the client name as device
\r
2130 // channels. Jack "input ports" equal RtAudio output channels.
\r
2131 unsigned int nChannels = 0;
\r
2132 unsigned long flag = JackPortIsInput;
\r
2133 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2134 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2136 while ( ports[ nChannels ] ) nChannels++;
\r
2140 // Compare the jack ports for specified client to the requested number of channels.
\r
2141 if ( nChannels < (channels + firstChannel) ) {
\r
2142 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2143 errorText_ = errorStream_.str();
\r
2147 // Check the jack server sample rate.
\r
2148 unsigned int jackRate = jack_get_sample_rate( client );
\r
2149 if ( sampleRate != jackRate ) {
\r
2150 jack_client_close( client );
\r
2151 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2152 errorText_ = errorStream_.str();
\r
2155 stream_.sampleRate = jackRate;
\r
2157 // Get the latency of the JACK port.
\r
2158 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2159 if ( ports[ firstChannel ] ) {
\r
2160 // Added by Ge Wang
\r
2161 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2162 // the range (usually the min and max are equal)
\r
2163 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2164 // get the latency range
\r
2165 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2166 // be optimistic, use the min!
\r
2167 stream_.latency[mode] = latrange.min;
\r
2168 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2172 // The jack server always uses 32-bit floating-point data.
\r
2173 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2174 stream_.userFormat = format;
\r
2176 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2177 else stream_.userInterleaved = true;
\r
2179 // Jack always uses non-interleaved buffers.
\r
2180 stream_.deviceInterleaved[mode] = false;
\r
2182 // Jack always provides host byte-ordered data.
\r
2183 stream_.doByteSwap[mode] = false;
\r
2185 // Get the buffer size. The buffer size and number of buffers
\r
2186 // (periods) is set when the jack server is started.
\r
2187 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2188 *bufferSize = stream_.bufferSize;
\r
2190 stream_.nDeviceChannels[mode] = channels;
\r
2191 stream_.nUserChannels[mode] = channels;
\r
2193 // Set flags for buffer conversion.
\r
2194 stream_.doConvertBuffer[mode] = false;
\r
2195 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2196 stream_.doConvertBuffer[mode] = true;
\r
2197 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2198 stream_.nUserChannels[mode] > 1 )
\r
2199 stream_.doConvertBuffer[mode] = true;
\r
2201 // Allocate our JackHandle structure for the stream.
\r
2202 if ( handle == 0 ) {
\r
2204 handle = new JackHandle;
\r
2206 catch ( std::bad_alloc& ) {
\r
2207 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2211 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2212 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2215 stream_.apiHandle = (void *) handle;
\r
2216 handle->client = client;
\r
2218 handle->deviceName[mode] = deviceName;
\r
2220 // Allocate necessary internal buffers.
\r
2221 unsigned long bufferBytes;
\r
2222 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2223 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2224 if ( stream_.userBuffer[mode] == NULL ) {
\r
2225 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2229 if ( stream_.doConvertBuffer[mode] ) {
\r
2231 bool makeBuffer = true;
\r
2232 if ( mode == OUTPUT )
\r
2233 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2234 else { // mode == INPUT
\r
2235 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2236 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2237 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2238 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2242 if ( makeBuffer ) {
\r
2243 bufferBytes *= *bufferSize;
\r
2244 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2245 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2246 if ( stream_.deviceBuffer == NULL ) {
\r
2247 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2253 // Allocate memory for the Jack ports (channels) identifiers.
\r
2254 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2255 if ( handle->ports[mode] == NULL ) {
\r
2256 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2260 stream_.device[mode] = device;
\r
2261 stream_.channelOffset[mode] = firstChannel;
\r
2262 stream_.state = STREAM_STOPPED;
\r
2263 stream_.callbackInfo.object = (void *) this;
\r
2265 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2266 // We had already set up the stream for output.
\r
2267 stream_.mode = DUPLEX;
\r
2269 stream_.mode = mode;
\r
2270 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2271 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2272 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2275 // Register our ports.
\r
2277 if ( mode == OUTPUT ) {
\r
2278 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2279 snprintf( label, 64, "outport %d", i );
\r
2280 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2281 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2285 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2286 snprintf( label, 64, "inport %d", i );
\r
2287 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2288 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2292 // Setup the buffer conversion information structure. We don't use
\r
2293 // buffers to do channel offsets, so we override that parameter
\r
2295 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2301 pthread_cond_destroy( &handle->condition );
\r
2302 jack_client_close( handle->client );
\r
2304 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2305 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2308 stream_.apiHandle = 0;
\r
2311 for ( int i=0; i<2; i++ ) {
\r
2312 if ( stream_.userBuffer[i] ) {
\r
2313 free( stream_.userBuffer[i] );
\r
2314 stream_.userBuffer[i] = 0;
\r
2318 if ( stream_.deviceBuffer ) {
\r
2319 free( stream_.deviceBuffer );
\r
2320 stream_.deviceBuffer = 0;
\r
2326 void RtApiJack :: closeStream( void )
\r
2328 if ( stream_.state == STREAM_CLOSED ) {
\r
2329 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2330 error( RtAudioError::WARNING );
\r
2334 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2337 if ( stream_.state == STREAM_RUNNING )
\r
2338 jack_deactivate( handle->client );
\r
2340 jack_client_close( handle->client );
\r
2344 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2345 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2346 pthread_cond_destroy( &handle->condition );
\r
2348 stream_.apiHandle = 0;
\r
2351 for ( int i=0; i<2; i++ ) {
\r
2352 if ( stream_.userBuffer[i] ) {
\r
2353 free( stream_.userBuffer[i] );
\r
2354 stream_.userBuffer[i] = 0;
\r
2358 if ( stream_.deviceBuffer ) {
\r
2359 free( stream_.deviceBuffer );
\r
2360 stream_.deviceBuffer = 0;
\r
2363 stream_.mode = UNINITIALIZED;
\r
2364 stream_.state = STREAM_CLOSED;
\r
2367 void RtApiJack :: startStream( void )
\r
2370 if ( stream_.state == STREAM_RUNNING ) {
\r
2371 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2372 error( RtAudioError::WARNING );
\r
2376 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2377 int result = jack_activate( handle->client );
\r
2379 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2383 const char **ports;
\r
2385 // Get the list of available ports.
\r
2386 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2388 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2389 if ( ports == NULL) {
\r
2390 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2394 // Now make the port connections. Since RtAudio wasn't designed to
\r
2395 // allow the user to select particular channels of a device, we'll
\r
2396 // just open the first "nChannels" ports with offset.
\r
2397 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2399 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2400 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2403 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2410 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2412 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2413 if ( ports == NULL) {
\r
2414 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2418 // Now make the port connections. See note above.
\r
2419 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2421 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2422 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2425 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2432 handle->drainCounter = 0;
\r
2433 handle->internalDrain = false;
\r
2434 stream_.state = STREAM_RUNNING;
\r
2437 if ( result == 0 ) return;
\r
2438 error( RtAudioError::SYSTEM_ERROR );
\r
2441 void RtApiJack :: stopStream( void )
\r
2444 if ( stream_.state == STREAM_STOPPED ) {
\r
2445 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2446 error( RtAudioError::WARNING );
\r
2450 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2451 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2453 if ( handle->drainCounter == 0 ) {
\r
2454 handle->drainCounter = 2;
\r
2455 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2459 jack_deactivate( handle->client );
\r
2460 stream_.state = STREAM_STOPPED;
\r
2463 void RtApiJack :: abortStream( void )
\r
2466 if ( stream_.state == STREAM_STOPPED ) {
\r
2467 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2468 error( RtAudioError::WARNING );
\r
2472 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2473 handle->drainCounter = 2;
\r
2478 // This function will be called by a spawned thread when the user
\r
2479 // callback function signals that the stream should be stopped or
\r
2480 // aborted. It is necessary to handle it this way because the
\r
2481 // callbackEvent() function must return before the jack_deactivate()
\r
2482 // function will return.
\r
2483 static void *jackStopStream( void *ptr )
\r
2485 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2486 RtApiJack *object = (RtApiJack *) info->object;
\r
2488 object->stopStream();
\r
2489 pthread_exit( NULL );
\r
2492 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2494 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2495 if ( stream_.state == STREAM_CLOSED ) {
\r
2496 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2497 error( RtAudioError::WARNING );
\r
2500 if ( stream_.bufferSize != nframes ) {
\r
2501 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2502 error( RtAudioError::WARNING );
\r
2506 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2507 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2509 // Check if we were draining the stream and signal is finished.
\r
2510 if ( handle->drainCounter > 3 ) {
\r
2511 ThreadHandle threadId;
\r
2513 stream_.state = STREAM_STOPPING;
\r
2514 if ( handle->internalDrain == true )
\r
2515 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2517 pthread_cond_signal( &handle->condition );
\r
2521 // Invoke user callback first, to get fresh output data.
\r
2522 if ( handle->drainCounter == 0 ) {
\r
2523 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2524 double streamTime = getStreamTime();
\r
2525 RtAudioStreamStatus status = 0;
\r
2526 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2527 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2528 handle->xrun[0] = false;
\r
2530 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2531 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2532 handle->xrun[1] = false;
\r
2534 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2535 stream_.bufferSize, streamTime, status, info->userData );
\r
2536 if ( cbReturnValue == 2 ) {
\r
2537 stream_.state = STREAM_STOPPING;
\r
2538 handle->drainCounter = 2;
\r
2540 pthread_create( &id, NULL, jackStopStream, info );
\r
2543 else if ( cbReturnValue == 1 ) {
\r
2544 handle->drainCounter = 1;
\r
2545 handle->internalDrain = true;
\r
2549 jack_default_audio_sample_t *jackbuffer;
\r
2550 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2551 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2553 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2555 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2556 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2557 memset( jackbuffer, 0, bufferBytes );
\r
2561 else if ( stream_.doConvertBuffer[0] ) {
\r
2563 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2565 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2566 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2567 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2570 else { // no buffer conversion
\r
2571 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2572 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2573 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2578 // Don't bother draining input
\r
2579 if ( handle->drainCounter ) {
\r
2580 handle->drainCounter++;
\r
2584 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2586 if ( stream_.doConvertBuffer[1] ) {
\r
2587 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2588 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2589 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2591 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2593 else { // no buffer conversion
\r
2594 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2595 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2596 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2602 RtApi::tickStreamTime();
\r
2605 //******************** End of __UNIX_JACK__ *********************//
\r
2608 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2610 // The ASIO API is designed around a callback scheme, so this
\r
2611 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2612 // Jack. The primary constraint with ASIO is that it only allows
\r
2613 // access to a single driver at a time. Thus, it is not possible to
\r
2614 // have more than one simultaneous RtAudio stream.
\r
2616 // This implementation also requires a number of external ASIO files
\r
2617 // and a few global variables. The ASIO callback scheme does not
\r
2618 // allow for the passing of user data, so we must create a global
\r
2619 // pointer to our callbackInfo structure.
\r
2621 // On unix systems, we make use of a pthread condition variable.
\r
2622 // Since there is no equivalent in Windows, I hacked something based
\r
2623 // on information found in
\r
2624 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2626 #include "asiosys.h"
\r
2628 #include "iasiothiscallresolver.h"
\r
2629 #include "asiodrivers.h"
\r
2632 static AsioDrivers drivers;
\r
2633 static ASIOCallbacks asioCallbacks;
\r
2634 static ASIODriverInfo driverInfo;
\r
2635 static CallbackInfo *asioCallbackInfo;
\r
2636 static bool asioXRun;
\r
2638 struct AsioHandle {
\r
2639 int drainCounter; // Tracks callback counts when draining
\r
2640 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2641 ASIOBufferInfo *bufferInfos;
\r
2645 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2648 // Function declarations (definitions at end of section)
\r
2649 static const char* getAsioErrorString( ASIOError result );
\r
2650 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2651 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2653 RtApiAsio :: RtApiAsio()
\r
2655 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2656 // CoInitialize beforehand, but it must be for appartment threading
\r
2657 // (in which case, CoInitilialize will return S_FALSE here).
\r
2658 coInitialized_ = false;
\r
2659 HRESULT hr = CoInitialize( NULL );
\r
2660 if ( FAILED(hr) ) {
\r
2661 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2662 error( RtAudioError::WARNING );
\r
2664 coInitialized_ = true;
\r
2666 drivers.removeCurrentDriver();
\r
2667 driverInfo.asioVersion = 2;
\r
2669 // See note in DirectSound implementation about GetDesktopWindow().
\r
2670 driverInfo.sysRef = GetForegroundWindow();
\r
2673 RtApiAsio :: ~RtApiAsio()
\r
2675 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2676 if ( coInitialized_ ) CoUninitialize();
\r
2679 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2681 return (unsigned int) drivers.asioGetNumDev();
\r
2684 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2686 RtAudio::DeviceInfo info;
\r
2687 info.probed = false;
\r
2690 unsigned int nDevices = getDeviceCount();
\r
2691 if ( nDevices == 0 ) {
\r
2692 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2693 error( RtAudioError::INVALID_USE );
\r
2697 if ( device >= nDevices ) {
\r
2698 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2699 error( RtAudioError::INVALID_USE );
\r
2703 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2704 if ( stream_.state != STREAM_CLOSED ) {
\r
2705 if ( device >= devices_.size() ) {
\r
2706 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2707 error( RtAudioError::WARNING );
\r
2710 return devices_[ device ];
\r
2713 char driverName[32];
\r
2714 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2715 if ( result != ASE_OK ) {
\r
2716 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2717 errorText_ = errorStream_.str();
\r
2718 error( RtAudioError::WARNING );
\r
2722 info.name = driverName;
\r
2724 if ( !drivers.loadDriver( driverName ) ) {
\r
2725 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2726 errorText_ = errorStream_.str();
\r
2727 error( RtAudioError::WARNING );
\r
2731 result = ASIOInit( &driverInfo );
\r
2732 if ( result != ASE_OK ) {
\r
2733 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2734 errorText_ = errorStream_.str();
\r
2735 error( RtAudioError::WARNING );
\r
2739 // Determine the device channel information.
\r
2740 long inputChannels, outputChannels;
\r
2741 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2742 if ( result != ASE_OK ) {
\r
2743 drivers.removeCurrentDriver();
\r
2744 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2745 errorText_ = errorStream_.str();
\r
2746 error( RtAudioError::WARNING );
\r
2750 info.outputChannels = outputChannels;
\r
2751 info.inputChannels = inputChannels;
\r
2752 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2753 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2755 // Determine the supported sample rates.
\r
2756 info.sampleRates.clear();
\r
2757 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2758 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2759 if ( result == ASE_OK )
\r
2760 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2763 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2764 ASIOChannelInfo channelInfo;
\r
2765 channelInfo.channel = 0;
\r
2766 channelInfo.isInput = true;
\r
2767 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2768 result = ASIOGetChannelInfo( &channelInfo );
\r
2769 if ( result != ASE_OK ) {
\r
2770 drivers.removeCurrentDriver();
\r
2771 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2772 errorText_ = errorStream_.str();
\r
2773 error( RtAudioError::WARNING );
\r
2777 info.nativeFormats = 0;
\r
2778 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2779 info.nativeFormats |= RTAUDIO_SINT16;
\r
2780 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2781 info.nativeFormats |= RTAUDIO_SINT32;
\r
2782 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2783 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2784 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2785 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2786 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2787 info.nativeFormats |= RTAUDIO_SINT24;
\r
2789 if ( info.outputChannels > 0 )
\r
2790 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2791 if ( info.inputChannels > 0 )
\r
2792 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2794 info.probed = true;
\r
2795 drivers.removeCurrentDriver();
\r
2799 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2801 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2802 object->callbackEvent( index );
\r
2805 void RtApiAsio :: saveDeviceInfo( void )
\r
2809 unsigned int nDevices = getDeviceCount();
\r
2810 devices_.resize( nDevices );
\r
2811 for ( unsigned int i=0; i<nDevices; i++ )
\r
2812 devices_[i] = getDeviceInfo( i );
\r
2815 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2816 unsigned int firstChannel, unsigned int sampleRate,
\r
2817 RtAudioFormat format, unsigned int *bufferSize,
\r
2818 RtAudio::StreamOptions *options )
\r
2820 // For ASIO, a duplex stream MUST use the same driver.
\r
2821 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2822 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2826 char driverName[32];
\r
2827 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2828 if ( result != ASE_OK ) {
\r
2829 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2830 errorText_ = errorStream_.str();
\r
2834 // Only load the driver once for duplex stream.
\r
2835 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2836 // The getDeviceInfo() function will not work when a stream is open
\r
2837 // because ASIO does not allow multiple devices to run at the same
\r
2838 // time. Thus, we'll probe the system before opening a stream and
\r
2839 // save the results for use by getDeviceInfo().
\r
2840 this->saveDeviceInfo();
\r
2842 if ( !drivers.loadDriver( driverName ) ) {
\r
2843 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2844 errorText_ = errorStream_.str();
\r
2848 result = ASIOInit( &driverInfo );
\r
2849 if ( result != ASE_OK ) {
\r
2850 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2851 errorText_ = errorStream_.str();
\r
2856 // Check the device channel count.
\r
2857 long inputChannels, outputChannels;
\r
2858 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2859 if ( result != ASE_OK ) {
\r
2860 drivers.removeCurrentDriver();
\r
2861 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2862 errorText_ = errorStream_.str();
\r
2866 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2867 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2868 drivers.removeCurrentDriver();
\r
2869 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2870 errorText_ = errorStream_.str();
\r
2873 stream_.nDeviceChannels[mode] = channels;
\r
2874 stream_.nUserChannels[mode] = channels;
\r
2875 stream_.channelOffset[mode] = firstChannel;
\r
2877 // Verify the sample rate is supported.
\r
2878 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2879 if ( result != ASE_OK ) {
\r
2880 drivers.removeCurrentDriver();
\r
2881 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2882 errorText_ = errorStream_.str();
\r
2886 // Get the current sample rate
\r
2887 ASIOSampleRate currentRate;
\r
2888 result = ASIOGetSampleRate( ¤tRate );
\r
2889 if ( result != ASE_OK ) {
\r
2890 drivers.removeCurrentDriver();
\r
2891 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2892 errorText_ = errorStream_.str();
\r
2896 // Set the sample rate only if necessary
\r
2897 if ( currentRate != sampleRate ) {
\r
2898 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2899 if ( result != ASE_OK ) {
\r
2900 drivers.removeCurrentDriver();
\r
2901 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2902 errorText_ = errorStream_.str();
\r
2907 // Determine the driver data type.
\r
2908 ASIOChannelInfo channelInfo;
\r
2909 channelInfo.channel = 0;
\r
2910 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2911 else channelInfo.isInput = true;
\r
2912 result = ASIOGetChannelInfo( &channelInfo );
\r
2913 if ( result != ASE_OK ) {
\r
2914 drivers.removeCurrentDriver();
\r
2915 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2916 errorText_ = errorStream_.str();
\r
2920 // Assuming WINDOWS host is always little-endian.
\r
2921 stream_.doByteSwap[mode] = false;
\r
2922 stream_.userFormat = format;
\r
2923 stream_.deviceFormat[mode] = 0;
\r
2924 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2925 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2926 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2928 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2929 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2930 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2932 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2933 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2934 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2936 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2937 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2938 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2940 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2941 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2942 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2945 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2946 drivers.removeCurrentDriver();
\r
2947 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2948 errorText_ = errorStream_.str();
\r
2952 // Set the buffer size. For a duplex stream, this will end up
\r
2953 // setting the buffer size based on the input constraints, which
\r
2955 long minSize, maxSize, preferSize, granularity;
\r
2956 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2957 if ( result != ASE_OK ) {
\r
2958 drivers.removeCurrentDriver();
\r
2959 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2960 errorText_ = errorStream_.str();
\r
2964 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2965 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2966 else if ( granularity == -1 ) {
\r
2967 // Make sure bufferSize is a power of two.
\r
2968 int log2_of_min_size = 0;
\r
2969 int log2_of_max_size = 0;
\r
2971 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2972 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2973 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2976 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2977 int min_delta_num = log2_of_min_size;
\r
2979 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2980 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2981 if (current_delta < min_delta) {
\r
2982 min_delta = current_delta;
\r
2983 min_delta_num = i;
\r
2987 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2988 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2989 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2991 else if ( granularity != 0 ) {
\r
2992 // Set to an even multiple of granularity, rounding up.
\r
2993 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
2996 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2997 drivers.removeCurrentDriver();
\r
2998 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3002 stream_.bufferSize = *bufferSize;
\r
3003 stream_.nBuffers = 2;
\r
3005 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3006 else stream_.userInterleaved = true;
\r
3008 // ASIO always uses non-interleaved buffers.
\r
3009 stream_.deviceInterleaved[mode] = false;
\r
3011 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3012 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3013 if ( handle == 0 ) {
\r
3015 handle = new AsioHandle;
\r
3017 catch ( std::bad_alloc& ) {
\r
3018 //if ( handle == NULL ) {
\r
3019 drivers.removeCurrentDriver();
\r
3020 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3023 handle->bufferInfos = 0;
\r
3025 // Create a manual-reset event.
\r
3026 handle->condition = CreateEvent( NULL, // no security
\r
3027 TRUE, // manual-reset
\r
3028 FALSE, // non-signaled initially
\r
3029 NULL ); // unnamed
\r
3030 stream_.apiHandle = (void *) handle;
\r
3033 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3034 // and output separately, we'll have to dispose of previously
\r
3035 // created output buffers for a duplex stream.
\r
3036 long inputLatency, outputLatency;
\r
3037 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3038 ASIODisposeBuffers();
\r
3039 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3042 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3043 bool buffersAllocated = false;
\r
3044 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3045 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3046 if ( handle->bufferInfos == NULL ) {
\r
3047 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3048 errorText_ = errorStream_.str();
\r
3052 ASIOBufferInfo *infos;
\r
3053 infos = handle->bufferInfos;
\r
3054 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3055 infos->isInput = ASIOFalse;
\r
3056 infos->channelNum = i + stream_.channelOffset[0];
\r
3057 infos->buffers[0] = infos->buffers[1] = 0;
\r
3059 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3060 infos->isInput = ASIOTrue;
\r
3061 infos->channelNum = i + stream_.channelOffset[1];
\r
3062 infos->buffers[0] = infos->buffers[1] = 0;
\r
3065 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3066 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3067 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3068 asioCallbacks.asioMessage = &asioMessages;
\r
3069 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3070 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3071 if ( result != ASE_OK ) {
\r
3072 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3073 errorText_ = errorStream_.str();
\r
3076 buffersAllocated = true;
\r
3078 // Set flags for buffer conversion.
\r
3079 stream_.doConvertBuffer[mode] = false;
\r
3080 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3081 stream_.doConvertBuffer[mode] = true;
\r
3082 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3083 stream_.nUserChannels[mode] > 1 )
\r
3084 stream_.doConvertBuffer[mode] = true;
\r
3086 // Allocate necessary internal buffers
\r
3087 unsigned long bufferBytes;
\r
3088 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3089 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3090 if ( stream_.userBuffer[mode] == NULL ) {
\r
3091 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3095 if ( stream_.doConvertBuffer[mode] ) {
\r
3097 bool makeBuffer = true;
\r
3098 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3099 if ( mode == INPUT ) {
\r
3100 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3101 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3102 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3106 if ( makeBuffer ) {
\r
3107 bufferBytes *= *bufferSize;
\r
3108 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3109 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3110 if ( stream_.deviceBuffer == NULL ) {
\r
3111 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3117 stream_.sampleRate = sampleRate;
\r
3118 stream_.device[mode] = device;
\r
3119 stream_.state = STREAM_STOPPED;
\r
3120 asioCallbackInfo = &stream_.callbackInfo;
\r
3121 stream_.callbackInfo.object = (void *) this;
\r
3122 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3123 // We had already set up an output stream.
\r
3124 stream_.mode = DUPLEX;
\r
3126 stream_.mode = mode;
\r
3128 // Determine device latencies
\r
3129 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3130 if ( result != ASE_OK ) {
\r
3131 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3132 errorText_ = errorStream_.str();
\r
3133 error( RtAudioError::WARNING); // warn but don't fail
\r
3136 stream_.latency[0] = outputLatency;
\r
3137 stream_.latency[1] = inputLatency;
\r
3140 // Setup the buffer conversion information structure. We don't use
\r
3141 // buffers to do channel offsets, so we override that parameter
\r
3143 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3148 if ( buffersAllocated )
\r
3149 ASIODisposeBuffers();
\r
3150 drivers.removeCurrentDriver();
\r
3153 CloseHandle( handle->condition );
\r
3154 if ( handle->bufferInfos )
\r
3155 free( handle->bufferInfos );
\r
3157 stream_.apiHandle = 0;
\r
3160 for ( int i=0; i<2; i++ ) {
\r
3161 if ( stream_.userBuffer[i] ) {
\r
3162 free( stream_.userBuffer[i] );
\r
3163 stream_.userBuffer[i] = 0;
\r
3167 if ( stream_.deviceBuffer ) {
\r
3168 free( stream_.deviceBuffer );
\r
3169 stream_.deviceBuffer = 0;
\r
3175 void RtApiAsio :: closeStream()
\r
3177 if ( stream_.state == STREAM_CLOSED ) {
\r
3178 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3179 error( RtAudioError::WARNING );
\r
3183 if ( stream_.state == STREAM_RUNNING ) {
\r
3184 stream_.state = STREAM_STOPPED;
\r
3187 ASIODisposeBuffers();
\r
3188 drivers.removeCurrentDriver();
\r
3190 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3192 CloseHandle( handle->condition );
\r
3193 if ( handle->bufferInfos )
\r
3194 free( handle->bufferInfos );
\r
3196 stream_.apiHandle = 0;
\r
3199 for ( int i=0; i<2; i++ ) {
\r
3200 if ( stream_.userBuffer[i] ) {
\r
3201 free( stream_.userBuffer[i] );
\r
3202 stream_.userBuffer[i] = 0;
\r
3206 if ( stream_.deviceBuffer ) {
\r
3207 free( stream_.deviceBuffer );
\r
3208 stream_.deviceBuffer = 0;
\r
3211 stream_.mode = UNINITIALIZED;
\r
3212 stream_.state = STREAM_CLOSED;
\r
// Guard flag so the drain/stop thread logic is not re-entered; reset in
// startStream().
bool stopThreadCalled = false;
\r
3217 void RtApiAsio :: startStream()
\r
3220 if ( stream_.state == STREAM_RUNNING ) {
\r
3221 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3222 error( RtAudioError::WARNING );
\r
3226 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3227 ASIOError result = ASIOStart();
\r
3228 if ( result != ASE_OK ) {
\r
3229 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3230 errorText_ = errorStream_.str();
\r
3234 handle->drainCounter = 0;
\r
3235 handle->internalDrain = false;
\r
3236 ResetEvent( handle->condition );
\r
3237 stream_.state = STREAM_RUNNING;
\r
3241 stopThreadCalled = false;
\r
3243 if ( result == ASE_OK ) return;
\r
3244 error( RtAudioError::SYSTEM_ERROR );
\r
3247 void RtApiAsio :: stopStream()
\r
3250 if ( stream_.state == STREAM_STOPPED ) {
\r
3251 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3252 error( RtAudioError::WARNING );
\r
3256 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3257 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3258 if ( handle->drainCounter == 0 ) {
\r
3259 handle->drainCounter = 2;
\r
3260 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3264 stream_.state = STREAM_STOPPED;
\r
3266 ASIOError result = ASIOStop();
\r
3267 if ( result != ASE_OK ) {
\r
3268 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3269 errorText_ = errorStream_.str();
\r
3272 if ( result == ASE_OK ) return;
\r
3273 error( RtAudioError::SYSTEM_ERROR );
\r
3276 void RtApiAsio :: abortStream()
\r
3279 if ( stream_.state == STREAM_STOPPED ) {
\r
3280 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3281 error( RtAudioError::WARNING );
\r
3285 // The following lines were commented-out because some behavior was
\r
3286 // noted where the device buffers need to be zeroed to avoid
\r
3287 // continuing sound, even when the device buffers are completely
\r
3288 // disposed. So now, calling abort is the same as calling stop.
\r
3289 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3290 // handle->drainCounter = 2;
\r
3294 // This function will be called by a spawned thread when the user
\r
3295 // callback function signals that the stream should be stopped or
\r
3296 // aborted. It is necessary to handle it this way because the
\r
3297 // callbackEvent() function must return before the ASIOStop()
\r
3298 // function will return.
\r
3299 static unsigned __stdcall asioStopStream( void *ptr )
\r
3301 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3302 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3304 object->stopStream();
\r
3305 _endthreadex( 0 );
\r
3309 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3311 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3312 if ( stream_.state == STREAM_CLOSED ) {
\r
3313 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3314 error( RtAudioError::WARNING );
\r
3318 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3319 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3321 // Check if we were draining the stream and signal if finished.
\r
3322 if ( handle->drainCounter > 3 ) {
\r
3324 stream_.state = STREAM_STOPPING;
\r
3325 if ( handle->internalDrain == false )
\r
3326 SetEvent( handle->condition );
\r
3327 else { // spawn a thread to stop the stream
\r
3328 unsigned threadId;
\r
3329 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3330 &stream_.callbackInfo, 0, &threadId );
\r
3335 // Invoke user callback to get fresh output data UNLESS we are
\r
3336 // draining stream.
\r
3337 if ( handle->drainCounter == 0 ) {
\r
3338 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3339 double streamTime = getStreamTime();
\r
3340 RtAudioStreamStatus status = 0;
\r
3341 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3342 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3345 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3346 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3349 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3350 stream_.bufferSize, streamTime, status, info->userData );
\r
3351 if ( cbReturnValue == 2 ) {
\r
3352 stream_.state = STREAM_STOPPING;
\r
3353 handle->drainCounter = 2;
\r
3354 unsigned threadId;
\r
3355 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3356 &stream_.callbackInfo, 0, &threadId );
\r
3359 else if ( cbReturnValue == 1 ) {
\r
3360 handle->drainCounter = 1;
\r
3361 handle->internalDrain = true;
\r
3365 unsigned int nChannels, bufferBytes, i, j;
\r
3366 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3367 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3369 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3371 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3373 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3374 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3375 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3379 else if ( stream_.doConvertBuffer[0] ) {
\r
3381 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3382 if ( stream_.doByteSwap[0] )
\r
3383 byteSwapBuffer( stream_.deviceBuffer,
\r
3384 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3385 stream_.deviceFormat[0] );
\r
3387 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3388 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3389 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3390 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3396 if ( stream_.doByteSwap[0] )
\r
3397 byteSwapBuffer( stream_.userBuffer[0],
\r
3398 stream_.bufferSize * stream_.nUserChannels[0],
\r
3399 stream_.userFormat );
\r
3401 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3402 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3403 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3404 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3410 // Don't bother draining input
\r
3411 if ( handle->drainCounter ) {
\r
3412 handle->drainCounter++;
\r
3416 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3418 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3420 if (stream_.doConvertBuffer[1]) {
\r
3422 // Always interleave ASIO input data.
\r
3423 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3424 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3425 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3426 handle->bufferInfos[i].buffers[bufferIndex],
\r
3430 if ( stream_.doByteSwap[1] )
\r
3431 byteSwapBuffer( stream_.deviceBuffer,
\r
3432 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3433 stream_.deviceFormat[1] );
\r
3434 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3438 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3439 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3440 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3441 handle->bufferInfos[i].buffers[bufferIndex],
\r
3446 if ( stream_.doByteSwap[1] )
\r
3447 byteSwapBuffer( stream_.userBuffer[1],
\r
3448 stream_.bufferSize * stream_.nUserChannels[1],
\r
3449 stream_.userFormat );
\r
3454 // The following call was suggested by Malte Clasen. While the API
\r
3455 // documentation indicates it should not be required, some device
\r
3456 // drivers apparently do not function correctly without it.
\r
3457 ASIOOutputReady();
\r
3459 RtApi::tickStreamTime();
\r
3463 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3465 // The ASIO documentation says that this usually only happens during
\r
3466 // external sync. Audio processing is not stopped by the driver,
\r
3467 // actual sample rate might not have even changed, maybe only the
\r
3468 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3471 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3473 object->stopStream();
\r
3475 catch ( RtAudioError &exception ) {
\r
3476 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3480 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3483 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3487 switch( selector ) {
\r
3488 case kAsioSelectorSupported:
\r
3489 if ( value == kAsioResetRequest
\r
3490 || value == kAsioEngineVersion
\r
3491 || value == kAsioResyncRequest
\r
3492 || value == kAsioLatenciesChanged
\r
3493 // The following three were added for ASIO 2.0, you don't
\r
3494 // necessarily have to support them.
\r
3495 || value == kAsioSupportsTimeInfo
\r
3496 || value == kAsioSupportsTimeCode
\r
3497 || value == kAsioSupportsInputMonitor)
\r
3500 case kAsioResetRequest:
\r
3501 // Defer the task and perform the reset of the driver during the
\r
3502 // next "safe" situation. You cannot reset the driver right now,
\r
3503 // as this code is called from the driver. Reset the driver is
\r
3504 // done by completely destruct is. I.e. ASIOStop(),
\r
3505 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3507 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3510 case kAsioResyncRequest:
\r
3511 // This informs the application that the driver encountered some
\r
3512 // non-fatal data loss. It is used for synchronization purposes
\r
3513 // of different media. Added mainly to work around the Win16Mutex
\r
3514 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3515 // which could lose data because the Mutex was held too long by
\r
3516 // another thread. However a driver can issue it in other
\r
3517 // situations, too.
\r
3518 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3522 case kAsioLatenciesChanged:
\r
3523 // This will inform the host application that the drivers were
\r
3524 // latencies changed. Beware, it this does not mean that the
\r
3525 // buffer sizes have changed! You might need to update internal
\r
3527 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3530 case kAsioEngineVersion:
\r
3531 // Return the supported ASIO version of the host application. If
\r
3532 // a host application does not implement this selector, ASIO 1.0
\r
3533 // is assumed by the driver.
\r
3536 case kAsioSupportsTimeInfo:
\r
3537 // Informs the driver whether the
\r
3538 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3539 // For compatibility with ASIO 1.0 drivers the host application
\r
3540 // should always support the "old" bufferSwitch method, too.
\r
3543 case kAsioSupportsTimeCode:
\r
3544 // Informs the driver whether application is interested in time
\r
3545 // code info. If an application does not need to know about time
\r
3546 // code, the driver has less work to do.
\r
3553 static const char* getAsioErrorString( ASIOError result )
\r
3558 const char*message;
\r
3561 static const Messages m[] =
\r
3563 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3564 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3565 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3566 { ASE_InvalidMode, "Invalid mode." },
\r
3567 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3568 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3569 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3572 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3573 if ( m[i].value == result ) return m[i].message;
\r
3575 return "Unknown error.";
\r
3578 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3582 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3584 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3585 // - Introduces support for the Windows WASAPI API
\r
3586 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3587 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3588 // - Includes automatic internal conversion of sample rate, buffer size and channel count
\r
3593 #include <audioclient.h>
\r
3595 #include <mmdeviceapi.h>
\r
3596 #include <functiondiscoverykeys_devpkey.h>
\r
3598 //=============================================================================
\r
3600 #define SAFE_RELEASE( objectPtr )\
\r
3603 objectPtr->Release();\
\r
3604 objectPtr = NULL;\
\r
3607 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3609 //-----------------------------------------------------------------------------
\r
3611 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3612 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3613 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3614 // provide intermediate storage for read / write synchronization.
\r
3615 class WasapiBuffer
\r
3619 : buffer_( NULL ),
\r
3628 // sets the length of the internal ring buffer
\r
3629 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3632 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3634 bufferSize_ = bufferSize;
\r
3639 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3640 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3642 if ( !buffer || // incoming buffer is NULL
\r
3643 bufferSize == 0 || // incoming buffer has no data
\r
3644 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3649 unsigned int relOutIndex = outIndex_;
\r
3650 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3651 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3652 relOutIndex += bufferSize_;
\r
3655 // "in" index can end on the "out" index but cannot begin at it
\r
3656 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3657 return false; // not enough space between "in" index and "out" index
\r
3660 // copy buffer from external to internal
\r
3661 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3662 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3663 int fromInSize = bufferSize - fromZeroSize;
\r
3667 case RTAUDIO_SINT8:
\r
3668 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3669 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3671 case RTAUDIO_SINT16:
\r
3672 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3673 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3675 case RTAUDIO_SINT24:
\r
3676 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3677 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3679 case RTAUDIO_SINT32:
\r
3680 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3681 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3683 case RTAUDIO_FLOAT32:
\r
3684 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3685 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3687 case RTAUDIO_FLOAT64:
\r
3688 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3689 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3693 // update "in" index
\r
3694 inIndex_ += bufferSize;
\r
3695 inIndex_ %= bufferSize_;
\r
3700 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3701 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3703 if ( !buffer || // incoming buffer is NULL
\r
3704 bufferSize == 0 || // incoming buffer has no data
\r
3705 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3710 unsigned int relInIndex = inIndex_;
\r
3711 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3712 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3713 relInIndex += bufferSize_;
\r
3716 // "out" index can begin at and end on the "in" index
\r
3717 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3718 return false; // not enough space between "out" index and "in" index
\r
3721 // copy buffer from internal to external
\r
3722 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3723 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3724 int fromOutSize = bufferSize - fromZeroSize;
\r
3728 case RTAUDIO_SINT8:
\r
3729 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3730 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3732 case RTAUDIO_SINT16:
\r
3733 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3734 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3736 case RTAUDIO_SINT24:
\r
3737 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3738 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3740 case RTAUDIO_SINT32:
\r
3741 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3742 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3744 case RTAUDIO_FLOAT32:
\r
3745 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3746 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3748 case RTAUDIO_FLOAT64:
\r
3749 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3750 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3754 // update "out" index
\r
3755 outIndex_ += bufferSize;
\r
3756 outIndex_ %= bufferSize_;
\r
3763 unsigned int bufferSize_;
\r
3764 unsigned int inIndex_;
\r
3765 unsigned int outIndex_;
\r
3768 //-----------------------------------------------------------------------------
\r
3770 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate and
\r
3771 // channel counts between HW and the user. The convertBufferWasapi function is used to perform
\r
3772 // these conversions between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3773 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3774 // one rate and its multiple.
\r
3775 void convertBufferWasapi( char* outBuffer,
\r
3776 const char* inBuffer,
\r
3777 const unsigned int& inChannelCount,
\r
3778 const unsigned int& outChannelCount,
\r
3779 const unsigned int& inSampleRate,
\r
3780 const unsigned int& outSampleRate,
\r
3781 const unsigned int& inSampleCount,
\r
3782 unsigned int& outSampleCount,
\r
3783 const RtAudioFormat& format )
\r
3785 // calculate the new outSampleCount and relative sampleStep
\r
3786 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3787 float sampleStep = 1.0f / sampleRatio;
\r
3788 float inSampleFraction = 0.0f;
\r
3789 unsigned int commonChannelCount = std::min( inChannelCount, outChannelCount );
\r
3791 outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio );
\r
3793 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3794 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3796 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3800 case RTAUDIO_SINT8:
\r
3801 memcpy( &( ( char* ) outBuffer )[ outSample * outChannelCount ], &( ( char* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( char ) );
\r
3803 case RTAUDIO_SINT16:
\r
3804 memcpy( &( ( short* ) outBuffer )[ outSample * outChannelCount ], &( ( short* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( short ) );
\r
3806 case RTAUDIO_SINT24:
\r
3807 memcpy( &( ( S24* ) outBuffer )[ outSample * outChannelCount ], &( ( S24* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( S24 ) );
\r
3809 case RTAUDIO_SINT32:
\r
3810 memcpy( &( ( int* ) outBuffer )[ outSample * outChannelCount ], &( ( int* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( int ) );
\r
3812 case RTAUDIO_FLOAT32:
\r
3813 memcpy( &( ( float* ) outBuffer )[ outSample * outChannelCount ], &( ( float* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( float ) );
\r
3815 case RTAUDIO_FLOAT64:
\r
3816 memcpy( &( ( double* ) outBuffer )[ outSample * outChannelCount ], &( ( double* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( double ) );
\r
3820 // jump to next in sample
\r
3821 inSampleFraction += sampleStep;
\r
3825 //-----------------------------------------------------------------------------
\r
3827 // A structure to hold various information related to the WASAPI implementation.
\r
3828 struct WasapiHandle
\r
3830 IAudioClient* captureAudioClient;
\r
3831 IAudioClient* renderAudioClient;
\r
3832 IAudioCaptureClient* captureClient;
\r
3833 IAudioRenderClient* renderClient;
\r
3834 HANDLE captureEvent;
\r
3835 HANDLE renderEvent;
\r
3838 : captureAudioClient( NULL ),
\r
3839 renderAudioClient( NULL ),
\r
3840 captureClient( NULL ),
\r
3841 renderClient( NULL ),
\r
3842 captureEvent( NULL ),
\r
3843 renderEvent( NULL ) {}
\r
3846 //=============================================================================
\r
3848 RtApiWasapi::RtApiWasapi()
\r
3849 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3851 // WASAPI can run either apartment or multi-threaded
\r
3852 HRESULT hr = CoInitialize( NULL );
\r
3854 if ( !FAILED( hr ) )
\r
3855 coInitialized_ = true;
\r
3857 // Instantiate device enumerator
\r
3858 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3859 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3860 ( void** ) &deviceEnumerator_ );
\r
3862 if ( FAILED( hr ) ) {
\r
3863 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3864 error( RtAudioError::DRIVER_ERROR );
\r
3868 //-----------------------------------------------------------------------------
\r
3870 RtApiWasapi::~RtApiWasapi()
\r
3872 // if this object previously called CoInitialize()
\r
3873 if ( coInitialized_ ) {
\r
3877 if ( stream_.state != STREAM_CLOSED ) {
\r
3881 SAFE_RELEASE( deviceEnumerator_ );
\r
3884 //=============================================================================
\r
3886 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3888 unsigned int captureDeviceCount = 0;
\r
3889 unsigned int renderDeviceCount = 0;
\r
3891 IMMDeviceCollection* captureDevices = NULL;
\r
3892 IMMDeviceCollection* renderDevices = NULL;
\r
3894 // Count capture devices
\r
3895 errorText_.clear();
\r
3896 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3897 if ( FAILED( hr ) ) {
\r
3898 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3902 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3903 if ( FAILED( hr ) ) {
\r
3904 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3908 // Count render devices
\r
3909 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3910 if ( FAILED( hr ) ) {
\r
3911 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
3915 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3916 if ( FAILED( hr ) ) {
\r
3917 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
3922 // release all references
\r
3923 SAFE_RELEASE( captureDevices );
\r
3924 SAFE_RELEASE( renderDevices );
\r
3926 if ( errorText_.empty() )
\r
3927 return captureDeviceCount + renderDeviceCount;
\r
3929 error( RtAudioError::DRIVER_ERROR );
\r
3933 //-----------------------------------------------------------------------------
\r
3935 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
3937 RtAudio::DeviceInfo info;
\r
3938 unsigned int captureDeviceCount = 0;
\r
3939 unsigned int renderDeviceCount = 0;
\r
3940 std::wstring deviceName;
\r
3941 std::string defaultDeviceName;
\r
3942 bool isCaptureDevice = false;
\r
3944 PROPVARIANT deviceNameProp;
\r
3945 PROPVARIANT defaultDeviceNameProp;
\r
3947 IMMDeviceCollection* captureDevices = NULL;
\r
3948 IMMDeviceCollection* renderDevices = NULL;
\r
3949 IMMDevice* devicePtr = NULL;
\r
3950 IMMDevice* defaultDevicePtr = NULL;
\r
3951 IAudioClient* audioClient = NULL;
\r
3952 IPropertyStore* devicePropStore = NULL;
\r
3953 IPropertyStore* defaultDevicePropStore = NULL;
\r
3955 WAVEFORMATEX* deviceFormat = NULL;
\r
3956 WAVEFORMATEX* closestMatchFormat = NULL;
\r
3959 info.probed = false;
\r
3961 // Count capture devices
\r
3962 errorText_.clear();
\r
3963 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
3964 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3965 if ( FAILED( hr ) ) {
\r
3966 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
3970 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3971 if ( FAILED( hr ) ) {
\r
3972 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
3976 // Count render devices
\r
3977 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3978 if ( FAILED( hr ) ) {
\r
3979 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
3983 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3984 if ( FAILED( hr ) ) {
\r
3985 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
3989 // validate device index
\r
3990 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
3991 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
3992 errorType = RtAudioError::INVALID_USE;
\r
3996 // determine whether index falls within capture or render devices
\r
3997 if ( device >= renderDeviceCount ) {
\r
3998 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
3999 if ( FAILED( hr ) ) {
\r
4000 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4003 isCaptureDevice = true;
\r
4006 hr = renderDevices->Item( device, &devicePtr );
\r
4007 if ( FAILED( hr ) ) {
\r
4008 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4011 isCaptureDevice = false;
\r
4014 // get default device name
\r
4015 if ( isCaptureDevice ) {
\r
4016 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4017 if ( FAILED( hr ) ) {
\r
4018 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4023 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4024 if ( FAILED( hr ) ) {
\r
4025 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4030 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4031 if ( FAILED( hr ) ) {
\r
4032 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4035 PropVariantInit( &defaultDeviceNameProp );
\r
4037 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4038 if ( FAILED( hr ) ) {
\r
4039 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4043 deviceName = defaultDeviceNameProp.pwszVal;
\r
4044 defaultDeviceName = std::string( deviceName.begin(), deviceName.end() );
\r
4047 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4048 if ( FAILED( hr ) ) {
\r
4049 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4053 PropVariantInit( &deviceNameProp );
\r
4055 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4056 if ( FAILED( hr ) ) {
\r
4057 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4061 deviceName = deviceNameProp.pwszVal;
\r
4062 info.name = std::string( deviceName.begin(), deviceName.end() );
\r
4065 if ( isCaptureDevice ) {
\r
4066 info.isDefaultInput = info.name == defaultDeviceName;
\r
4067 info.isDefaultOutput = false;
\r
4070 info.isDefaultInput = false;
\r
4071 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4075 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4076 if ( FAILED( hr ) ) {
\r
4077 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4081 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4082 if ( FAILED( hr ) ) {
\r
4083 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4087 if ( isCaptureDevice ) {
\r
4088 info.inputChannels = deviceFormat->nChannels;
\r
4089 info.outputChannels = 0;
\r
4090 info.duplexChannels = 0;
\r
4093 info.inputChannels = 0;
\r
4094 info.outputChannels = deviceFormat->nChannels;
\r
4095 info.duplexChannels = 0;
\r
4099 info.sampleRates.clear();
\r
4101 // allow support for all sample rates as we have a built-in sample rate converter
\r
4102 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4103 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4107 info.nativeFormats = 0;
\r
4109 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4110 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4111 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4113 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4114 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4116 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4117 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4120 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4121 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4122 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4124 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4125 info.nativeFormats |= RTAUDIO_SINT8;
\r
4127 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4128 info.nativeFormats |= RTAUDIO_SINT16;
\r
4130 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4131 info.nativeFormats |= RTAUDIO_SINT24;
\r
4133 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4134 info.nativeFormats |= RTAUDIO_SINT32;
\r
4139 info.probed = true;
\r
4142 // release all references
\r
4143 PropVariantClear( &deviceNameProp );
\r
4144 PropVariantClear( &defaultDeviceNameProp );
\r
4146 SAFE_RELEASE( captureDevices );
\r
4147 SAFE_RELEASE( renderDevices );
\r
4148 SAFE_RELEASE( devicePtr );
\r
4149 SAFE_RELEASE( defaultDevicePtr );
\r
4150 SAFE_RELEASE( audioClient );
\r
4151 SAFE_RELEASE( devicePropStore );
\r
4152 SAFE_RELEASE( defaultDevicePropStore );
\r
4154 CoTaskMemFree( deviceFormat );
\r
4155 CoTaskMemFree( closestMatchFormat );
\r
4157 if ( !errorText_.empty() )
\r
4158 error( errorType );
\r
4162 //-----------------------------------------------------------------------------
\r
4164 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4166 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4167 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4175 //-----------------------------------------------------------------------------
\r
4177 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4179 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4180 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4188 //-----------------------------------------------------------------------------
\r
4190 void RtApiWasapi::closeStream( void )
\r
4192 if ( stream_.state == STREAM_CLOSED ) {
\r
4193 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4194 error( RtAudioError::WARNING );
\r
4198 if ( stream_.state != STREAM_STOPPED )
\r
4201 // clean up stream memory
\r
4202 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4203 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4205 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4206 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4208 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4209 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4211 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4212 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4214 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4215 stream_.apiHandle = NULL;
\r
4217 for ( int i = 0; i < 2; i++ ) {
\r
4218 if ( stream_.userBuffer[i] ) {
\r
4219 free( stream_.userBuffer[i] );
\r
4220 stream_.userBuffer[i] = 0;
\r
4224 if ( stream_.deviceBuffer ) {
\r
4225 free( stream_.deviceBuffer );
\r
4226 stream_.deviceBuffer = 0;
\r
4229 // update stream state
\r
4230 stream_.state = STREAM_CLOSED;
\r
4233 //-----------------------------------------------------------------------------
\r
4235 void RtApiWasapi::startStream( void )
\r
4239 if ( stream_.state == STREAM_RUNNING ) {
\r
4240 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4241 error( RtAudioError::WARNING );
\r
4245 // update stream state
\r
4246 stream_.state = STREAM_RUNNING;
\r
4248 // create WASAPI stream thread
\r
4249 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4251 if ( !stream_.callbackInfo.thread ) {
\r
4252 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4253 error( RtAudioError::THREAD_ERROR );
\r
4256 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4257 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4261 //-----------------------------------------------------------------------------
\r
4263 void RtApiWasapi::stopStream( void )
\r
4267 if ( stream_.state == STREAM_STOPPED ) {
\r
4268 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4269 error( RtAudioError::WARNING );
\r
4273 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4274 stream_.state = STREAM_STOPPING;
\r
4276 // wait until stream thread is stopped
\r
4277 while( stream_.state != STREAM_STOPPED ) {
\r
4281 // Wait for the last buffer to play before stopping.
\r
4282 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4284 // stop capture client if applicable
\r
4285 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4286 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4287 if ( FAILED( hr ) ) {
\r
4288 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4289 error( RtAudioError::DRIVER_ERROR );
\r
4294 // stop render client if applicable
\r
4295 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4296 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4297 if ( FAILED( hr ) ) {
\r
4298 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4299 error( RtAudioError::DRIVER_ERROR );
\r
4304 // close thread handle
\r
4305 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4306 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4307 error( RtAudioError::THREAD_ERROR );
\r
4311 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4314 //-----------------------------------------------------------------------------
\r
4316 void RtApiWasapi::abortStream( void )
\r
4320 if ( stream_.state == STREAM_STOPPED ) {
\r
4321 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4322 error( RtAudioError::WARNING );
\r
4326 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4327 stream_.state = STREAM_STOPPING;
\r
4329 // wait until stream thread is stopped
\r
4330 while ( stream_.state != STREAM_STOPPED ) {
\r
4334 // stop capture client if applicable
\r
4335 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4336 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4337 if ( FAILED( hr ) ) {
\r
4338 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4339 error( RtAudioError::DRIVER_ERROR );
\r
4344 // stop render client if applicable
\r
4345 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4346 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4347 if ( FAILED( hr ) ) {
\r
4348 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4349 error( RtAudioError::DRIVER_ERROR );
\r
4354 // close thread handle
\r
4355 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4356 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4357 error( RtAudioError::THREAD_ERROR );
\r
4361 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4364 //-----------------------------------------------------------------------------
\r
4366 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4367 unsigned int firstChannel, unsigned int sampleRate,
\r
4368 RtAudioFormat format, unsigned int* bufferSize,
\r
4369 RtAudio::StreamOptions* options )
\r
4371 bool methodResult = FAILURE;
\r
4372 unsigned int captureDeviceCount = 0;
\r
4373 unsigned int renderDeviceCount = 0;
\r
4375 IMMDeviceCollection* captureDevices = NULL;
\r
4376 IMMDeviceCollection* renderDevices = NULL;
\r
4377 IMMDevice* devicePtr = NULL;
\r
4378 WAVEFORMATEX* deviceFormat = NULL;
\r
4379 unsigned int bufferBytes;
\r
4380 stream_.state = STREAM_STOPPED;
\r
4382 // create API Handle if not already created
\r
4383 if ( !stream_.apiHandle )
\r
4384 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4386 // Count capture devices
\r
4387 errorText_.clear();
\r
4388 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4389 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4390 if ( FAILED( hr ) ) {
\r
4391 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4395 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4396 if ( FAILED( hr ) ) {
\r
4397 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4401 // Count render devices
\r
4402 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4403 if ( FAILED( hr ) ) {
\r
4404 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4408 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4409 if ( FAILED( hr ) ) {
\r
4410 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4414 // validate device index
\r
4415 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4416 errorType = RtAudioError::INVALID_USE;
\r
4417 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4421 // determine whether index falls within capture or render devices
\r
4422 if ( device >= renderDeviceCount ) {
\r
4423 if ( mode != INPUT ) {
\r
4424 errorType = RtAudioError::INVALID_USE;
\r
4425 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4429 // retrieve captureAudioClient from devicePtr
\r
4430 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4432 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4433 if ( FAILED( hr ) ) {
\r
4434 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4438 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4439 NULL, ( void** ) &captureAudioClient );
\r
4440 if ( FAILED( hr ) ) {
\r
4441 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4445 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4446 if ( FAILED( hr ) ) {
\r
4447 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4451 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4452 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4455 if ( mode != OUTPUT ) {
\r
4456 errorType = RtAudioError::INVALID_USE;
\r
4457 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4461 // retrieve renderAudioClient from devicePtr
\r
4462 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4464 hr = renderDevices->Item( device, &devicePtr );
\r
4465 if ( FAILED( hr ) ) {
\r
4466 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4470 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4471 NULL, ( void** ) &renderAudioClient );
\r
4472 if ( FAILED( hr ) ) {
\r
4473 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4477 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4478 if ( FAILED( hr ) ) {
\r
4479 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4483 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4484 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4487 // fill stream data
\r
4488 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4489 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4490 stream_.mode = DUPLEX;
\r
4493 stream_.mode = mode;
\r
4496 stream_.device[mode] = device;
\r
4497 stream_.doByteSwap[mode] = false;
\r
4498 stream_.sampleRate = sampleRate;
\r
4499 stream_.bufferSize = *bufferSize;
\r
4500 stream_.nBuffers = 1;
\r
4501 stream_.nUserChannels[mode] = channels;
\r
4502 stream_.channelOffset[mode] = firstChannel;
\r
4503 stream_.userFormat = format;
\r
4504 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4506 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4507 stream_.userInterleaved = false;
\r
4509 stream_.userInterleaved = true;
\r
4510 stream_.deviceInterleaved[mode] = true;
\r
4512 // Set flags for buffer conversion.
\r
4513 stream_.doConvertBuffer[mode] = false;
\r
4514 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
4515 stream_.doConvertBuffer[mode] = true;
\r
4516 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4517 stream_.nUserChannels[mode] > 1 )
\r
4518 stream_.doConvertBuffer[mode] = true;
\r
4520 if ( stream_.doConvertBuffer[mode] )
\r
4521 setConvertInfo( mode, 0 );
\r
4523 // Allocate necessary internal buffers
\r
4524 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4526 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4527 if ( !stream_.userBuffer[mode] ) {
\r
4528 errorType = RtAudioError::MEMORY_ERROR;
\r
4529 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4533 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4534 stream_.callbackInfo.priority = 15;
\r
4536 stream_.callbackInfo.priority = 0;
\r
4538 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4539 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4541 methodResult = SUCCESS;
\r
4545 SAFE_RELEASE( captureDevices );
\r
4546 SAFE_RELEASE( renderDevices );
\r
4547 SAFE_RELEASE( devicePtr );
\r
4548 CoTaskMemFree( deviceFormat );
\r
4550 // if method failed, close the stream
\r
4551 if ( methodResult == FAILURE )
\r
4554 if ( !errorText_.empty() )
\r
4555 error( errorType );
\r
4556 return methodResult;
\r
4559 //=============================================================================
\r
4561 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4564 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4569 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4572 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4577 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4580 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4585 //-----------------------------------------------------------------------------
\r
// NOTE(review): this listing appears to have dropped interior lines (the
// error-path goto/brace lines between the numbered statements are absent) --
// compare against the upstream RtAudio.cpp before editing the logic.
// Purpose (from the visible code): per-stream audio thread. Initializes the
// thread for COM, sets up WASAPI capture and/or render clients and their
// event handles, then loops until stream_.state == STREAM_STOPPING:
// pull captured frames -> run the user callback -> push rendered frames.
4587 void RtApiWasapi::wasapiThread()
\r
4589 // as this is a new thread, we must CoInitialize it
\r
4590 CoInitialize( NULL );
\r
4594 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4595 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4596 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
\r
4597 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
\r
4598 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
\r
4599 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
\r
4601 WAVEFORMATEX* captureFormat = NULL;
\r
4602 WAVEFORMATEX* renderFormat = NULL;
\r
4603 float captureSrRatio = 0.0f;
\r
4604 float renderSrRatio = 0.0f;
\r
4605 WasapiBuffer captureBuffer;
\r
4606 WasapiBuffer renderBuffer;
\r
4608 // declare local stream variables
\r
4609 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
\r
4610 BYTE* streamBuffer = NULL;
\r
4611 unsigned long captureFlags = 0;
\r
4612 unsigned int bufferFrameCount = 0;
\r
4613 unsigned int numFramesPadding = 0;
\r
4614 unsigned int convBufferSize = 0;
\r
4615 bool callbackPushed = false;
\r
4616 bool callbackPulled = false;
\r
4617 bool callbackStopped = false;
\r
4618 int callbackResult = 0;
\r
4620 // convBuffer is used to store converted buffers between WASAPI and the user
\r
4621 char* convBuffer = NULL;
\r
4622 unsigned int convBuffSize = 0;
\r
4623 unsigned int deviceBuffSize = 0;
\r
4625 errorText_.clear();
\r
4626 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4628 // Attempt to assign "Pro Audio" characteristic to thread
\r
// NOTE(review): AvrtDll and the GetProcAddress result are used without NULL
// checks in this listing -- a failed LoadLibrary would crash; confirm upstream.
4629 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
\r
4631 DWORD taskIndex = 0;
\r
4632 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
\r
4633 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
\r
4634 FreeLibrary( AvrtDll );
\r
4637 // start capture stream if applicable
\r
4638 if ( captureAudioClient ) {
\r
4639 hr = captureAudioClient->GetMixFormat( &captureFormat );
\r
4640 if ( FAILED( hr ) ) {
\r
4641 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4645 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
\r
4647 // initialize capture stream according to desired buffer size
\r
4648 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
\r
4649 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
\r
4651 if ( !captureClient ) {
\r
4652 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4653 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4654 desiredBufferPeriod,
\r
4655 desiredBufferPeriod,
\r
4658 if ( FAILED( hr ) ) {
\r
4659 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
\r
4663 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
\r
4664 ( void** ) &captureClient );
\r
4665 if ( FAILED( hr ) ) {
\r
4666 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
\r
4670 // configure captureEvent to trigger on every available capture buffer
\r
4671 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4672 if ( !captureEvent ) {
\r
4673 errorType = RtAudioError::SYSTEM_ERROR;
\r
4674 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
\r
4678 hr = captureAudioClient->SetEventHandle( captureEvent );
\r
4679 if ( FAILED( hr ) ) {
\r
4680 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
\r
4684 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
\r
4685 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
\r
4688 unsigned int inBufferSize = 0;
\r
4689 hr = captureAudioClient->GetBufferSize( &inBufferSize );
\r
4690 if ( FAILED( hr ) ) {
\r
4691 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
\r
4695 // scale outBufferSize according to stream->user sample rate ratio
\r
4696 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
\r
4697 inBufferSize *= stream_.nDeviceChannels[INPUT];
\r
4699 // set captureBuffer size
\r
4700 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
\r
4702 // reset the capture stream
\r
4703 hr = captureAudioClient->Reset();
\r
4704 if ( FAILED( hr ) ) {
\r
4705 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
\r
4709 // start the capture stream
\r
4710 hr = captureAudioClient->Start();
\r
4711 if ( FAILED( hr ) ) {
\r
4712 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
\r
4717 // start render stream if applicable
\r
4718 if ( renderAudioClient ) {
\r
4719 hr = renderAudioClient->GetMixFormat( &renderFormat );
\r
4720 if ( FAILED( hr ) ) {
\r
4721 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4725 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
\r
4727 // initialize render stream according to desired buffer size
\r
4728 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
\r
4729 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
\r
4731 if ( !renderClient ) {
\r
4732 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4733 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4734 desiredBufferPeriod,
\r
4735 desiredBufferPeriod,
\r
4738 if ( FAILED( hr ) ) {
\r
4739 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
\r
4743 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
\r
4744 ( void** ) &renderClient );
\r
4745 if ( FAILED( hr ) ) {
\r
4746 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
\r
4750 // configure renderEvent to trigger on every available render buffer
\r
4751 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4752 if ( !renderEvent ) {
\r
4753 errorType = RtAudioError::SYSTEM_ERROR;
\r
4754 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
\r
4758 hr = renderAudioClient->SetEventHandle( renderEvent );
\r
4759 if ( FAILED( hr ) ) {
\r
4760 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
\r
4764 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
\r
4765 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
\r
4768 unsigned int outBufferSize = 0;
\r
4769 hr = renderAudioClient->GetBufferSize( &outBufferSize );
\r
4770 if ( FAILED( hr ) ) {
\r
4771 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
\r
4775 // scale inBufferSize according to user->stream sample rate ratio
\r
4776 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
\r
4777 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
\r
4779 // set renderBuffer size
\r
4780 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4782 // reset the render stream
\r
4783 hr = renderAudioClient->Reset();
\r
4784 if ( FAILED( hr ) ) {
\r
4785 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
\r
4789 // start the render stream
\r
4790 hr = renderAudioClient->Start();
\r
4791 if ( FAILED( hr ) ) {
\r
4792 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
\r
// Size the conversion/device buffers for the active mode(s); DUPLEX takes the
// larger of the input and output requirements.
4797 if ( stream_.mode == INPUT ) {
\r
4798 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4799 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4801 else if ( stream_.mode == OUTPUT ) {
\r
4802 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4803 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4805 else if ( stream_.mode == DUPLEX ) {
\r
4806 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4807 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4808 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4809 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4812 convBuffer = ( char* ) malloc( convBuffSize );
\r
4813 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
\r
4814 if ( !convBuffer || !stream_.deviceBuffer ) {
\r
4815 errorType = RtAudioError::MEMORY_ERROR;
\r
4816 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
\r
4820 // stream process loop
\r
4821 while ( stream_.state != STREAM_STOPPING ) {
\r
4822 if ( !callbackPulled ) {
\r
4825 // 1. Pull callback buffer from inputBuffer
\r
4826 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
\r
4827 // Convert callback buffer to user format
\r
4829 if ( captureAudioClient ) {
\r
4830 // Pull callback buffer from inputBuffer
\r
4831 callbackPulled = captureBuffer.pullBuffer( convBuffer,
\r
4832 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
\r
4833 stream_.deviceFormat[INPUT] );
\r
4835 if ( callbackPulled ) {
\r
4836 // Convert callback buffer to user sample rate and channel count
\r
4837 convertBufferWasapi( stream_.deviceBuffer,
\r
4839 stream_.nDeviceChannels[INPUT],
\r
4840 stream_.nUserChannels[INPUT],
\r
4841 captureFormat->nSamplesPerSec,
\r
4842 stream_.sampleRate,
\r
4843 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
\r
4845 stream_.deviceFormat[INPUT] );
\r
4847 if ( stream_.doConvertBuffer[INPUT] ) {
\r
4848 // Convert callback buffer to user format
\r
4849 convertBuffer( stream_.userBuffer[INPUT],
\r
4850 stream_.deviceBuffer,
\r
4851 stream_.convertInfo[INPUT] );
\r
4854 // no conversion, simple copy deviceBuffer to userBuffer
\r
4855 memcpy( stream_.userBuffer[INPUT],
\r
4856 stream_.deviceBuffer,
\r
4857 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
\r
4862 // if there is no capture stream, set callbackPulled flag
\r
4863 callbackPulled = true;
\r
4866 // Execute Callback
\r
4867 // ================
\r
4868 // 1. Execute user callback method
\r
4869 // 2. Handle return value from callback
\r
4871 // if callback has not requested the stream to stop
\r
4872 if ( callbackPulled && !callbackStopped ) {
\r
4873 // Execute user callback method
\r
4874 callbackResult = callback( stream_.userBuffer[OUTPUT],
\r
4875 stream_.userBuffer[INPUT],
\r
4876 stream_.bufferSize,
\r
4878 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
\r
4879 stream_.callbackInfo.userData );
\r
4881 // Handle return value from callback
\r
4882 if ( callbackResult == 1 ) {
\r
4883 // instantiate a thread to stop this thread
\r
4884 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
\r
4885 if ( !threadHandle ) {
\r
4886 errorType = RtAudioError::THREAD_ERROR;
\r
4887 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
\r
4890 else if ( !CloseHandle( threadHandle ) ) {
\r
4891 errorType = RtAudioError::THREAD_ERROR;
\r
4892 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
\r
4896 callbackStopped = true;
\r
4898 else if ( callbackResult == 2 ) {
\r
4899 // instantiate a thread to stop this thread
\r
4900 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
\r
4901 if ( !threadHandle ) {
\r
4902 errorType = RtAudioError::THREAD_ERROR;
\r
4903 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
\r
4906 else if ( !CloseHandle( threadHandle ) ) {
\r
4907 errorType = RtAudioError::THREAD_ERROR;
\r
4908 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
\r
4912 callbackStopped = true;
\r
4917 // Callback Output
\r
4918 // ===============
\r
4919 // 1. Convert callback buffer to stream format
\r
4920 // 2. Convert callback buffer to stream sample rate and channel count
\r
4921 // 3. Push callback buffer into outputBuffer
\r
4923 if ( renderAudioClient && callbackPulled ) {
\r
4924 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
4925 // Convert callback buffer to stream format
\r
4926 convertBuffer( stream_.deviceBuffer,
\r
4927 stream_.userBuffer[OUTPUT],
\r
4928 stream_.convertInfo[OUTPUT] );
\r
4930 // Convert callback buffer to stream sample rate and channel count
\r
4931 convertBufferWasapi( convBuffer,
\r
4932 stream_.deviceBuffer,
\r
4933 stream_.nUserChannels[OUTPUT],
\r
4934 stream_.nDeviceChannels[OUTPUT],
\r
4935 stream_.sampleRate,
\r
4936 renderFormat->nSamplesPerSec,
\r
4937 stream_.bufferSize,
\r
4939 stream_.deviceFormat[OUTPUT] );
\r
4942 // Convert callback buffer to stream sample rate and channel count
\r
4943 convertBufferWasapi( convBuffer,
\r
4944 stream_.userBuffer[OUTPUT],
\r
4945 stream_.nUserChannels[OUTPUT],
\r
4946 stream_.nDeviceChannels[OUTPUT],
\r
4947 stream_.sampleRate,
\r
4948 renderFormat->nSamplesPerSec,
\r
4949 stream_.bufferSize,
\r
4951 stream_.deviceFormat[OUTPUT] );
\r
4954 // Push callback buffer into outputBuffer
\r
// NOTE(review): convBufferSize is 0-initialized above and no assignment to it
// is visible in this listing (only convBuffSize is computed) -- verify whether
// the assigning lines were dropped by the extraction.
4955 callbackPushed = renderBuffer.pushBuffer( convBuffer,
\r
4956 convBufferSize * stream_.nDeviceChannels[OUTPUT],
\r
4957 stream_.deviceFormat[OUTPUT] );
\r
4960 // if there is no render stream, set callbackPushed flag
\r
4961 callbackPushed = true;
\r
4966 // 1. Get capture buffer from stream
\r
4967 // 2. Push capture buffer into inputBuffer
\r
4968 // 3. If 2. was successful: Release capture buffer
\r
4970 if ( captureAudioClient ) {
\r
4971 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
\r
4972 if ( !callbackPulled ) {
\r
4973 WaitForSingleObject( captureEvent, INFINITE );
\r
4976 // Get capture buffer from stream
\r
4977 hr = captureClient->GetBuffer( &streamBuffer,
\r
4978 &bufferFrameCount,
\r
4979 &captureFlags, NULL, NULL );
\r
4980 if ( FAILED( hr ) ) {
\r
4981 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
\r
4985 if ( bufferFrameCount != 0 ) {
\r
4986 // Push capture buffer into inputBuffer
\r
4987 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
\r
4988 bufferFrameCount * stream_.nDeviceChannels[INPUT],
\r
4989 stream_.deviceFormat[INPUT] ) )
\r
4991 // Release capture buffer
\r
4992 hr = captureClient->ReleaseBuffer( bufferFrameCount );
\r
4993 if ( FAILED( hr ) ) {
\r
4994 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5000 // Inform WASAPI that capture was unsuccessful
\r
5001 hr = captureClient->ReleaseBuffer( 0 );
\r
5002 if ( FAILED( hr ) ) {
\r
5003 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5010 // Inform WASAPI that capture was unsuccessful
\r
5011 hr = captureClient->ReleaseBuffer( 0 );
\r
5012 if ( FAILED( hr ) ) {
\r
5013 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5021 // 1. Get render buffer from stream
\r
5022 // 2. Pull next buffer from outputBuffer
\r
5023 // 3. If 2. was successful: Fill render buffer with next buffer
\r
5024 // Release render buffer
\r
5026 if ( renderAudioClient ) {
\r
5027 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
\r
5028 if ( callbackPulled && !callbackPushed ) {
\r
5029 WaitForSingleObject( renderEvent, INFINITE );
\r
5032 // Get render buffer from stream
\r
5033 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
\r
5034 if ( FAILED( hr ) ) {
\r
5035 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
\r
5039 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
\r
5040 if ( FAILED( hr ) ) {
\r
5041 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
\r
5045 bufferFrameCount -= numFramesPadding;
\r
5047 if ( bufferFrameCount != 0 ) {
\r
5048 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
\r
5049 if ( FAILED( hr ) ) {
\r
5050 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
\r
5054 // Pull next buffer from outputBuffer
\r
5055 // Fill render buffer with next buffer
\r
5056 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
\r
5057 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
\r
5058 stream_.deviceFormat[OUTPUT] ) )
\r
5060 // Release render buffer
\r
5061 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
\r
5062 if ( FAILED( hr ) ) {
\r
5063 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5069 // Inform WASAPI that render was unsuccessful
\r
5070 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5071 if ( FAILED( hr ) ) {
\r
5072 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5079 // Inform WASAPI that render was unsuccessful
\r
5080 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5081 if ( FAILED( hr ) ) {
\r
5082 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5088 // if the callback buffer was pushed to renderBuffer, reset the callbackPulled flag
\r
5089 if ( callbackPushed ) {
\r
5090 callbackPulled = false;
\r
5093 // tick stream time
\r
5094 RtApi::tickStreamTime();
\r
// Cleanup: release the mix formats and the conversion buffer, then report.
5099 CoTaskMemFree( captureFormat );
\r
5100 CoTaskMemFree( renderFormat );
\r
5102 //delete convBuffer;
\r
5103 free ( convBuffer );
\r
5107 // update stream state
\r
5108 stream_.state = STREAM_STOPPED;
\r
5110 if ( errorText_.empty() )
\r
5113 error( errorType );
\r
5116 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5120 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5122 // Modified by Robin Davies, October 2005
\r
5123 // - Improvements to DirectX pointer chasing.
\r
5124 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5125 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5126 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5127 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5129 #include <dsound.h>
\r
5130 #include <assert.h>
\r
5131 #include <algorithm>
\r
5133 #if defined(__MINGW32__)
\r
5134 // missing from latest mingw winapi
\r
5135 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5136 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5137 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5138 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5141 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5143 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5144 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5147 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5149 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5150 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5151 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5152 return pointer >= earlierPointer && pointer < laterPointer;
\r
5155 // A structure to hold various information related to the DirectSound
\r
5156 // API implementation.
\r
5158 unsigned int drainCounter; // Tracks callback counts when draining
\r
5159 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
5163 UINT bufferPointer[2];
\r
5164 DWORD dsBufferSize[2];
\r
5165 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
5169 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5172 // Declarations for utility functions, callbacks, and structures
\r
5173 // specific to the DirectSound implementation.
\r
5174 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5175 LPCTSTR description,
\r
5177 LPVOID lpContext );
\r
5179 static const char* getErrorString( int code );
\r
5181 static unsigned __stdcall callbackHandler( void *ptr );
\r
5190 : found(false) { validId[0] = false; validId[1] = false; }
\r
5193 struct DsProbeData {
\r
5195 std::vector<struct DsDevice>* dsDevices;
\r
5198 RtApiDs :: RtApiDs()
\r
5200 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5201 // accept whatever the mainline chose for a threading model.
\r
5202 coInitialized_ = false;
\r
5203 HRESULT hr = CoInitialize( NULL );
\r
5204 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5207 RtApiDs :: ~RtApiDs()
\r
5209 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5210 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5213 // The DirectSound default output is always the first device.
\r
5214 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5219 // The DirectSound default input is always the first input device,
\r
5220 // which is the first capture device enumerated.
\r
5221 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
5226 unsigned int RtApiDs :: getDeviceCount( void )
\r
5228 // Set query flag for previously found devices to false, so that we
\r
5229 // can check for any devices that have disappeared.
\r
5230 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5231 dsDevices[i].found = false;
\r
5233 // Query DirectSound devices.
\r
5234 struct DsProbeData probeInfo;
\r
5235 probeInfo.isInput = false;
\r
5236 probeInfo.dsDevices = &dsDevices;
\r
5237 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5238 if ( FAILED( result ) ) {
\r
5239 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5240 errorText_ = errorStream_.str();
\r
5241 error( RtAudioError::WARNING );
\r
5244 // Query DirectSoundCapture devices.
\r
5245 probeInfo.isInput = true;
\r
5246 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5247 if ( FAILED( result ) ) {
\r
5248 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5249 errorText_ = errorStream_.str();
\r
5250 error( RtAudioError::WARNING );
\r
5253 // Clean out any devices that may have disappeared.
\r
5254 std::vector< int > indices;
\r
5255 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5256 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
5257 //unsigned int nErased = 0;
\r
5258 for ( unsigned int i=0; i<indices.size(); i++ )
\r
5259 dsDevices.erase( dsDevices.begin()+indices[i] );
\r
5260 //dsDevices.erase( dsDevices.begin()-nErased++ );
\r
5262 return static_cast<unsigned int>(dsDevices.size());
\r
// NOTE(review): this listing appears to have dropped interior lines (the
// "probeInput:" label targeted by the goto, the DSCAPS/DSCCAPS declarations,
// the "found" flag initialization and the return statements are not visible);
// compare against the upstream RtAudio.cpp before editing the logic.
// Purpose (from the visible code): probe output capabilities (channels,
// sample-rate range, native formats) via DirectSound and input capabilities
// via DirectSoundCapture for the given device index, merging supported input
// rates into info.sampleRates without duplicates.
5265 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5267 RtAudio::DeviceInfo info;
\r
5268 info.probed = false;
\r
5270 if ( dsDevices.size() == 0 ) {
\r
5271 // Force a query of all devices
\r
5273 if ( dsDevices.size() == 0 ) {
\r
5274 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5275 error( RtAudioError::INVALID_USE );
\r
5280 if ( device >= dsDevices.size() ) {
\r
5281 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5282 error( RtAudioError::INVALID_USE );
\r
// Skip the output probe entirely if this entry has no valid render GUID.
5287 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5289 LPDIRECTSOUND output;
\r
5291 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5292 if ( FAILED( result ) ) {
\r
5293 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5294 errorText_ = errorStream_.str();
\r
5295 error( RtAudioError::WARNING );
\r
5299 outCaps.dwSize = sizeof( outCaps );
\r
5300 result = output->GetCaps( &outCaps );
\r
5301 if ( FAILED( result ) ) {
\r
5302 output->Release();
\r
5303 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5304 errorText_ = errorStream_.str();
\r
5305 error( RtAudioError::WARNING );
\r
5309 // Get output channel information.
\r
5310 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5312 // Get sample rate information.
\r
5313 info.sampleRates.clear();
\r
5314 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5315 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5316 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
\r
5317 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5320 // Get format information.
\r
5321 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5322 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5324 output->Release();
\r
5326 if ( getDefaultOutputDevice() == device )
\r
5327 info.isDefaultOutput = true;
\r
// If there is no valid capture GUID, the device is output-only: finish here.
5329 if ( dsDevices[ device ].validId[1] == false ) {
\r
5330 info.name = dsDevices[ device ].name;
\r
5331 info.probed = true;
\r
5337 LPDIRECTSOUNDCAPTURE input;
\r
5338 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5339 if ( FAILED( result ) ) {
\r
5340 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5341 errorText_ = errorStream_.str();
\r
5342 error( RtAudioError::WARNING );
\r
5347 inCaps.dwSize = sizeof( inCaps );
\r
5348 result = input->GetCaps( &inCaps );
\r
5349 if ( FAILED( result ) ) {
\r
5351 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5352 errorText_ = errorStream_.str();
\r
5353 error( RtAudioError::WARNING );
\r
5357 // Get input channel information.
\r
5358 info.inputChannels = inCaps.dwChannels;
\r
5360 // Get sample rate and format information.
\r
// dwFormats is a bitmask of fixed WAVE_FORMAT_* combinations; map the stereo
// variants first, then the mono variants, deriving both native formats and
// the supported capture rates from the same mask.
5361 std::vector<unsigned int> rates;
\r
5362 if ( inCaps.dwChannels >= 2 ) {
\r
5363 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5364 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5365 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5366 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5367 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5368 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5369 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5370 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5372 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5373 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5374 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5375 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5376 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5378 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5379 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5380 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5381 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5382 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5385 else if ( inCaps.dwChannels == 1 ) {
\r
5386 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5387 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5388 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5389 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5390 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5391 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5392 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5393 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5395 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5396 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5397 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5398 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5399 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5401 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5402 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5403 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5404 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5405 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5408 else info.inputChannels = 0; // technically, this would be an error
\r
5412 if ( info.inputChannels == 0 ) return info;
\r
5414 // Copy the supported rates to the info structure but avoid duplication.
\r
5416 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5418 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5419 if ( rates[i] == info.sampleRates[j] ) {
\r
5424 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5426 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5428 // If device opens for both playback and capture, we determine the channels.
\r
5429 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5430 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5432 if ( device == 0 ) info.isDefaultInput = true;
\r
5434 // Copy name and return.
\r
5435 info.name = dsDevices[ device ].name;
\r
5436 info.probed = true;
\r
// Opens DirectSound device `device` for the requested stream mode
// (OUTPUT or INPUT), configures the playback/capture buffers, allocates
// the user/device conversion buffers and the shared DsHandle, and spawns
// the callback thread.  On failure it falls through to the shared
// cleanup code at the end of the function.
// NOTE(review): this extract embeds the original file's line numbers and
// stray "\r" separator lines; gaps in the embedded numbering indicate
// elided lines (braces, `return FAILURE;`, local declarations such as
// `HRESULT result;` / `DSCAPS outCaps;`) -- verify against the full source.
5440 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5441 unsigned int firstChannel, unsigned int sampleRate,
\r
5442 RtAudioFormat format, unsigned int *bufferSize,
\r
5443 RtAudio::StreamOptions *options )
\r
// DirectSound supports at most two channels (starting at firstChannel)
// per physical device.
5445 if ( channels + firstChannel > 2 ) {
\r
5446 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
5450 size_t nDevices = dsDevices.size();
\r
5451 if ( nDevices == 0 ) {
\r
5452 // This should not happen because a check is made before this function is called.
\r
5453 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
5457 if ( device >= nDevices ) {
\r
5458 // This should not happen because a check is made before this function is called.
\r
5459 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
// Verify the device exposes the requested direction:
// validId[0] == output capability, validId[1] == input capability.
5463 if ( mode == OUTPUT ) {
\r
5464 if ( dsDevices[ device ].validId[0] == false ) {
\r
5465 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
5466 errorText_ = errorStream_.str();
\r
5470 else { // mode == INPUT
\r
5471 if ( dsDevices[ device ].validId[1] == false ) {
\r
5472 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
5473 errorText_ = errorStream_.str();
\r
5478 // According to a note in PortAudio, using GetDesktopWindow()
\r
5479 // instead of GetForegroundWindow() is supposed to avoid problems
\r
5480 // that occur when the application's window is not the foreground
\r
5481 // window. Also, if the application window closes before the
\r
5482 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
5483 // problems when using GetDesktopWindow() but it seems fine now
\r
5484 // (January 2010). I'll leave it commented here.
\r
5485 // HWND hWnd = GetForegroundWindow();
\r
5486 HWND hWnd = GetDesktopWindow();
\r
5488 // Check the numberOfBuffers parameter and limit the lowest value to
\r
5489 // two. This is a judgement call and a value of two is probably too
\r
5490 // low for capture, but it should work for playback.
\r
// NOTE(review): nBuffers is declared on an elided line; presumably
// initialized to 0 before these checks -- confirm in the full source.
5492 if ( options ) nBuffers = options->numberOfBuffers;
\r
5493 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
5494 if ( nBuffers < 2 ) nBuffers = 3;
\r
5496 // Check the lower range of the user-specified buffer size and set
\r
5497 // (arbitrarily) to a lower bound of 32.
\r
5498 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
5500 // Create the wave format structure. The data format setting will
\r
5501 // be determined later.
\r
5502 WAVEFORMATEX waveFormat;
\r
5503 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
5504 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
5505 waveFormat.nChannels = channels + firstChannel;
\r
5506 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
5508 // Determine the device buffer size. By default, we'll use the value
\r
5509 // defined above (32K), but we will grow it to make allowances for
\r
5510 // very large software buffer sizes.
\r
5511 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
5512 DWORD dsPointerLeadTime = 0;
\r
// ohandle/bhandle carry the type-erased DS object and buffer pointers
// into the DsHandle at the end of the function.
5514 void *ohandle = 0, *bhandle = 0;
\r
// ---- OUTPUT mode: create the IDirectSound object, set the primary
// buffer format (the default is 8-bit/22 kHz), then create and zero
// the secondary playback buffer. ----
5516 if ( mode == OUTPUT ) {
\r
5518 LPDIRECTSOUND output;
\r
5519 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5520 if ( FAILED( result ) ) {
\r
5521 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5522 errorText_ = errorStream_.str();
\r
5527 outCaps.dwSize = sizeof( outCaps );
\r
5528 result = output->GetCaps( &outCaps );
\r
5529 if ( FAILED( result ) ) {
\r
5530 output->Release();
\r
5531 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
5532 errorText_ = errorStream_.str();
\r
5536 // Check channel information.
\r
5537 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
5538 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
5539 errorText_ = errorStream_.str();
\r
5543 // Check format information. Use 16-bit format unless not
\r
5544 // supported or user requests 8-bit.
\r
5545 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
5546 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
5547 waveFormat.wBitsPerSample = 16;
\r
5548 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5551 waveFormat.wBitsPerSample = 8;
\r
5552 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5554 stream_.userFormat = format;
\r
5556 // Update wave format structure and buffer information.
\r
5557 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5558 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
// Lead time in bytes: how far ahead of the safe-write pointer we will
// write in callbackEvent().
5559 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5561 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5562 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5563 dsBufferSize *= 2;
\r
5565 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
5566 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
5567 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
5568 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
5569 if ( FAILED( result ) ) {
\r
5570 output->Release();
\r
5571 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
5572 errorText_ = errorStream_.str();
\r
5576 // Even though we will write to the secondary buffer, we need to
\r
5577 // access the primary buffer to set the correct output format
\r
5578 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
5579 // buffer description.
\r
5580 DSBUFFERDESC bufferDescription;
\r
5581 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5582 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5583 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
5585 // Obtain the primary buffer
\r
5586 LPDIRECTSOUNDBUFFER buffer;
\r
5587 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5588 if ( FAILED( result ) ) {
\r
5589 output->Release();
\r
5590 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
5591 errorText_ = errorStream_.str();
\r
5595 // Set the primary DS buffer sound format.
\r
5596 result = buffer->SetFormat( &waveFormat );
\r
5597 if ( FAILED( result ) ) {
\r
5598 output->Release();
\r
5599 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
5600 errorText_ = errorStream_.str();
\r
5604 // Setup the secondary DS buffer description.
\r
5605 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5606 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5607 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5608 DSBCAPS_GLOBALFOCUS |
\r
5609 DSBCAPS_GETCURRENTPOSITION2 |
\r
5610 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
5611 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5612 bufferDescription.lpwfxFormat = &waveFormat;
\r
5614 // Try to create the secondary DS buffer. If that doesn't work,
\r
5615 // try to use software mixing. Otherwise, there's a problem.
\r
5616 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5617 if ( FAILED( result ) ) {
\r
// Fall back from hardware to software mixing before giving up.
5618 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5619 DSBCAPS_GLOBALFOCUS |
\r
5620 DSBCAPS_GETCURRENTPOSITION2 |
\r
5621 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
5622 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5623 if ( FAILED( result ) ) {
\r
5624 output->Release();
\r
5625 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
5626 errorText_ = errorStream_.str();
\r
5631 // Get the buffer size ... might be different from what we specified.
\r
5633 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
5634 result = buffer->GetCaps( &dsbcaps );
\r
5635 if ( FAILED( result ) ) {
\r
5636 output->Release();
\r
5637 buffer->Release();
\r
5638 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5639 errorText_ = errorStream_.str();
\r
5643 dsBufferSize = dsbcaps.dwBufferBytes;
\r
5645 // Lock the DS buffer
\r
5648 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5649 if ( FAILED( result ) ) {
\r
5650 output->Release();
\r
5651 buffer->Release();
\r
5652 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
5653 errorText_ = errorStream_.str();
\r
5657 // Zero the DS buffer
\r
5658 ZeroMemory( audioPtr, dataLen );
\r
5660 // Unlock the DS buffer
\r
5661 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5662 if ( FAILED( result ) ) {
\r
5663 output->Release();
\r
5664 buffer->Release();
\r
5665 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
5666 errorText_ = errorStream_.str();
\r
5670 ohandle = (void *) output;
\r
5671 bhandle = (void *) buffer;
\r
// ---- INPUT mode: create the IDirectSoundCapture object, pick an
// 8/16-bit format the hardware reports, then create and zero the
// capture buffer. ----
5674 if ( mode == INPUT ) {
\r
5676 LPDIRECTSOUNDCAPTURE input;
\r
5677 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5678 if ( FAILED( result ) ) {
\r
5679 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5680 errorText_ = errorStream_.str();
\r
5685 inCaps.dwSize = sizeof( inCaps );
\r
5686 result = input->GetCaps( &inCaps );
\r
5687 if ( FAILED( result ) ) {
\r
5689 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
5690 errorText_ = errorStream_.str();
\r
5694 // Check channel information.
\r
5695 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
5696 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
5700 // Check format information. Use 16-bit format unless user
\r
5701 // requests 8-bit.
\r
5702 DWORD deviceFormats;
\r
5703 if ( channels + firstChannel == 2 ) {
\r
// Any stereo 8-bit rate flag qualifies the device for 8-bit capture.
5704 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
5705 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5706 waveFormat.wBitsPerSample = 8;
\r
5707 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5709 else { // assume 16-bit is supported
\r
5710 waveFormat.wBitsPerSample = 16;
\r
5711 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5714 else { // channel == 1
\r
// Mono 8-bit rate flags.
5715 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
5716 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5717 waveFormat.wBitsPerSample = 8;
\r
5718 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5720 else { // assume 16-bit is supported
\r
5721 waveFormat.wBitsPerSample = 16;
\r
5722 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5725 stream_.userFormat = format;
\r
5727 // Update wave format structure and buffer information.
\r
5728 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5729 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5730 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5732 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5733 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5734 dsBufferSize *= 2;
\r
5736 // Setup the secondary DS buffer description.
\r
5737 DSCBUFFERDESC bufferDescription;
\r
5738 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
5739 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
5740 bufferDescription.dwFlags = 0;
\r
5741 bufferDescription.dwReserved = 0;
\r
5742 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5743 bufferDescription.lpwfxFormat = &waveFormat;
\r
5745 // Create the capture buffer.
\r
5746 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
5747 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
5748 if ( FAILED( result ) ) {
\r
5750 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
5751 errorText_ = errorStream_.str();
\r
5755 // Get the buffer size ... might be different from what we specified.
\r
5756 DSCBCAPS dscbcaps;
\r
5757 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
5758 result = buffer->GetCaps( &dscbcaps );
\r
5759 if ( FAILED( result ) ) {
\r
5761 buffer->Release();
\r
5762 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5763 errorText_ = errorStream_.str();
\r
5767 dsBufferSize = dscbcaps.dwBufferBytes;
\r
5769 // NOTE: We could have a problem here if this is a duplex stream
\r
5770 // and the play and capture hardware buffer sizes are different
\r
5771 // (I'm actually not sure if that is a problem or not).
\r
5772 // Currently, we are not verifying that.
\r
5774 // Lock the capture buffer
\r
5777 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5778 if ( FAILED( result ) ) {
\r
5780 buffer->Release();
\r
5781 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
5782 errorText_ = errorStream_.str();
\r
5786 // Zero the buffer
\r
5787 ZeroMemory( audioPtr, dataLen );
\r
5789 // Unlock the buffer
\r
5790 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5791 if ( FAILED( result ) ) {
\r
5793 buffer->Release();
\r
5794 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
5795 errorText_ = errorStream_.str();
\r
5799 ohandle = (void *) input;
\r
5800 bhandle = (void *) buffer;
\r
5803 // Set various stream parameters
\r
5804 DsHandle *handle = 0;
\r
5805 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
5806 stream_.nUserChannels[mode] = channels;
\r
5807 stream_.bufferSize = *bufferSize;
\r
5808 stream_.channelOffset[mode] = firstChannel;
\r
5809 stream_.deviceInterleaved[mode] = true;
\r
5810 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
5811 else stream_.userInterleaved = true;
\r
5813 // Set flag for buffer conversion
\r
// Conversion is needed when the user's channel count, sample format or
// interleaving differs from what the device was opened with.
5814 stream_.doConvertBuffer[mode] = false;
\r
5815 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
5816 stream_.doConvertBuffer[mode] = true;
\r
5817 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
5818 stream_.doConvertBuffer[mode] = true;
\r
5819 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5820 stream_.nUserChannels[mode] > 1 )
\r
5821 stream_.doConvertBuffer[mode] = true;
\r
5823 // Allocate necessary internal buffers
\r
5824 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5825 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5826 if ( stream_.userBuffer[mode] == NULL ) {
\r
5827 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
5831 if ( stream_.doConvertBuffer[mode] ) {
\r
5833 bool makeBuffer = true;
\r
5834 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5835 if ( mode == INPUT ) {
\r
// For duplex streams, reuse the output-mode device buffer when it is
// already large enough for the input side.
5836 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5837 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5838 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
5842 if ( makeBuffer ) {
\r
5843 bufferBytes *= *bufferSize;
\r
5844 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5845 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5846 if ( stream_.deviceBuffer == NULL ) {
\r
5847 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
5853 // Allocate our DsHandle structures for the stream.
\r
5854 if ( stream_.apiHandle == 0 ) {
\r
5856 handle = new DsHandle;
\r
5858 catch ( std::bad_alloc& ) {
\r
// NOTE(review): message says "AsioHandle" but this is the DirectSound
// backend allocating a DsHandle -- looks like a copy-paste from the
// ASIO API; should read "DsHandle memory."
5859 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
5863 // Create a manual-reset event.
\r
// Used by callbackEvent()/stopStream() to signal drain completion.
5864 handle->condition = CreateEvent( NULL, // no security
\r
5865 TRUE, // manual-reset
\r
5866 FALSE, // non-signaled initially
\r
5867 NULL ); // unnamed
\r
5868 stream_.apiHandle = (void *) handle;
\r
5871 handle = (DsHandle *) stream_.apiHandle;
\r
5872 handle->id[mode] = ohandle;
\r
5873 handle->buffer[mode] = bhandle;
\r
5874 handle->dsBufferSize[mode] = dsBufferSize;
\r
5875 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
5877 stream_.device[mode] = device;
\r
5878 stream_.state = STREAM_STOPPED;
\r
5879 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
5880 // We had already set up an output stream.
\r
5881 stream_.mode = DUPLEX;
\r
5883 stream_.mode = mode;
\r
5884 stream_.nBuffers = nBuffers;
\r
5885 stream_.sampleRate = sampleRate;
\r
5887 // Setup the buffer conversion information structure.
\r
5888 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5890 // Setup the callback thread.
\r
// Only spawn the thread once; the second (duplex) call reuses it.
5891 if ( stream_.callbackInfo.isRunning == false ) {
\r
5892 unsigned threadId;
\r
5893 stream_.callbackInfo.isRunning = true;
\r
5894 stream_.callbackInfo.object = (void *) this;
\r
5895 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
5896 &stream_.callbackInfo, 0, &threadId );
\r
5897 if ( stream_.callbackInfo.thread == 0 ) {
\r
5898 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
5902 // Boost DS thread priority
\r
5903 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
// ---- Error cleanup: release any DS objects/buffers created above and
// free the stream memory before returning failure.  NOTE(review): the
// label these error paths jump to is on a line elided from this
// extract. ----
5909 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5910 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5911 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5912 if ( buffer ) buffer->Release();
\r
5913 object->Release();
\r
5915 if ( handle->buffer[1] ) {
\r
5916 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5917 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5918 if ( buffer ) buffer->Release();
\r
5919 object->Release();
\r
5921 CloseHandle( handle->condition );
\r
5923 stream_.apiHandle = 0;
\r
5926 for ( int i=0; i<2; i++ ) {
\r
5927 if ( stream_.userBuffer[i] ) {
\r
5928 free( stream_.userBuffer[i] );
\r
5929 stream_.userBuffer[i] = 0;
\r
5933 if ( stream_.deviceBuffer ) {
\r
5934 free( stream_.deviceBuffer );
\r
5935 stream_.deviceBuffer = 0;
\r
5938 stream_.state = STREAM_CLOSED;
\r
// Closes an open stream: stops the callback thread, releases the
// DirectSound playback/capture objects and buffers, frees the user and
// device buffers, and resets stream state to CLOSED.
// NOTE(review): gaps in the embedded line numbers (e.g. between 5959
// and 5962) indicate elided lines -- presumably buffer->Stop() calls
// and braces; verify against the full source.
5942 void RtApiDs :: closeStream()
\r
5944 if ( stream_.state == STREAM_CLOSED ) {
\r
5945 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
5946 error( RtAudioError::WARNING );
\r
5950 // Stop the callback thread.
\r
// Clearing isRunning makes the callback loop exit; then join and
// close the thread handle.
5951 stream_.callbackInfo.isRunning = false;
\r
5952 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
5953 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
5955 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
// Release playback buffer and IDirectSound object, if created.
5957 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5958 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5959 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5962 buffer->Release();
\r
5964 object->Release();
\r
// Release capture buffer and IDirectSoundCapture object, if created.
5966 if ( handle->buffer[1] ) {
\r
5967 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5968 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5971 buffer->Release();
\r
5973 object->Release();
\r
// Destroy the manual-reset drain event created in probeDeviceOpen.
5975 CloseHandle( handle->condition );
\r
5977 stream_.apiHandle = 0;
\r
// Free the per-direction user buffers and the shared device buffer.
5980 for ( int i=0; i<2; i++ ) {
\r
5981 if ( stream_.userBuffer[i] ) {
\r
5982 free( stream_.userBuffer[i] );
\r
5983 stream_.userBuffer[i] = 0;
\r
5987 if ( stream_.deviceBuffer ) {
\r
5988 free( stream_.deviceBuffer );
\r
5989 stream_.deviceBuffer = 0;
\r
5992 stream_.mode = UNINITIALIZED;
\r
5993 stream_.state = STREAM_CLOSED;
\r
// Starts a stopped stream: begins looping playback and/or capture on
// the DirectSound buffers, resets the drain bookkeeping, and marks the
// stream RUNNING.  On a DS call failure the error text is set and the
// final FAILED(result) check reports a SYSTEM_ERROR.
5996 void RtApiDs :: startStream()
\r
5999 if ( stream_.state == STREAM_RUNNING ) {
\r
6000 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
6001 error( RtAudioError::WARNING );
\r
6005 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6007 // Increase scheduler frequency on lesser windows (a side-effect of
\r
6008 // increasing timer accuracy). On greater windows (Win2K or later),
\r
6009 // this is already in effect.
\r
// Matched by timeEndPeriod( 1 ) in stopStream().
6010 timeBeginPeriod( 1 );
\r
6012 buffersRolling = false;
\r
6013 duplexPrerollBytes = 0;
\r
6015 if ( stream_.mode == DUPLEX ) {
\r
6016 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
6017 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
6020 HRESULT result = 0;
\r
// Start looping playback on the output buffer.
6021 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6023 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6024 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
6025 if ( FAILED( result ) ) {
\r
6026 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
6027 errorText_ = errorStream_.str();
\r
// Start looping capture on the input buffer.
6032 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6034 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6035 result = buffer->Start( DSCBSTART_LOOPING );
\r
6036 if ( FAILED( result ) ) {
\r
6037 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
6038 errorText_ = errorStream_.str();
\r
// Reset drain bookkeeping and the drain-completion event, then mark
// the stream running.
6043 handle->drainCounter = 0;
\r
6044 handle->internalDrain = false;
\r
6045 ResetEvent( handle->condition );
\r
6046 stream_.state = STREAM_RUNNING;
\r
6049 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Stops a running stream.  For output, first waits (via the drain
// counter and the handle->condition event) for queued audio to finish
// playing, then stops each DS buffer, zeroes its contents so a restart
// does not replay stale data, and resets the buffer pointers to the
// beginning.  Finally reverts the scheduler frequency and releases the
// stream mutex.
6052 void RtApiDs :: stopStream()
\r
6055 if ( stream_.state == STREAM_STOPPED ) {
\r
6056 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
6057 error( RtAudioError::WARNING );
\r
6061 HRESULT result = 0;
\r
6064 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6065 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// drainCounter == 0 means no drain in progress yet: request one (2)
// and block until callbackEvent() signals the condition event.
6066 if ( handle->drainCounter == 0 ) {
\r
6067 handle->drainCounter = 2;
\r
6068 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
6071 stream_.state = STREAM_STOPPED;
\r
6073 MUTEX_LOCK( &stream_.mutex );
\r
6075 // Stop the buffer and clear memory
\r
6076 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6077 result = buffer->Stop();
\r
6078 if ( FAILED( result ) ) {
\r
6079 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
6080 errorText_ = errorStream_.str();
\r
6084 // Lock the buffer and clear it so that if we start to play again,
\r
6085 // we won't have old data playing.
\r
6086 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6087 if ( FAILED( result ) ) {
\r
6088 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
6089 errorText_ = errorStream_.str();
\r
6093 // Zero the DS buffer
\r
6094 ZeroMemory( audioPtr, dataLen );
\r
6096 // Unlock the DS buffer
\r
6097 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6098 if ( FAILED( result ) ) {
\r
6099 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
6100 errorText_ = errorStream_.str();
\r
6104 // If we start playing again, we must begin at beginning of buffer.
\r
6105 handle->bufferPointer[0] = 0;
\r
6108 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6109 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6113 stream_.state = STREAM_STOPPED;
\r
// In DUPLEX mode the mutex was already taken in the output branch
// above; only lock here for INPUT-only streams.
6115 if ( stream_.mode != DUPLEX )
\r
6116 MUTEX_LOCK( &stream_.mutex );
\r
6118 result = buffer->Stop();
\r
6119 if ( FAILED( result ) ) {
\r
6120 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
6121 errorText_ = errorStream_.str();
\r
6125 // Lock the buffer and clear it so that if we start to play again,
\r
6126 // we won't have old data playing.
\r
6127 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6128 if ( FAILED( result ) ) {
\r
6129 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
6130 errorText_ = errorStream_.str();
\r
6134 // Zero the DS buffer
\r
6135 ZeroMemory( audioPtr, dataLen );
\r
6137 // Unlock the DS buffer
\r
6138 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6139 if ( FAILED( result ) ) {
\r
6140 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
6141 errorText_ = errorStream_.str();
\r
6145 // If we start recording again, we must begin at beginning of buffer.
\r
6146 handle->bufferPointer[1] = 0;
\r
6150 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
6151 MUTEX_UNLOCK( &stream_.mutex );
\r
6153 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Aborts a running stream without draining queued output: setting
// drainCounter to 2 makes callbackEvent() write zeros to the output
// buffer instead of invoking the user callback (see the
// `drainCounter > 1` branch there).
// NOTE(review): the lines following 6166 are elided from this extract;
// presumably the function then delegates to stopStream() -- confirm
// against the full source.
6156 void RtApiDs :: abortStream()
\r
6159 if ( stream_.state == STREAM_STOPPED ) {
\r
6160 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
6161 error( RtAudioError::WARNING );
\r
6165 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6166 handle->drainCounter = 2;
\r
6171 void RtApiDs :: callbackEvent()
\r
6173 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6174 Sleep( 50 ); // sleep 50 milliseconds
\r
6178 if ( stream_.state == STREAM_CLOSED ) {
\r
6179 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6180 error( RtAudioError::WARNING );
\r
6184 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6185 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6187 // Check if we were draining the stream and signal is finished.
\r
6188 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6190 stream_.state = STREAM_STOPPING;
\r
6191 if ( handle->internalDrain == false )
\r
6192 SetEvent( handle->condition );
\r
6198 // Invoke user callback to get fresh output data UNLESS we are
\r
6199 // draining stream.
\r
6200 if ( handle->drainCounter == 0 ) {
\r
6201 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6202 double streamTime = getStreamTime();
\r
6203 RtAudioStreamStatus status = 0;
\r
6204 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6205 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6206 handle->xrun[0] = false;
\r
6208 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6209 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6210 handle->xrun[1] = false;
\r
6212 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6213 stream_.bufferSize, streamTime, status, info->userData );
\r
6214 if ( cbReturnValue == 2 ) {
\r
6215 stream_.state = STREAM_STOPPING;
\r
6216 handle->drainCounter = 2;
\r
6220 else if ( cbReturnValue == 1 ) {
\r
6221 handle->drainCounter = 1;
\r
6222 handle->internalDrain = true;
\r
6227 DWORD currentWritePointer, safeWritePointer;
\r
6228 DWORD currentReadPointer, safeReadPointer;
\r
6229 UINT nextWritePointer;
\r
6231 LPVOID buffer1 = NULL;
\r
6232 LPVOID buffer2 = NULL;
\r
6233 DWORD bufferSize1 = 0;
\r
6234 DWORD bufferSize2 = 0;
\r
6239 MUTEX_LOCK( &stream_.mutex );
\r
6240 if ( stream_.state == STREAM_STOPPED ) {
\r
6241 MUTEX_UNLOCK( &stream_.mutex );
\r
6245 if ( buffersRolling == false ) {
\r
6246 if ( stream_.mode == DUPLEX ) {
\r
6247 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6249 // It takes a while for the devices to get rolling. As a result,
\r
6250 // there's no guarantee that the capture and write device pointers
\r
6251 // will move in lockstep. Wait here for both devices to start
\r
6252 // rolling, and then set our buffer pointers accordingly.
\r
6253 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6254 // bytes later than the write buffer.
\r
6256 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6257 // take place between the two GetCurrentPosition calls... but I'm
\r
6258 // really not sure how to solve the problem. Temporarily boost to
\r
6259 // Realtime priority, maybe; but I'm not sure what priority the
\r
6260 // DirectSound service threads run at. We *should* be roughly
\r
6261 // within a ms or so of correct.
\r
6263 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6264 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6266 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6268 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6269 if ( FAILED( result ) ) {
\r
6270 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6271 errorText_ = errorStream_.str();
\r
6272 error( RtAudioError::SYSTEM_ERROR );
\r
6275 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6276 if ( FAILED( result ) ) {
\r
6277 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6278 errorText_ = errorStream_.str();
\r
6279 error( RtAudioError::SYSTEM_ERROR );
\r
6283 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6284 if ( FAILED( result ) ) {
\r
6285 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6286 errorText_ = errorStream_.str();
\r
6287 error( RtAudioError::SYSTEM_ERROR );
\r
6290 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6291 if ( FAILED( result ) ) {
\r
6292 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6293 errorText_ = errorStream_.str();
\r
6294 error( RtAudioError::SYSTEM_ERROR );
\r
6297 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6301 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6303 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6304 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6305 handle->bufferPointer[1] = safeReadPointer;
\r
6307 else if ( stream_.mode == OUTPUT ) {
\r
6309 // Set the proper nextWritePosition after initial startup.
\r
6310 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6311 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6312 if ( FAILED( result ) ) {
\r
6313 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6314 errorText_ = errorStream_.str();
\r
6315 error( RtAudioError::SYSTEM_ERROR );
\r
6318 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6319 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6322 buffersRolling = true;
\r
6325 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6327 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6329 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6330 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6331 bufferBytes *= formatBytes( stream_.userFormat );
\r
6332 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6335 // Setup parameters and do buffer conversion if necessary.
\r
6336 if ( stream_.doConvertBuffer[0] ) {
\r
6337 buffer = stream_.deviceBuffer;
\r
6338 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6339 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6340 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6343 buffer = stream_.userBuffer[0];
\r
6344 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6345 bufferBytes *= formatBytes( stream_.userFormat );
\r
6348 // No byte swapping necessary in DirectSound implementation.
\r
6350 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6351 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6353 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6354 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6356 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6357 nextWritePointer = handle->bufferPointer[0];
\r
6359 DWORD endWrite, leadPointer;
\r
6361 // Find out where the read and "safe write" pointers are.
\r
6362 result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
\r
6363 if ( FAILED( result ) ) {
\r
6364 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6365 errorText_ = errorStream_.str();
\r
6366 error( RtAudioError::SYSTEM_ERROR );
\r
6370 // We will copy our output buffer into the region between
\r
6371 // safeWritePointer and leadPointer. If leadPointer is not
\r
6372 // beyond the next endWrite position, wait until it is.
\r
6373 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6374 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6375 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6376 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6377 endWrite = nextWritePointer + bufferBytes;
\r
6379 // Check whether the entire write region is behind the play pointer.
\r
6380 if ( leadPointer >= endWrite ) break;
\r
6382 // If we are here, then we must wait until the leadPointer advances
\r
6383 // beyond the end of our next write region. We use the
\r
6384 // Sleep() function to suspend operation until that happens.
\r
6385 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6386 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6387 if ( millis < 1.0 ) millis = 1.0;
\r
6388 Sleep( (DWORD) millis );
\r
6391 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6392 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6393 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6394 handle->xrun[0] = true;
\r
6395 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6396 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6397 handle->bufferPointer[0] = nextWritePointer;
\r
6398 endWrite = nextWritePointer + bufferBytes;
\r
6401 // Lock free space in the buffer
\r
6402 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6403 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6404 if ( FAILED( result ) ) {
\r
6405 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6406 errorText_ = errorStream_.str();
\r
6407 error( RtAudioError::SYSTEM_ERROR );
\r
6411 // Copy our buffer into the DS buffer
\r
6412 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6413 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6415 // Update our buffer offset and unlock sound buffer
\r
6416 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6417 if ( FAILED( result ) ) {
\r
6418 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6419 errorText_ = errorStream_.str();
\r
6420 error( RtAudioError::SYSTEM_ERROR );
\r
6423 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6424 handle->bufferPointer[0] = nextWritePointer;
\r
6427 // Don't bother draining input
\r
6428 if ( handle->drainCounter ) {
\r
6429 handle->drainCounter++;
\r
6433 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6435 // Setup parameters.
\r
6436 if ( stream_.doConvertBuffer[1] ) {
\r
6437 buffer = stream_.deviceBuffer;
\r
6438 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6439 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6442 buffer = stream_.userBuffer[1];
\r
6443 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6444 bufferBytes *= formatBytes( stream_.userFormat );
\r
6447 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6448 long nextReadPointer = handle->bufferPointer[1];
\r
6449 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6451 // Find out where the write and "safe read" pointers are.
\r
6452 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6453 if ( FAILED( result ) ) {
\r
6454 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6455 errorText_ = errorStream_.str();
\r
6456 error( RtAudioError::SYSTEM_ERROR );
\r
6460 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6461 DWORD endRead = nextReadPointer + bufferBytes;
\r
6463 // Handling depends on whether we are INPUT or DUPLEX.
\r
6464 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6465 // then a wait here will drag the write pointers into the forbidden zone.
\r
6467 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6468 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6469 // practical way to sync up the read and write pointers reliably, given the
\r
6470 // the very complex relationship between phase and increment of the read and write
\r
6473 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6474 // provide a pre-roll period of 0.5 seconds in which we return
\r
6475 // zeros from the read buffer while the pointers sync up.
\r
6477 if ( stream_.mode == DUPLEX ) {
\r
6478 if ( safeReadPointer < endRead ) {
\r
6479 if ( duplexPrerollBytes <= 0 ) {
\r
6480 // Pre-roll time over. Be more aggressive.
\r
6481 int adjustment = endRead-safeReadPointer;
\r
6483 handle->xrun[1] = true;
\r
6485 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6486 // and perform fine adjustments later.
\r
6487 // - small adjustments: back off by twice as much.
\r
6488 if ( adjustment >= 2*bufferBytes )
\r
6489 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6491 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6493 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6497 // In pre-roll time. Just do it.
\r
6498 nextReadPointer = safeReadPointer - bufferBytes;
\r
6499 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6501 endRead = nextReadPointer + bufferBytes;
\r
6504 else { // mode == INPUT
\r
6505 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6506 // See comments for playback.
\r
6507 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6508 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6509 if ( millis < 1.0 ) millis = 1.0;
\r
6510 Sleep( (DWORD) millis );
\r
6512 // Wake up and find out where we are now.
\r
6513 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6514 if ( FAILED( result ) ) {
\r
6515 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6516 errorText_ = errorStream_.str();
\r
6517 error( RtAudioError::SYSTEM_ERROR );
\r
6521 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6525 // Lock free space in the buffer
\r
6526 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6527 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6528 if ( FAILED( result ) ) {
\r
6529 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6530 errorText_ = errorStream_.str();
\r
6531 error( RtAudioError::SYSTEM_ERROR );
\r
6535 if ( duplexPrerollBytes <= 0 ) {
\r
6536 // Copy our buffer into the DS buffer
\r
6537 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6538 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6541 memset( buffer, 0, bufferSize1 );
\r
6542 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6543 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6546 // Update our buffer offset and unlock sound buffer
\r
6547 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6548 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6549 if ( FAILED( result ) ) {
\r
6550 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6551 errorText_ = errorStream_.str();
\r
6552 error( RtAudioError::SYSTEM_ERROR );
\r
6555 handle->bufferPointer[1] = nextReadPointer;
\r
6557 // No byte swapping necessary in DirectSound implementation.
\r
6559 // If necessary, convert 8-bit data from unsigned to signed.
\r
6560 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6561 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6563 // Do buffer conversion if necessary.
\r
6564 if ( stream_.doConvertBuffer[1] )
\r
6565 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6569 MUTEX_UNLOCK( &stream_.mutex );
\r
6570 RtApi::tickStreamTime();
\r
6573 // Definitions for utility functions and callbacks
\r
6574 // specific to the DirectSound implementation.
\r
6576 static unsigned __stdcall callbackHandler( void *ptr )
\r
6578 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6579 RtApiDs *object = (RtApiDs *) info->object;
\r
6580 bool* isRunning = &info->isRunning;
\r
6582 while ( *isRunning == true ) {
\r
6583 object->callbackEvent();
\r
6586 _endthreadex( 0 );
\r
6590 #include "tchar.h"
\r
6592 static std::string convertTChar( LPCTSTR name )
\r
6594 #if defined( UNICODE ) || defined( _UNICODE )
\r
6595 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
6596 std::string s( length-1, '\0' );
\r
6597 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);
\r
6599 std::string s( name );
\r
6605 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6606 LPCTSTR description,
\r
6607 LPCTSTR /*module*/,
\r
6608 LPVOID lpContext )
\r
6610 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6611 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6614 bool validDevice = false;
\r
6615 if ( probeInfo.isInput == true ) {
\r
6617 LPDIRECTSOUNDCAPTURE object;
\r
6619 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6620 if ( hr != DS_OK ) return TRUE;
\r
6622 caps.dwSize = sizeof(caps);
\r
6623 hr = object->GetCaps( &caps );
\r
6624 if ( hr == DS_OK ) {
\r
6625 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6626 validDevice = true;
\r
6628 object->Release();
\r
6632 LPDIRECTSOUND object;
\r
6633 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6634 if ( hr != DS_OK ) return TRUE;
\r
6636 caps.dwSize = sizeof(caps);
\r
6637 hr = object->GetCaps( &caps );
\r
6638 if ( hr == DS_OK ) {
\r
6639 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6640 validDevice = true;
\r
6642 object->Release();
\r
6645 // If good device, then save its name and guid.
\r
6646 std::string name = convertTChar( description );
\r
6647 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6648 if ( lpguid == NULL )
\r
6649 name = "Default Device";
\r
6650 if ( validDevice ) {
\r
6651 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6652 if ( dsDevices[i].name == name ) {
\r
6653 dsDevices[i].found = true;
\r
6654 if ( probeInfo.isInput ) {
\r
6655 dsDevices[i].id[1] = lpguid;
\r
6656 dsDevices[i].validId[1] = true;
\r
6659 dsDevices[i].id[0] = lpguid;
\r
6660 dsDevices[i].validId[0] = true;
\r
6667 device.name = name;
\r
6668 device.found = true;
\r
6669 if ( probeInfo.isInput ) {
\r
6670 device.id[1] = lpguid;
\r
6671 device.validId[1] = true;
\r
6674 device.id[0] = lpguid;
\r
6675 device.validId[0] = true;
\r
6677 dsDevices.push_back( device );
\r
6683 static const char* getErrorString( int code )
\r
6687 case DSERR_ALLOCATED:
\r
6688 return "Already allocated";
\r
6690 case DSERR_CONTROLUNAVAIL:
\r
6691 return "Control unavailable";
\r
6693 case DSERR_INVALIDPARAM:
\r
6694 return "Invalid parameter";
\r
6696 case DSERR_INVALIDCALL:
\r
6697 return "Invalid call";
\r
6699 case DSERR_GENERIC:
\r
6700 return "Generic error";
\r
6702 case DSERR_PRIOLEVELNEEDED:
\r
6703 return "Priority level needed";
\r
6705 case DSERR_OUTOFMEMORY:
\r
6706 return "Out of memory";
\r
6708 case DSERR_BADFORMAT:
\r
6709 return "The sample rate or the channel format is not supported";
\r
6711 case DSERR_UNSUPPORTED:
\r
6712 return "Not supported";
\r
6714 case DSERR_NODRIVER:
\r
6715 return "No driver";
\r
6717 case DSERR_ALREADYINITIALIZED:
\r
6718 return "Already initialized";
\r
6720 case DSERR_NOAGGREGATION:
\r
6721 return "No aggregation";
\r
6723 case DSERR_BUFFERLOST:
\r
6724 return "Buffer lost";
\r
6726 case DSERR_OTHERAPPHASPRIO:
\r
6727 return "Another application already has priority";
\r
6729 case DSERR_UNINITIALIZED:
\r
6730 return "Uninitialized";
\r
6733 return "DirectSound unknown error";
\r
6736 //******************** End of __WINDOWS_DS__ *********************//
\r
6740 #if defined(__LINUX_ALSA__)
\r
6742 #include <alsa/asoundlib.h>
\r
6743 #include <unistd.h>
\r
6745 // A structure to hold various information related to the ALSA API
\r
6746 // implementation.
\r
6747 struct AlsaHandle {
\r
6748 snd_pcm_t *handles[2];
\r
6749 bool synchronized;
\r
6751 pthread_cond_t runnable_cv;
\r
6755 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6758 static void *alsaCallbackHandler( void * ptr );
\r
6760 RtApiAlsa :: RtApiAlsa()
\r
6762 // Nothing to do here.
\r
6765 RtApiAlsa :: ~RtApiAlsa()
\r
6767 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6770 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6772 unsigned nDevices = 0;
\r
6773 int result, subdevice, card;
\r
6775 snd_ctl_t *handle;
\r
6777 // Count cards and devices
\r
6779 snd_card_next( &card );
\r
6780 while ( card >= 0 ) {
\r
6781 sprintf( name, "hw:%d", card );
\r
6782 result = snd_ctl_open( &handle, name, 0 );
\r
6783 if ( result < 0 ) {
\r
6784 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6785 errorText_ = errorStream_.str();
\r
6786 error( RtAudioError::WARNING );
\r
6791 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6792 if ( result < 0 ) {
\r
6793 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6794 errorText_ = errorStream_.str();
\r
6795 error( RtAudioError::WARNING );
\r
6798 if ( subdevice < 0 )
\r
6803 snd_ctl_close( handle );
\r
6804 snd_card_next( &card );
\r
6807 result = snd_ctl_open( &handle, "default", 0 );
\r
6808 if (result == 0) {
\r
6810 snd_ctl_close( handle );
\r
6816 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6818 RtAudio::DeviceInfo info;
\r
6819 info.probed = false;
\r
6821 unsigned nDevices = 0;
\r
6822 int result, subdevice, card;
\r
6824 snd_ctl_t *chandle;
\r
6826 // Count cards and devices
\r
6828 snd_card_next( &card );
\r
6829 while ( card >= 0 ) {
\r
6830 sprintf( name, "hw:%d", card );
\r
6831 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6832 if ( result < 0 ) {
\r
6833 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6834 errorText_ = errorStream_.str();
\r
6835 error( RtAudioError::WARNING );
\r
6840 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6841 if ( result < 0 ) {
\r
6842 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6843 errorText_ = errorStream_.str();
\r
6844 error( RtAudioError::WARNING );
\r
6847 if ( subdevice < 0 ) break;
\r
6848 if ( nDevices == device ) {
\r
6849 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6855 snd_ctl_close( chandle );
\r
6856 snd_card_next( &card );
\r
6859 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6860 if ( result == 0 ) {
\r
6861 if ( nDevices == device ) {
\r
6862 strcpy( name, "default" );
\r
6868 if ( nDevices == 0 ) {
\r
6869 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6870 error( RtAudioError::INVALID_USE );
\r
6874 if ( device >= nDevices ) {
\r
6875 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6876 error( RtAudioError::INVALID_USE );
\r
6882 // If a stream is already open, we cannot probe the stream devices.
\r
6883 // Thus, use the saved results.
\r
6884 if ( stream_.state != STREAM_CLOSED &&
\r
6885 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6886 snd_ctl_close( chandle );
\r
6887 if ( device >= devices_.size() ) {
\r
6888 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6889 error( RtAudioError::WARNING );
\r
6892 return devices_[ device ];
\r
6895 int openMode = SND_PCM_ASYNC;
\r
6896 snd_pcm_stream_t stream;
\r
6897 snd_pcm_info_t *pcminfo;
\r
6898 snd_pcm_info_alloca( &pcminfo );
\r
6899 snd_pcm_t *phandle;
\r
6900 snd_pcm_hw_params_t *params;
\r
6901 snd_pcm_hw_params_alloca( &params );
\r
6903 // First try for playback unless default device (which has subdev -1)
\r
6904 stream = SND_PCM_STREAM_PLAYBACK;
\r
6905 snd_pcm_info_set_stream( pcminfo, stream );
\r
6906 if ( subdevice != -1 ) {
\r
6907 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6908 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6910 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6911 if ( result < 0 ) {
\r
6912 // Device probably doesn't support playback.
\r
6913 goto captureProbe;
\r
6917 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6918 if ( result < 0 ) {
\r
6919 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6920 errorText_ = errorStream_.str();
\r
6921 error( RtAudioError::WARNING );
\r
6922 goto captureProbe;
\r
6925 // The device is open ... fill the parameter structure.
\r
6926 result = snd_pcm_hw_params_any( phandle, params );
\r
6927 if ( result < 0 ) {
\r
6928 snd_pcm_close( phandle );
\r
6929 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6930 errorText_ = errorStream_.str();
\r
6931 error( RtAudioError::WARNING );
\r
6932 goto captureProbe;
\r
6935 // Get output channel information.
\r
6936 unsigned int value;
\r
6937 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6938 if ( result < 0 ) {
\r
6939 snd_pcm_close( phandle );
\r
6940 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
6941 errorText_ = errorStream_.str();
\r
6942 error( RtAudioError::WARNING );
\r
6943 goto captureProbe;
\r
6945 info.outputChannels = value;
\r
6946 snd_pcm_close( phandle );
\r
6949 stream = SND_PCM_STREAM_CAPTURE;
\r
6950 snd_pcm_info_set_stream( pcminfo, stream );
\r
6952 // Now try for capture unless default device (with subdev = -1)
\r
6953 if ( subdevice != -1 ) {
\r
6954 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6955 snd_ctl_close( chandle );
\r
6956 if ( result < 0 ) {
\r
6957 // Device probably doesn't support capture.
\r
6958 if ( info.outputChannels == 0 ) return info;
\r
6959 goto probeParameters;
\r
6963 snd_ctl_close( chandle );
\r
6965 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
6966 if ( result < 0 ) {
\r
6967 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6968 errorText_ = errorStream_.str();
\r
6969 error( RtAudioError::WARNING );
\r
6970 if ( info.outputChannels == 0 ) return info;
\r
6971 goto probeParameters;
\r
6974 // The device is open ... fill the parameter structure.
\r
6975 result = snd_pcm_hw_params_any( phandle, params );
\r
6976 if ( result < 0 ) {
\r
6977 snd_pcm_close( phandle );
\r
6978 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6979 errorText_ = errorStream_.str();
\r
6980 error( RtAudioError::WARNING );
\r
6981 if ( info.outputChannels == 0 ) return info;
\r
6982 goto probeParameters;
\r
6985 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6986 if ( result < 0 ) {
\r
6987 snd_pcm_close( phandle );
\r
6988 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
6989 errorText_ = errorStream_.str();
\r
6990 error( RtAudioError::WARNING );
\r
6991 if ( info.outputChannels == 0 ) return info;
\r
6992 goto probeParameters;
\r
6994 info.inputChannels = value;
\r
6995 snd_pcm_close( phandle );
\r
6997 // If device opens for both playback and capture, we determine the channels.
\r
6998 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
6999 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
7001 // ALSA doesn't provide default devices so we'll use the first available one.
\r
7002 if ( device == 0 && info.outputChannels > 0 )
\r
7003 info.isDefaultOutput = true;
\r
7004 if ( device == 0 && info.inputChannels > 0 )
\r
7005 info.isDefaultInput = true;
\r
7008 // At this point, we just need to figure out the supported data
\r
7009 // formats and sample rates. We'll proceed by opening the device in
\r
7010 // the direction with the maximum number of channels, or playback if
\r
7011 // they are equal. This might limit our sample rate options, but so
\r
7014 if ( info.outputChannels >= info.inputChannels )
\r
7015 stream = SND_PCM_STREAM_PLAYBACK;
\r
7017 stream = SND_PCM_STREAM_CAPTURE;
\r
7018 snd_pcm_info_set_stream( pcminfo, stream );
\r
7020 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7021 if ( result < 0 ) {
\r
7022 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7023 errorText_ = errorStream_.str();
\r
7024 error( RtAudioError::WARNING );
\r
7028 // The device is open ... fill the parameter structure.
\r
7029 result = snd_pcm_hw_params_any( phandle, params );
\r
7030 if ( result < 0 ) {
\r
7031 snd_pcm_close( phandle );
\r
7032 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7033 errorText_ = errorStream_.str();
\r
7034 error( RtAudioError::WARNING );
\r
7038 // Test our discrete set of sample rate values.
\r
7039 info.sampleRates.clear();
\r
7040 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7041 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
7042 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7044 if ( info.sampleRates.size() == 0 ) {
\r
7045 snd_pcm_close( phandle );
\r
7046 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7047 errorText_ = errorStream_.str();
\r
7048 error( RtAudioError::WARNING );
\r
7052 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7053 snd_pcm_format_t format;
\r
7054 info.nativeFormats = 0;
\r
7055 format = SND_PCM_FORMAT_S8;
\r
7056 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7057 info.nativeFormats |= RTAUDIO_SINT8;
\r
7058 format = SND_PCM_FORMAT_S16;
\r
7059 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7060 info.nativeFormats |= RTAUDIO_SINT16;
\r
7061 format = SND_PCM_FORMAT_S24;
\r
7062 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7063 info.nativeFormats |= RTAUDIO_SINT24;
\r
7064 format = SND_PCM_FORMAT_S32;
\r
7065 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7066 info.nativeFormats |= RTAUDIO_SINT32;
\r
7067 format = SND_PCM_FORMAT_FLOAT;
\r
7068 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7069 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7070 format = SND_PCM_FORMAT_FLOAT64;
\r
7071 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7072 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7074 // Check that we have at least one supported format
\r
7075 if ( info.nativeFormats == 0 ) {
\r
7076 snd_pcm_close( phandle );
\r
7077 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7078 errorText_ = errorStream_.str();
\r
7079 error( RtAudioError::WARNING );
\r
7083 // Get the device name
\r
7085 result = snd_card_get_name( card, &cardname );
\r
7086 if ( result >= 0 ) {
\r
7087 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7092 // That's all ... close the device and return
\r
7093 snd_pcm_close( phandle );
\r
7094 info.probed = true;
\r
7098 void RtApiAlsa :: saveDeviceInfo( void )
\r
7102 unsigned int nDevices = getDeviceCount();
\r
7103 devices_.resize( nDevices );
\r
7104 for ( unsigned int i=0; i<nDevices; i++ )
\r
7105 devices_[i] = getDeviceInfo( i );
\r
7108 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7109 unsigned int firstChannel, unsigned int sampleRate,
\r
7110 RtAudioFormat format, unsigned int *bufferSize,
\r
7111 RtAudio::StreamOptions *options )
\r
7114 #if defined(__RTAUDIO_DEBUG__)
\r
7115 snd_output_t *out;
\r
7116 snd_output_stdio_attach(&out, stderr, 0);
\r
7119 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7121 unsigned nDevices = 0;
\r
7122 int result, subdevice, card;
\r
7124 snd_ctl_t *chandle;
\r
7126 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7127 snprintf(name, sizeof(name), "%s", "default");
\r
7129 // Count cards and devices
\r
7131 snd_card_next( &card );
\r
7132 while ( card >= 0 ) {
\r
7133 sprintf( name, "hw:%d", card );
\r
7134 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7135 if ( result < 0 ) {
\r
7136 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7137 errorText_ = errorStream_.str();
\r
7142 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7143 if ( result < 0 ) break;
\r
7144 if ( subdevice < 0 ) break;
\r
7145 if ( nDevices == device ) {
\r
7146 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7147 snd_ctl_close( chandle );
\r
7152 snd_ctl_close( chandle );
\r
7153 snd_card_next( &card );
\r
7156 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7157 if ( result == 0 ) {
\r
7158 if ( nDevices == device ) {
\r
7159 strcpy( name, "default" );
\r
7165 if ( nDevices == 0 ) {
\r
7166 // This should not happen because a check is made before this function is called.
\r
7167 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7171 if ( device >= nDevices ) {
\r
7172 // This should not happen because a check is made before this function is called.
\r
7173 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7180 // The getDeviceInfo() function will not work for a device that is
\r
7181 // already open. Thus, we'll probe the system before opening a
\r
7182 // stream and save the results for use by getDeviceInfo().
\r
7183 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7184 this->saveDeviceInfo();
\r
7186 snd_pcm_stream_t stream;
\r
7187 if ( mode == OUTPUT )
\r
7188 stream = SND_PCM_STREAM_PLAYBACK;
\r
7190 stream = SND_PCM_STREAM_CAPTURE;
\r
7192 snd_pcm_t *phandle;
\r
7193 int openMode = SND_PCM_ASYNC;
\r
7194 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7195 if ( result < 0 ) {
\r
7196 if ( mode == OUTPUT )
\r
7197 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7199 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7200 errorText_ = errorStream_.str();
\r
7204 // Fill the parameter structure.
\r
7205 snd_pcm_hw_params_t *hw_params;
\r
7206 snd_pcm_hw_params_alloca( &hw_params );
\r
7207 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7208 if ( result < 0 ) {
\r
7209 snd_pcm_close( phandle );
\r
7210 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7211 errorText_ = errorStream_.str();
\r
7215 #if defined(__RTAUDIO_DEBUG__)
\r
7216 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7217 snd_pcm_hw_params_dump( hw_params, out );
\r
7220 // Set access ... check user preference.
\r
7221 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7222 stream_.userInterleaved = false;
\r
7223 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7224 if ( result < 0 ) {
\r
7225 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7226 stream_.deviceInterleaved[mode] = true;
\r
7229 stream_.deviceInterleaved[mode] = false;
\r
7232 stream_.userInterleaved = true;
\r
7233 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7234 if ( result < 0 ) {
\r
7235 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7236 stream_.deviceInterleaved[mode] = false;
\r
7239 stream_.deviceInterleaved[mode] = true;
\r
7242 if ( result < 0 ) {
\r
7243 snd_pcm_close( phandle );
\r
7244 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7245 errorText_ = errorStream_.str();
\r
7249 // Determine how to set the device format.
\r
7250 stream_.userFormat = format;
\r
7251 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7253 if ( format == RTAUDIO_SINT8 )
\r
7254 deviceFormat = SND_PCM_FORMAT_S8;
\r
7255 else if ( format == RTAUDIO_SINT16 )
\r
7256 deviceFormat = SND_PCM_FORMAT_S16;
\r
7257 else if ( format == RTAUDIO_SINT24 )
\r
7258 deviceFormat = SND_PCM_FORMAT_S24;
\r
7259 else if ( format == RTAUDIO_SINT32 )
\r
7260 deviceFormat = SND_PCM_FORMAT_S32;
\r
7261 else if ( format == RTAUDIO_FLOAT32 )
\r
7262 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7263 else if ( format == RTAUDIO_FLOAT64 )
\r
7264 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7266 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7267 stream_.deviceFormat[mode] = format;
\r
7271 // The user requested format is not natively supported by the device.
\r
7272 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7273 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7274 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7278 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7279 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7280 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7284 deviceFormat = SND_PCM_FORMAT_S32;
\r
7285 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7286 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7290 deviceFormat = SND_PCM_FORMAT_S24;
\r
7291 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7292 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7296 deviceFormat = SND_PCM_FORMAT_S16;
\r
7297 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7298 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7302 deviceFormat = SND_PCM_FORMAT_S8;
\r
7303 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7304 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7308 // If we get here, no supported format was found.
\r
7309 snd_pcm_close( phandle );
\r
7310 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7311 errorText_ = errorStream_.str();
\r
7315 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7316 if ( result < 0 ) {
\r
7317 snd_pcm_close( phandle );
\r
7318 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7319 errorText_ = errorStream_.str();
\r
7323 // Determine whether byte-swaping is necessary.
\r
7324 stream_.doByteSwap[mode] = false;
\r
7325 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7326 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7327 if ( result == 0 )
\r
7328 stream_.doByteSwap[mode] = true;
\r
7329 else if (result < 0) {
\r
7330 snd_pcm_close( phandle );
\r
7331 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7332 errorText_ = errorStream_.str();
\r
7337 // Set the sample rate.
\r
7338 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7339 if ( result < 0 ) {
\r
7340 snd_pcm_close( phandle );
\r
7341 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7342 errorText_ = errorStream_.str();
\r
7346 // Determine the number of channels for this device. We support a possible
\r
7347 // minimum device channel number > than the value requested by the user.
\r
7348 stream_.nUserChannels[mode] = channels;
\r
7349 unsigned int value;
\r
7350 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7351 unsigned int deviceChannels = value;
\r
7352 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7353 snd_pcm_close( phandle );
\r
7354 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7355 errorText_ = errorStream_.str();
\r
7359 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7360 if ( result < 0 ) {
\r
7361 snd_pcm_close( phandle );
\r
7362 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7363 errorText_ = errorStream_.str();
\r
7366 deviceChannels = value;
\r
7367 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7368 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7370 // Set the device channels.
\r
7371 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7372 if ( result < 0 ) {
\r
7373 snd_pcm_close( phandle );
\r
7374 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7375 errorText_ = errorStream_.str();
\r
7379 // Set the buffer (or period) size.
\r
7381 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7382 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7383 if ( result < 0 ) {
\r
7384 snd_pcm_close( phandle );
\r
7385 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7386 errorText_ = errorStream_.str();
\r
7389 *bufferSize = periodSize;
\r
7391 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7392 unsigned int periods = 0;
\r
7393 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7394 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7395 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7396 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7397 if ( result < 0 ) {
\r
7398 snd_pcm_close( phandle );
\r
7399 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7400 errorText_ = errorStream_.str();
\r
7404 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7405 // MUST be the same in both directions!
\r
7406 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7407 snd_pcm_close( phandle );
\r
7408 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7409 errorText_ = errorStream_.str();
\r
7413 stream_.bufferSize = *bufferSize;
\r
7415 // Install the hardware configuration
\r
7416 result = snd_pcm_hw_params( phandle, hw_params );
\r
7417 if ( result < 0 ) {
\r
7418 snd_pcm_close( phandle );
\r
7419 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7420 errorText_ = errorStream_.str();
\r
7424 #if defined(__RTAUDIO_DEBUG__)
\r
7425 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7426 snd_pcm_hw_params_dump( hw_params, out );
\r
7429 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7430 snd_pcm_sw_params_t *sw_params = NULL;
\r
7431 snd_pcm_sw_params_alloca( &sw_params );
\r
7432 snd_pcm_sw_params_current( phandle, sw_params );
\r
7433 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7434 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7435 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7437 // The following two settings were suggested by Theo Veenker
\r
7438 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7439 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7441 // here are two options for a fix
\r
7442 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7443 snd_pcm_uframes_t val;
\r
7444 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7445 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7447 result = snd_pcm_sw_params( phandle, sw_params );
\r
7448 if ( result < 0 ) {
\r
7449 snd_pcm_close( phandle );
\r
7450 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7451 errorText_ = errorStream_.str();
\r
7455 #if defined(__RTAUDIO_DEBUG__)
\r
7456 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7457 snd_pcm_sw_params_dump( sw_params, out );
\r
7460 // Set flags for buffer conversion
\r
7461 stream_.doConvertBuffer[mode] = false;
\r
7462 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7463 stream_.doConvertBuffer[mode] = true;
\r
7464 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7465 stream_.doConvertBuffer[mode] = true;
\r
7466 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7467 stream_.nUserChannels[mode] > 1 )
\r
7468 stream_.doConvertBuffer[mode] = true;
\r
7470 // Allocate the ApiHandle if necessary and then save.
\r
7471 AlsaHandle *apiInfo = 0;
\r
7472 if ( stream_.apiHandle == 0 ) {
\r
7474 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7476 catch ( std::bad_alloc& ) {
\r
7477 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7481 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7482 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7486 stream_.apiHandle = (void *) apiInfo;
\r
7487 apiInfo->handles[0] = 0;
\r
7488 apiInfo->handles[1] = 0;
\r
7491 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7493 apiInfo->handles[mode] = phandle;
\r
7496 // Allocate necessary internal buffers.
\r
7497 unsigned long bufferBytes;
\r
7498 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7499 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7500 if ( stream_.userBuffer[mode] == NULL ) {
\r
7501 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7505 if ( stream_.doConvertBuffer[mode] ) {
\r
7507 bool makeBuffer = true;
\r
7508 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7509 if ( mode == INPUT ) {
\r
7510 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7511 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7512 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7516 if ( makeBuffer ) {
\r
7517 bufferBytes *= *bufferSize;
\r
7518 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7519 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7520 if ( stream_.deviceBuffer == NULL ) {
\r
7521 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7527 stream_.sampleRate = sampleRate;
\r
7528 stream_.nBuffers = periods;
\r
7529 stream_.device[mode] = device;
\r
7530 stream_.state = STREAM_STOPPED;
\r
7532 // Setup the buffer conversion information structure.
\r
7533 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7535 // Setup thread if necessary.
\r
7536 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7537 // We had already set up an output stream.
\r
7538 stream_.mode = DUPLEX;
\r
7539 // Link the streams if possible.
\r
7540 apiInfo->synchronized = false;
\r
7541 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7542 apiInfo->synchronized = true;
\r
7544 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7545 error( RtAudioError::WARNING );
\r
7549 stream_.mode = mode;
\r
7551 // Setup callback thread.
\r
7552 stream_.callbackInfo.object = (void *) this;
\r
7554 // Set the thread attributes for joinable and realtime scheduling
\r
7555 // priority (optional). The higher priority will only take affect
\r
7556 // if the program is run as root or suid. Note, under Linux
\r
7557 // processes with CAP_SYS_NICE privilege, a user can change
\r
7558 // scheduling policy and priority (thus need not be root). See
\r
7559 // POSIX "capabilities".
\r
7560 pthread_attr_t attr;
\r
7561 pthread_attr_init( &attr );
\r
7562 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7564 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7565 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7566 // We previously attempted to increase the audio callback priority
\r
7567 // to SCHED_RR here via the attributes. However, while no errors
\r
7568 // were reported in doing so, it did not work. So, now this is
\r
7569 // done in the alsaCallbackHandler function.
\r
7570 stream_.callbackInfo.doRealtime = true;
\r
7571 int priority = options->priority;
\r
7572 int min = sched_get_priority_min( SCHED_RR );
\r
7573 int max = sched_get_priority_max( SCHED_RR );
\r
7574 if ( priority < min ) priority = min;
\r
7575 else if ( priority > max ) priority = max;
\r
7576 stream_.callbackInfo.priority = priority;
\r
7580 stream_.callbackInfo.isRunning = true;
\r
7581 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7582 pthread_attr_destroy( &attr );
\r
7584 stream_.callbackInfo.isRunning = false;
\r
7585 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7594 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7595 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7596 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7598 stream_.apiHandle = 0;
\r
7601 if ( phandle) snd_pcm_close( phandle );
\r
7603 for ( int i=0; i<2; i++ ) {
\r
7604 if ( stream_.userBuffer[i] ) {
\r
7605 free( stream_.userBuffer[i] );
\r
7606 stream_.userBuffer[i] = 0;
\r
7610 if ( stream_.deviceBuffer ) {
\r
7611 free( stream_.deviceBuffer );
\r
7612 stream_.deviceBuffer = 0;
\r
7615 stream_.state = STREAM_CLOSED;
\r
// Close an open ALSA stream: shut down the callback thread, stop any running
// PCM devices, close both PCM handles, free the AlsaHandle and all internal
// buffers, and reset the stream state.
// NOTE(review): several structural lines (braces/else) are missing from this
// extracted view; the fused numeric prefixes are preserved verbatim.
7619 void RtApiAlsa :: closeStream()

// Closing a stream that was never opened is only a warning, not an error.
7621 if ( stream_.state == STREAM_CLOSED ) {

7622 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";

7623 error( RtAudioError::WARNING );

7627 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;

// Tell the callback thread to exit its loop ...
7628 stream_.callbackInfo.isRunning = false;

7629 MUTEX_LOCK( &stream_.mutex );

// ... and wake it if it is blocked waiting on the runnable condition
// (it parks on runnable_cv while the stream is stopped).
7630 if ( stream_.state == STREAM_STOPPED ) {

7631 apiInfo->runnable = true;

7632 pthread_cond_signal( &apiInfo->runnable_cv );

7634 MUTEX_UNLOCK( &stream_.mutex );

// Wait for the callback thread to terminate before tearing anything down.
7635 pthread_join( stream_.callbackInfo.thread, NULL );

// If still running, drop (abort) pending samples on both directions.
7637 if ( stream_.state == STREAM_RUNNING ) {

7638 stream_.state = STREAM_STOPPED;

7639 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )

7640 snd_pcm_drop( apiInfo->handles[0] );

7641 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )

7642 snd_pcm_drop( apiInfo->handles[1] );

// Release the condition variable and close whichever PCM handles were opened.
7646 pthread_cond_destroy( &apiInfo->runnable_cv );

7647 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );

7648 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );

7650 stream_.apiHandle = 0;

// Free the per-direction user buffers (index 0 = output, 1 = input).
7653 for ( int i=0; i<2; i++ ) {

7654 if ( stream_.userBuffer[i] ) {

7655 free( stream_.userBuffer[i] );

7656 stream_.userBuffer[i] = 0;

7660 if ( stream_.deviceBuffer ) {

7661 free( stream_.deviceBuffer );

7662 stream_.deviceBuffer = 0;

// Mark the stream fully closed.
7665 stream_.mode = UNINITIALIZED;

7666 stream_.state = STREAM_CLOSED;
\r
// Start (or restart) a stopped ALSA stream: prepare the PCM device(s) if
// needed, mark the stream running, and wake the callback thread.
7669 void RtApiAlsa :: startStream()

7671 // This method calls snd_pcm_prepare if the device isn't already in that state.

// Starting an already-running stream is only a warning.
7674 if ( stream_.state == STREAM_RUNNING ) {

7675 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";

7676 error( RtAudioError::WARNING );

7680 MUTEX_LOCK( &stream_.mutex );

7683 snd_pcm_state_t state;

7684 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;

7685 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;

// Prepare the playback device (handle[0]) unless it is already prepared.
7686 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

7687 state = snd_pcm_state( handle[0] );

7688 if ( state != SND_PCM_STATE_PREPARED ) {

7689 result = snd_pcm_prepare( handle[0] );

7690 if ( result < 0 ) {

7691 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";

7692 errorText_ = errorStream_.str();

// Prepare the capture device (handle[1]); skipped when the devices are
// linked (synchronized), since starting one then starts the other.
7698 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {

7699 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open

7700 state = snd_pcm_state( handle[1] );

7701 if ( state != SND_PCM_STATE_PREPARED ) {

7702 result = snd_pcm_prepare( handle[1] );

7703 if ( result < 0 ) {

7704 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";

7705 errorText_ = errorStream_.str();

7711 stream_.state = STREAM_RUNNING;

// Release the callback thread parked on runnable_cv.
7714 apiInfo->runnable = true;

7715 pthread_cond_signal( &apiInfo->runnable_cv );

7716 MUTEX_UNLOCK( &stream_.mutex );

// Report a system error only if one of the prepare/drop calls failed.
7718 if ( result >= 0 ) return;

7719 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop a running ALSA stream gracefully: drain queued output samples
// (or drop them when the devices are linked), drop pending input, and park
// the callback thread.
7722 void RtApiAlsa :: stopStream()

7725 if ( stream_.state == STREAM_STOPPED ) {

7726 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";

7727 error( RtAudioError::WARNING );

// Flip the state before taking the lock so the callback loop sees it.
7731 stream_.state = STREAM_STOPPED;

7732 MUTEX_LOCK( &stream_.mutex );

7735 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;

7736 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;

7737 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// Linked (synchronized) devices: drop immediately; otherwise drain so
// already-queued output is played to completion.
7738 if ( apiInfo->synchronized )

7739 result = snd_pcm_drop( handle[0] );

7741 result = snd_pcm_drain( handle[0] );

7742 if ( result < 0 ) {

7743 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";

7744 errorText_ = errorStream_.str();

// Input is simply dropped; skipped when linked to the output device.
7749 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {

7750 result = snd_pcm_drop( handle[1] );

7751 if ( result < 0 ) {

7752 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";

7753 errorText_ = errorStream_.str();

// Clearing runnable makes the callback thread block on the condition
// variable instead of spinning.
7759 apiInfo->runnable = false; // fixes high CPU usage when stopped

7760 MUTEX_UNLOCK( &stream_.mutex );

7762 if ( result >= 0 ) return;

7763 error( RtAudioError::SYSTEM_ERROR );
\r
// Abort a running ALSA stream immediately: unlike stopStream(), queued
// output samples are dropped (snd_pcm_drop) rather than drained.
7766 void RtApiAlsa :: abortStream()

7769 if ( stream_.state == STREAM_STOPPED ) {

7770 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";

7771 error( RtAudioError::WARNING );

7775 stream_.state = STREAM_STOPPED;

7776 MUTEX_LOCK( &stream_.mutex );

7779 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;

7780 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;

// Drop pending output without playing it out.
7781 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

7782 result = snd_pcm_drop( handle[0] );

7783 if ( result < 0 ) {

7784 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";

7785 errorText_ = errorStream_.str();

// Drop pending input; skipped when the devices are linked.
7790 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {

7791 result = snd_pcm_drop( handle[1] );

7792 if ( result < 0 ) {

7793 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";

7794 errorText_ = errorStream_.str();

// Park the callback thread on runnable_cv while stopped.
7800 apiInfo->runnable = false; // fixes high CPU usage when stopped

7801 MUTEX_UNLOCK( &stream_.mutex );

7803 if ( result >= 0 ) return;

7804 error( RtAudioError::SYSTEM_ERROR );
\r
// One iteration of the ALSA stream loop, invoked repeatedly by
// alsaCallbackHandler: wait while stopped, invoke the user callback, then
// read input from and/or write output to the PCM device(s), handling format
// conversion, byte swapping, and xrun (over/underrun) recovery.
7807 void RtApiAlsa :: callbackEvent()

7809 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;

// While the stream is stopped, block on the condition variable until
// startStream()/closeStream() signals runnable.
7810 if ( stream_.state == STREAM_STOPPED ) {

7811 MUTEX_LOCK( &stream_.mutex );

7812 while ( !apiInfo->runnable )

7813 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );

// Woken for some reason other than running (e.g. shutdown): bail out.
7815 if ( stream_.state != STREAM_RUNNING ) {

7816 MUTEX_UNLOCK( &stream_.mutex );

7819 MUTEX_UNLOCK( &stream_.mutex );

7822 if ( stream_.state == STREAM_CLOSED ) {

7823 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";

7824 error( RtAudioError::WARNING );

// Build the status flags from recorded xruns, then call the user callback.
7828 int doStopStream = 0;

7829 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;

7830 double streamTime = getStreamTime();

7831 RtAudioStreamStatus status = 0;

7832 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {

7833 status |= RTAUDIO_OUTPUT_UNDERFLOW;

7834 apiInfo->xrun[0] = false;

7836 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {

7837 status |= RTAUDIO_INPUT_OVERFLOW;

7838 apiInfo->xrun[1] = false;

// userBuffer[0] = output, userBuffer[1] = input. A return of 2 means
// abort; 1 (checked at the end) means stop after this buffer.
7840 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],

7841 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );

7843 if ( doStopStream == 2 ) {

7848 MUTEX_LOCK( &stream_.mutex );

7850 // The state might change while waiting on a mutex.

7851 if ( stream_.state == STREAM_STOPPED ) goto unlock;

7856 snd_pcm_t **handle;

7857 snd_pcm_sframes_t frames;

7858 RtAudioFormat format;

7859 handle = (snd_pcm_t **) apiInfo->handles;

// ---- Input (capture) side ----
7861 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

7863 // Setup parameters.

// When converting, capture into the device buffer at device channel
// count/format; otherwise read straight into the user buffer.
7864 if ( stream_.doConvertBuffer[1] ) {

7865 buffer = stream_.deviceBuffer;

7866 channels = stream_.nDeviceChannels[1];

7867 format = stream_.deviceFormat[1];

7870 buffer = stream_.userBuffer[1];

7871 channels = stream_.nUserChannels[1];

7872 format = stream_.userFormat;

7875 // Read samples from device in interleaved/non-interleaved format.

7876 if ( stream_.deviceInterleaved[1] )

7877 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );

// Non-interleaved: build one pointer per channel into the planar buffer.
7879 void *bufs[channels];

7880 size_t offset = stream_.bufferSize * formatBytes( format );

7881 for ( int i=0; i<channels; i++ )

7882 bufs[i] = (void *) (buffer + (i * offset));

7883 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );

// Short read: -EPIPE in the XRUN state is an overrun we can recover from
// by re-preparing the device; anything else is reported as a warning.
7886 if ( result < (int) stream_.bufferSize ) {

7887 // Either an error or overrun occured.

7888 if ( result == -EPIPE ) {

7889 snd_pcm_state_t state = snd_pcm_state( handle[1] );

7890 if ( state == SND_PCM_STATE_XRUN ) {

7891 apiInfo->xrun[1] = true;

7892 result = snd_pcm_prepare( handle[1] );

7893 if ( result < 0 ) {

7894 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";

7895 errorText_ = errorStream_.str();

7899 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";

7900 errorText_ = errorStream_.str();

7904 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";

7905 errorText_ = errorStream_.str();

7907 error( RtAudioError::WARNING );

7911 // Do byte swapping if necessary.

7912 if ( stream_.doByteSwap[1] )

7913 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );

7915 // Do buffer conversion if necessary.

7916 if ( stream_.doConvertBuffer[1] )

7917 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

7919 // Check stream latency

7920 result = snd_pcm_delay( handle[1], &frames );

7921 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;

// ---- Output (playback) side ----
7926 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

7928 // Setup parameters and do buffer conversion if necessary.

7929 if ( stream_.doConvertBuffer[0] ) {

7930 buffer = stream_.deviceBuffer;

7931 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );

7932 channels = stream_.nDeviceChannels[0];

7933 format = stream_.deviceFormat[0];

7936 buffer = stream_.userBuffer[0];

7937 channels = stream_.nUserChannels[0];

7938 format = stream_.userFormat;

7941 // Do byte swapping if necessary.

7942 if ( stream_.doByteSwap[0] )

7943 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);

7945 // Write samples to device in interleaved/non-interleaved format.

7946 if ( stream_.deviceInterleaved[0] )

7947 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );

// Non-interleaved: one pointer per channel, mirroring the read path.
7949 void *bufs[channels];

7950 size_t offset = stream_.bufferSize * formatBytes( format );

7951 for ( int i=0; i<channels; i++ )

7952 bufs[i] = (void *) (buffer + (i * offset));

7953 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );

// Short write: -EPIPE in the XRUN state is an underrun, recovered by
// snd_pcm_prepare; other failures are reported as warnings.
7956 if ( result < (int) stream_.bufferSize ) {

7957 // Either an error or underrun occured.

7958 if ( result == -EPIPE ) {

7959 snd_pcm_state_t state = snd_pcm_state( handle[0] );

7960 if ( state == SND_PCM_STATE_XRUN ) {

7961 apiInfo->xrun[0] = true;

7962 result = snd_pcm_prepare( handle[0] );

7963 if ( result < 0 ) {

7964 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";

7965 errorText_ = errorStream_.str();

7969 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";

7970 errorText_ = errorStream_.str();

7974 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";

7975 errorText_ = errorStream_.str();

7977 error( RtAudioError::WARNING );

7981 // Check stream latency

7982 result = snd_pcm_delay( handle[0], &frames );

7983 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;

7987 MUTEX_UNLOCK( &stream_.mutex );

// Advance the stream clock; honor a deferred stop request (callback
// returned 1) only after the buffer has been processed.
7989 RtApi::tickStreamTime();

7990 if ( doStopStream == 1 ) this->stopStream();
\r
// Entry point for the ALSA callback thread created in probeDeviceOpen().
// Optionally raises this thread to SCHED_RR priority (the pthread attribute
// route did not work, so it is done here), then loops calling
// RtApiAlsa::callbackEvent() until isRunning is cleared by stop/closeStream.
7993 static void *alsaCallbackHandler( void *ptr )

7995 CallbackInfo *info = (CallbackInfo *) ptr;

7996 RtApiAlsa *object = (RtApiAlsa *) info->object;

7997 bool *isRunning = &info->isRunning;

7999 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)

// BUG FIX: the original tested '&info->doRealtime', the address of the
// member, which is always non-null — so realtime scheduling was applied
// even when RTAUDIO_SCHEDULE_REALTIME was not requested. Test the flag's
// value instead.
8000 if ( info->doRealtime ) {

8001 pthread_t tID = pthread_self(); // ID of this thread

8002 sched_param prio = { info->priority }; // scheduling priority of thread

8003 pthread_setschedparam( tID, SCHED_RR, &prio );

// Main loop: poke the cancellation point, then run one buffer cycle.
8007 while ( *isRunning == true ) {

8008 pthread_testcancel();

8009 object->callbackEvent();

8012 pthread_exit( NULL );
\r
8015 //******************** End of __LINUX_ALSA__ *********************//
\r
8018 #if defined(__LINUX_PULSE__)
\r
8020 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8021 // and Tristan Matthews.
\r
8023 #include <pulse/error.h>
\r
8024 #include <pulse/simple.h>
\r
// Sample rates the PulseAudio backend reports as supported; the list is
// zero-terminated so callers can iterate without a count.
8027 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,

8028 44100, 48000, 96000, 0};

// Maps an RtAudio sample format to the corresponding PulseAudio format.
8030 struct rtaudio_pa_format_mapping_t {

8031 RtAudioFormat rtaudio_format;

8032 pa_sample_format_t pa_format;

// Format pairs supported via PulseAudio (little-endian variants);
// terminated by the {0, PA_SAMPLE_INVALID} sentinel entry.
8035 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {

8036 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},

8037 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},

8038 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},

8039 {0, PA_SAMPLE_INVALID}};
\r
// Per-stream state for the PulseAudio backend: the simple-API playback
// connection plus the callback-thread synchronization primitives.
// NOTE(review): some field declarations (e.g. s_rec, the thread, runnable —
// all referenced by the constructor and the methods below) are missing from
// this extracted view.
8041 struct PulseAudioHandle {

// Playback connection (pa_simple); null when the stream has no output.
8042 pa_simple *s_play;

// Condition variable the callback thread waits on while stopped.
8045 pthread_cond_t runnable_cv;

8047 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
// Destructor: ensure any still-open stream is torn down before the object
// goes away (the call on the true branch is missing from this view —
// presumably closeStream(); TODO confirm against the canonical source).
8050 RtApiPulse::~RtApiPulse()

8052 if ( stream_.state != STREAM_CLOSED )
\r
// Device count for the PulseAudio backend (body missing from this view;
// the simple API exposes a single virtual device — TODO confirm it returns 1).
8056 unsigned int RtApiPulse::getDeviceCount( void )
\r
// Describe the single virtual PulseAudio device: fixed stereo in/out,
// default for both directions, with the static rate/format tables above.
// The device index is ignored (there is only one device).
8061 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )

8063 RtAudio::DeviceInfo info;

8064 info.probed = true;

8065 info.name = "PulseAudio";

8066 info.outputChannels = 2;

8067 info.inputChannels = 2;

8068 info.duplexChannels = 2;

8069 info.isDefaultOutput = true;

8070 info.isDefaultInput = true;

// Copy the zero-terminated static rate list into the info structure.
8072 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )

8073 info.sampleRates.push_back( *sr );

8075 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
// Entry point for the PulseAudio callback thread: loop calling
// RtApiPulse::callbackEvent() until isRunning is cleared by closeStream().
8080 static void *pulseaudio_callback( void * user )

8082 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );

8083 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );

// volatile: the flag is cleared from another thread (closeStream).
8084 volatile bool *isRunning = &cbi->isRunning;

8086 while ( *isRunning ) {

// Allow pthread cancellation between buffer cycles.
8087 pthread_testcancel();

8088 context->callbackEvent();

8091 pthread_exit( NULL );
\r
// Close the PulseAudio stream: stop the callback thread, free the playback
// and capture connections, release the handle and user buffers, and reset
// the stream state.
8094 void RtApiPulse::closeStream( void )

8096 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

// Tell the callback thread to exit its loop ...
8098 stream_.callbackInfo.isRunning = false;

8100 MUTEX_LOCK( &stream_.mutex );

// ... and wake it if it is parked on the runnable condition.
8101 if ( stream_.state == STREAM_STOPPED ) {

8102 pah->runnable = true;

8103 pthread_cond_signal( &pah->runnable_cv );

8105 MUTEX_UNLOCK( &stream_.mutex );

// Wait for the thread to finish before freeing what it uses.
8107 pthread_join( pah->thread, 0 );

// Flush unplayed samples, then free the playback connection.
8108 if ( pah->s_play ) {

8109 pa_simple_flush( pah->s_play, NULL );

8110 pa_simple_free( pah->s_play );

8113 pa_simple_free( pah->s_rec );

8115 pthread_cond_destroy( &pah->runnable_cv );

8117 stream_.apiHandle = 0;

// Free the per-direction user buffers (0 = output, 1 = input).
8120 if ( stream_.userBuffer[0] ) {

8121 free( stream_.userBuffer[0] );

8122 stream_.userBuffer[0] = 0;

8124 if ( stream_.userBuffer[1] ) {

8125 free( stream_.userBuffer[1] );

8126 stream_.userBuffer[1] = 0;

8129 stream_.state = STREAM_CLOSED;

8130 stream_.mode = UNINITIALIZED;
\r
// One iteration of the PulseAudio stream loop, invoked repeatedly by
// pulseaudio_callback: wait while stopped, call the user callback, then
// write output via pa_simple_write and/or read input via pa_simple_read,
// converting formats as needed.
8133 void RtApiPulse::callbackEvent( void )

8135 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

// While stopped, block on the condition variable until startStream() or
// closeStream() signals runnable.
8137 if ( stream_.state == STREAM_STOPPED ) {

8138 MUTEX_LOCK( &stream_.mutex );

8139 while ( !pah->runnable )

8140 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );

8142 if ( stream_.state != STREAM_RUNNING ) {

8143 MUTEX_UNLOCK( &stream_.mutex );

8146 MUTEX_UNLOCK( &stream_.mutex );

8149 if ( stream_.state == STREAM_CLOSED ) {

8150 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "

8151 "this shouldn't happen!";

8152 error( RtAudioError::WARNING );

// Call the user callback; status is always 0 here (the simple API exposes
// no xrun reporting).
8156 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;

8157 double streamTime = getStreamTime();

8158 RtAudioStreamStatus status = 0;

8159 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],

8160 stream_.bufferSize, streamTime, status,

8161 stream_.callbackInfo.userData );

8163 if ( doStopStream == 2 ) {

8168 MUTEX_LOCK( &stream_.mutex );

// When converting, PulseAudio I/O goes through the shared device buffer;
// otherwise straight to/from the user buffers.
8169 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];

8170 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];

8172 if ( stream_.state != STREAM_RUNNING )

// ---- Output (playback) side ----
8177 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

8178 if ( stream_.doConvertBuffer[OUTPUT] ) {

8179 convertBuffer( stream_.deviceBuffer,

8180 stream_.userBuffer[OUTPUT],

8181 stream_.convertInfo[OUTPUT] );

8182 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *

8183 formatBytes( stream_.deviceFormat[OUTPUT] );

8185 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *

8186 formatBytes( stream_.userFormat );

// Blocking write of one buffer to the playback connection.
8188 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {

8189 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<

8190 pa_strerror( pa_error ) << ".";

8191 errorText_ = errorStream_.str();

8192 error( RtAudioError::WARNING );

// ---- Input (capture) side ----
8196 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {

8197 if ( stream_.doConvertBuffer[INPUT] )

8198 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *

8199 formatBytes( stream_.deviceFormat[INPUT] );

8201 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *

8202 formatBytes( stream_.userFormat );

// Blocking read of one buffer from the capture connection.
8204 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {

8205 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<

8206 pa_strerror( pa_error ) << ".";

8207 errorText_ = errorStream_.str();

8208 error( RtAudioError::WARNING );

8210 if ( stream_.doConvertBuffer[INPUT] ) {

8211 convertBuffer( stream_.userBuffer[INPUT],

8212 stream_.deviceBuffer,

8213 stream_.convertInfo[INPUT] );

8218 MUTEX_UNLOCK( &stream_.mutex );

// Advance the stream clock; honor a deferred stop (callback returned 1).
8219 RtApi::tickStreamTime();

8221 if ( doStopStream == 1 )
\r
// Start a stopped PulseAudio stream: mark it running and wake the callback
// thread parked on runnable_cv. (No device-level action is needed; the
// simple API reads/writes on demand in callbackEvent.)
8225 void RtApiPulse::startStream( void )

8227 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

// Starting a closed stream is a usage error; an already-running one is
// only a warning.
8229 if ( stream_.state == STREAM_CLOSED ) {

8230 errorText_ = "RtApiPulse::startStream(): the stream is not open!";

8231 error( RtAudioError::INVALID_USE );

8234 if ( stream_.state == STREAM_RUNNING ) {

8235 errorText_ = "RtApiPulse::startStream(): the stream is already running!";

8236 error( RtAudioError::WARNING );

8240 MUTEX_LOCK( &stream_.mutex );

8242 stream_.state = STREAM_RUNNING;

// Release the callback thread.
8244 pah->runnable = true;

8245 pthread_cond_signal( &pah->runnable_cv );

8246 MUTEX_UNLOCK( &stream_.mutex );
\r
// Stop the PulseAudio stream, draining (playing out) any buffered output
// before halting.  On a drain failure the mutex is released and a
// SYSTEM_ERROR is raised.
// NOTE(review): stream_.state is assigned STREAM_STOPPED before the mutex
// is acquired (line 8264) and again inside the critical section (8279) —
// confirm the early unsynchronized write is intentional.
// NOTE(review): dropped extraction lines hide the error-branch returns and
// closing braces.
8249 void RtApiPulse::stopStream( void )
\r
8251 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8253 if ( stream_.state == STREAM_CLOSED ) {
\r
8254 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8255 error( RtAudioError::INVALID_USE );
\r
8258 if ( stream_.state == STREAM_STOPPED ) {
\r
8259 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8260 error( RtAudioError::WARNING );
\r
8264 stream_.state = STREAM_STOPPED;
\r
8265 MUTEX_LOCK( &stream_.mutex );
\r
// Only the playback connection needs draining; capture has no pending data.
8267 if ( pah && pah->s_play ) {
\r
// pa_simple_drain blocks until all queued audio has been played.
8269 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8270 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8271 pa_strerror( pa_error ) << ".";
\r
8272 errorText_ = errorStream_.str();
\r
// Unlock before raising so error() (which may throw) doesn't leave the
// stream mutex held.
8273 MUTEX_UNLOCK( &stream_.mutex );
\r
8274 error( RtAudioError::SYSTEM_ERROR );
\r
8279 stream_.state = STREAM_STOPPED;
\r
8280 MUTEX_UNLOCK( &stream_.mutex );
\r
// Abort the PulseAudio stream: identical to stopStream() except that
// buffered output is discarded (pa_simple_flush) rather than played out
// (pa_simple_drain).
// NOTE(review): same pre-lock STREAM_STOPPED assignment as stopStream();
// dropped extraction lines hide the error-branch returns and braces.
8283 void RtApiPulse::abortStream( void )
\r
8285 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8287 if ( stream_.state == STREAM_CLOSED ) {
\r
8288 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8289 error( RtAudioError::INVALID_USE );
\r
8292 if ( stream_.state == STREAM_STOPPED ) {
\r
8293 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8294 error( RtAudioError::WARNING );
\r
8298 stream_.state = STREAM_STOPPED;
\r
8299 MUTEX_LOCK( &stream_.mutex );
\r
8301 if ( pah && pah->s_play ) {
\r
// Discard (do not play) any audio still queued on the playback connection.
8303 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8304 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8305 pa_strerror( pa_error ) << ".";
\r
8306 errorText_ = errorStream_.str();
\r
// Release the lock before error() so a thrown SYSTEM_ERROR can't deadlock.
8307 MUTEX_UNLOCK( &stream_.mutex );
\r
8308 error( RtAudioError::SYSTEM_ERROR );
\r
8313 stream_.state = STREAM_STOPPED;
\r
8314 MUTEX_UNLOCK( &stream_.mutex );
\r
// Open the (single, default) PulseAudio device for the given mode.
// Validates device/channel/rate/format parameters, sets up the stream_
// bookkeeping (conversion flags, user/device buffers), connects to the
// PulseAudio server via the simple API, and spawns the callback thread on
// first open.  Returns true on success; on failure falls through to the
// cleanup code at the bottom (label not visible — extraction dropped lines,
// including error returns, loop break/brace lines, and the `goto error`s).
8317 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8318 unsigned int channels, unsigned int firstChannel,
\r
8319 unsigned int sampleRate, RtAudioFormat format,
\r
8320 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8322 PulseAudioHandle *pah = 0;
\r
8323 unsigned long bufferBytes = 0;
\r
8324 pa_sample_spec ss;
\r
// The Pulse backend exposes exactly one device (id 0, the server default).
8326 if ( device != 0 ) return false;
\r
8327 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8328 if ( channels != 1 && channels != 2 ) {
\r
8329 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8332 ss.channels = channels;
\r
8334 if ( firstChannel != 0 ) return false;
\r
// Accept only rates from the zero-terminated SUPPORTED_SAMPLERATES table.
8336 bool sr_found = false;
\r
8337 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8338 if ( sampleRate == *sr ) {
\r
8340 stream_.sampleRate = sampleRate;
\r
8341 ss.rate = sampleRate;
\r
8345 if ( !sr_found ) {
\r
8346 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
// NOTE(review): `0` used as a bool literal; `false` would match line 8336.
8350 bool sf_found = 0;
\r
// Map the requested RtAudio format onto a native PulseAudio sample format.
8351 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8352 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8353 if ( format == sf->rtaudio_format ) {
\r
8355 stream_.userFormat = sf->rtaudio_format;
\r
8356 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8357 ss.format = sf->pa_format;
\r
// No native match: run the device in float32 and convert user data.
8361 if ( !sf_found ) { // Use internal data format conversion.
\r
8362 stream_.userFormat = format;
\r
8363 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8364 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8367 // Set other stream parameters.
\r
8368 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8369 else stream_.userInterleaved = true;
\r
8370 stream_.deviceInterleaved[mode] = true;
\r
8371 stream_.nBuffers = 1;
\r
8372 stream_.doByteSwap[mode] = false;
\r
8373 stream_.nUserChannels[mode] = channels;
\r
// firstChannel is known to be 0 here (checked at 8334), so this equals
// `channels`.
8374 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8375 stream_.channelOffset[mode] = 0;
\r
8376 std::string streamName = "RtAudio";
\r
8378 // Set flags for buffer conversion.
\r
8379 stream_.doConvertBuffer[mode] = false;
\r
8380 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8381 stream_.doConvertBuffer[mode] = true;
\r
8382 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8383 stream_.doConvertBuffer[mode] = true;
\r
8385 // Allocate necessary internal buffers.
\r
8386 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8387 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8388 if ( stream_.userBuffer[mode] == NULL ) {
\r
8389 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8392 stream_.bufferSize = *bufferSize;
\r
8394 if ( stream_.doConvertBuffer[mode] ) {
\r
8396 bool makeBuffer = true;
\r
8397 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
// For duplex, reuse the output-side device buffer when it is big enough.
8398 if ( mode == INPUT ) {
\r
8399 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8400 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8401 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8405 if ( makeBuffer ) {
\r
8406 bufferBytes *= *bufferSize;
\r
8407 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8408 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8409 if ( stream_.deviceBuffer == NULL ) {
\r
8410 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8416 stream_.device[mode] = device;
\r
8418 // Setup the buffer conversion information structure.
\r
8419 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
// First open: allocate the per-stream handle and its condition variable.
8421 if ( !stream_.apiHandle ) {
\r
8422 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8424 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8428 stream_.apiHandle = pah;
\r
8429 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8430 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8434 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8437 if ( !options->streamName.empty() ) streamName = options->streamName;
\r
// Record-side buffer attributes: fragsize bounds per-read latency;
// maxlength -1 lets the server choose.
8440 pa_buffer_attr buffer_attr;
\r
8441 buffer_attr.fragsize = bufferBytes;
\r
8442 buffer_attr.maxlength = -1;
\r
8444 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8445 if ( !pah->s_rec ) {
\r
8446 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
// NOTE(review): the playback connection hard-codes "RtAudio" and passes no
// buffer_attr, unlike the record connection above which honors
// options->streamName — presumably both should use streamName; confirm.
8451 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8452 if ( !pah->s_play ) {
\r
8453 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
// Track whether this open completes a duplex pair.
8461 if ( stream_.mode == UNINITIALIZED )
\r
8462 stream_.mode = mode;
\r
8463 else if ( stream_.mode == mode )
\r
8466 stream_.mode = DUPLEX;
\r
// Spawn the callback thread once; it idles until startStream() signals it.
8468 if ( !stream_.callbackInfo.isRunning ) {
\r
8469 stream_.callbackInfo.object = this;
\r
8470 stream_.callbackInfo.isRunning = true;
\r
8471 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8472 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8477 stream_.state = STREAM_STOPPED;
\r
// ---- error cleanup path (reached via gotos dropped by extraction) ----
8481 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8482 pthread_cond_destroy( &pah->runnable_cv );
\r
8484 stream_.apiHandle = 0;
\r
8487 for ( int i=0; i<2; i++ ) {
\r
8488 if ( stream_.userBuffer[i] ) {
\r
8489 free( stream_.userBuffer[i] );
\r
8490 stream_.userBuffer[i] = 0;
\r
8494 if ( stream_.deviceBuffer ) {
\r
8495 free( stream_.deviceBuffer );
\r
8496 stream_.deviceBuffer = 0;
\r
8502 //******************** End of __LINUX_PULSE__ *********************//
\r
8505 #if defined(__LINUX_OSS__)
\r
8507 #include <unistd.h>
\r
8508 #include <sys/ioctl.h>
\r
8509 #include <unistd.h>
\r
8510 #include <fcntl.h>
\r
8511 #include <sys/soundcard.h>
\r
8512 #include <errno.h>
\r
8515 static void *ossCallbackHandler(void * ptr);
\r
8517 // A structure to hold various information related to the OSS API
\r
8518 // implementation.
\r
// Per-stream state for the OSS backend.  id[0]/id[1] are the playback and
// capture file descriptors (0 = unused).  The callback thread waits on
// `runnable` while the stream is stopped.
// NOTE(review): extraction dropped member lines here (the original also
// declares `bool xrun[2]` and `bool triggered`, referenced by the
// constructor initializer below and by the stream methods) — do not treat
// this fragment as the complete struct.
8519 struct OssHandle {
\r
8520 int id[2]; // device ids
\r
8523 pthread_cond_t runnable;
\r
8526 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// Default constructor — all real setup happens in probeDeviceOpen().
8529 RtApiOss :: RtApiOss()
\r
8531 // Nothing to do here.
\r
// Destructor: close the stream (fds, thread, buffers) if still open.
8534 RtApiOss :: ~RtApiOss()
\r
8536 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// Return the number of OSS audio devices by querying the mixer's sysinfo
// (requires OSS v4+).  On any failure a WARNING is raised and (per the
// dropped error-branch lines) 0 is returned.
// NOTE(review): extraction dropped the `return 0;`/`close(mixerfd);` lines
// inside the error branches and before the final return.
8539 unsigned int RtApiOss :: getDeviceCount( void )
\r
8541 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8542 if ( mixerfd == -1 ) {
\r
8543 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8544 error( RtAudioError::WARNING );
\r
8548 oss_sysinfo sysinfo;
\r
8549 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8551 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8552 error( RtAudioError::WARNING );
\r
8557 return sysinfo.numaudios;
\r
// Probe one OSS device: channel capabilities, native sample formats, and
// supported sample rates.  info.probed is true only if the full probe
// succeeds; on any failure a warning/error is raised and the partially
// filled info is returned (early returns dropped by extraction).
8560 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8562 RtAudio::DeviceInfo info;
\r
8563 info.probed = false;
\r
8565 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8566 if ( mixerfd == -1 ) {
\r
8567 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8568 error( RtAudioError::WARNING );
\r
8572 oss_sysinfo sysinfo;
\r
8573 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8574 if ( result == -1 ) {
\r
8576 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8577 error( RtAudioError::WARNING );
\r
8581 unsigned nDevices = sysinfo.numaudios;
\r
8582 if ( nDevices == 0 ) {
\r
8584 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8585 error( RtAudioError::INVALID_USE );
\r
8589 if ( device >= nDevices ) {
\r
8591 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8592 error( RtAudioError::INVALID_USE );
\r
8596 oss_audioinfo ainfo;
\r
8597 ainfo.dev = device;
\r
8598 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8600 if ( result == -1 ) {
\r
8601 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8602 errorText_ = errorStream_.str();
\r
8603 error( RtAudioError::WARNING );
\r
// Channel capabilities, derived from the device caps bitmask.
8608 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8609 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
// NOTE(review): PCM_CAP_DUPLEX is tested twice (outer if at 8610 and again
// inside the condition at 8611) — the inner re-test is redundant.
8610 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8611 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8612 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8615 // Probe data formats ... do for input
\r
8616 unsigned long mask = ainfo.iformats;
\r
8617 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8618 info.nativeFormats |= RTAUDIO_SINT16;
\r
8619 if ( mask & AFMT_S8 )
\r
8620 info.nativeFormats |= RTAUDIO_SINT8;
\r
8621 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8622 info.nativeFormats |= RTAUDIO_SINT32;
\r
8623 if ( mask & AFMT_FLOAT )
\r
8624 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8625 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8626 info.nativeFormats |= RTAUDIO_SINT24;
\r
8628 // Check that we have at least one supported format
\r
8629 if ( info.nativeFormats == 0 ) {
\r
8630 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8631 errorText_ = errorStream_.str();
\r
8632 error( RtAudioError::WARNING );
\r
8636 // Probe the supported sample rates.
\r
8637 info.sampleRates.clear();
\r
// If the device enumerates discrete rates, intersect them with our table;
// otherwise (else-branch line dropped) test our table against min/max.
8638 if ( ainfo.nrates ) {
\r
8639 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8640 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8641 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8642 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8649 // Check min and max rate values;
\r
8650 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8651 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
8652 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8656 if ( info.sampleRates.size() == 0 ) {
\r
8657 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8658 errorText_ = errorStream_.str();
\r
8659 error( RtAudioError::WARNING );
\r
8662 info.probed = true;
\r
8663 info.name = ainfo.name;
\r
// Open an OSS device for input or output: locate the device node via the
// mixer, open it with the right access flags (reopening read/write for
// same-device duplex), negotiate channels / sample format / fragment size /
// rate, allocate conversion buffers, and spawn the callback thread.
// Returns true on success; failures flow to the cleanup code at the bottom
// via `goto error` statements dropped by the extraction (embedded line
// numbers jump throughout — braces, returns and some declarations such as
// `int flags`, `int fd`, `int mask`, `int buffers` are missing from view).
8670 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8671 unsigned int firstChannel, unsigned int sampleRate,
\r
8672 RtAudioFormat format, unsigned int *bufferSize,
\r
8673 RtAudio::StreamOptions *options )
\r
8675 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8676 if ( mixerfd == -1 ) {
\r
8677 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8681 oss_sysinfo sysinfo;
\r
8682 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8683 if ( result == -1 ) {
\r
8685 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8689 unsigned nDevices = sysinfo.numaudios;
\r
8690 if ( nDevices == 0 ) {
\r
8691 // This should not happen because a check is made before this function is called.
\r
8693 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8697 if ( device >= nDevices ) {
\r
8698 // This should not happen because a check is made before this function is called.
\r
8700 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8704 oss_audioinfo ainfo;
\r
8705 ainfo.dev = device;
\r
8706 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8708 if ( result == -1 ) {
\r
// NOTE(review): message says "getDeviceInfo" but this is probeDeviceOpen —
// likely copy-paste; confirm before changing the user-visible string.
8709 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8710 errorText_ = errorStream_.str();
\r
8714 // Check if device supports input or output
\r
8715 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8716 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8717 if ( mode == OUTPUT )
\r
8718 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8720 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8721 errorText_ = errorStream_.str();
\r
8726 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8727 if ( mode == OUTPUT )
\r
8728 flags |= O_WRONLY;
\r
8729 else { // mode == INPUT
\r
// Same device already open for playback: OSS needs one O_RDWR fd for
// duplex, so close the playback fd and reopen below.
8730 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8731 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8732 close( handle->id[0] );
\r
8733 handle->id[0] = 0;
\r
8734 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8735 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8736 errorText_ = errorStream_.str();
\r
8739 // Check that the number previously set channels is the same.
\r
8740 if ( stream_.nUserChannels[0] != channels ) {
\r
8741 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8742 errorText_ = errorStream_.str();
\r
8748 flags |= O_RDONLY;
\r
8751 // Set exclusive access if specified.
\r
8752 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8754 // Try to open the device.
\r
8756 fd = open( ainfo.devnode, flags, 0 );
\r
8758 if ( errno == EBUSY )
\r
8759 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8761 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8762 errorText_ = errorStream_.str();
\r
8766 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
// NOTE(review): `flags | O_RDWR` is a bitwise OR, always non-zero, so this
// condition is always true — almost certainly `flags & O_RDWR` (or a
// comparison against O_ACCMODE) was intended; confirm against upstream.
8768 if ( flags | O_RDWR ) {
\r
8769 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8770 if ( result == -1) {
\r
8771 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8772 errorText_ = errorStream_.str();
\r
8778 // Check the device channel support.
\r
8779 stream_.nUserChannels[mode] = channels;
\r
8780 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8782 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8783 errorText_ = errorStream_.str();
\r
8787 // Set the number of channels.
\r
8788 int deviceChannels = channels + firstChannel;
\r
8789 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8790 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8792 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8793 errorText_ = errorStream_.str();
\r
8796 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8798 // Get the data format mask
\r
8800 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8801 if ( result == -1 ) {
\r
8803 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8804 errorText_ = errorStream_.str();
\r
8808 // Determine how to set the device format.
\r
8809 stream_.userFormat = format;
\r
8810 int deviceFormat = -1;
\r
8811 stream_.doByteSwap[mode] = false;
\r
// First try the user's exact format natively (NE = native endian,
// OE = opposite endian, requiring a byte swap on our side).
8812 if ( format == RTAUDIO_SINT8 ) {
\r
8813 if ( mask & AFMT_S8 ) {
\r
8814 deviceFormat = AFMT_S8;
\r
8815 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8818 else if ( format == RTAUDIO_SINT16 ) {
\r
8819 if ( mask & AFMT_S16_NE ) {
\r
8820 deviceFormat = AFMT_S16_NE;
\r
8821 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8823 else if ( mask & AFMT_S16_OE ) {
\r
8824 deviceFormat = AFMT_S16_OE;
\r
8825 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8826 stream_.doByteSwap[mode] = true;
\r
8829 else if ( format == RTAUDIO_SINT24 ) {
\r
8830 if ( mask & AFMT_S24_NE ) {
\r
8831 deviceFormat = AFMT_S24_NE;
\r
8832 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8834 else if ( mask & AFMT_S24_OE ) {
\r
8835 deviceFormat = AFMT_S24_OE;
\r
8836 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8837 stream_.doByteSwap[mode] = true;
\r
8840 else if ( format == RTAUDIO_SINT32 ) {
\r
8841 if ( mask & AFMT_S32_NE ) {
\r
8842 deviceFormat = AFMT_S32_NE;
\r
8843 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8845 else if ( mask & AFMT_S32_OE ) {
\r
8846 deviceFormat = AFMT_S32_OE;
\r
8847 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8848 stream_.doByteSwap[mode] = true;
\r
// No native match: fall back to the widest-to-narrowest device format and
// let RtAudio's buffer conversion bridge the difference.
8852 if ( deviceFormat == -1 ) {
\r
8853 // The user requested format is not natively supported by the device.
\r
8854 if ( mask & AFMT_S16_NE ) {
\r
8855 deviceFormat = AFMT_S16_NE;
\r
8856 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8858 else if ( mask & AFMT_S32_NE ) {
\r
8859 deviceFormat = AFMT_S32_NE;
\r
8860 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8862 else if ( mask & AFMT_S24_NE ) {
\r
8863 deviceFormat = AFMT_S24_NE;
\r
8864 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8866 else if ( mask & AFMT_S16_OE ) {
\r
8867 deviceFormat = AFMT_S16_OE;
\r
8868 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8869 stream_.doByteSwap[mode] = true;
\r
8871 else if ( mask & AFMT_S32_OE ) {
\r
8872 deviceFormat = AFMT_S32_OE;
\r
8873 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8874 stream_.doByteSwap[mode] = true;
\r
8876 else if ( mask & AFMT_S24_OE ) {
\r
8877 deviceFormat = AFMT_S24_OE;
\r
8878 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8879 stream_.doByteSwap[mode] = true;
\r
8881 else if ( mask & AFMT_S8) {
\r
8882 deviceFormat = AFMT_S8;
\r
8883 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8887 if ( stream_.deviceFormat[mode] == 0 ) {
\r
8888 // This really shouldn't happen ...
\r
8890 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8891 errorText_ = errorStream_.str();
\r
8895 // Set the data format.
\r
8896 int temp = deviceFormat;
\r
8897 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
8898 if ( result == -1 || deviceFormat != temp ) {
\r
8900 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
8901 errorText_ = errorStream_.str();
\r
8905 // Attempt to set the buffer size. According to OSS, the minimum
\r
8906 // number of buffers is two. The supposed minimum buffer size is 16
\r
8907 // bytes, so that will be our lower bound. The argument to this
\r
8908 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
8909 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
8910 // We'll check the actual value used near the end of the setup
\r
8912 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
8913 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
8915 if ( options ) buffers = options->numberOfBuffers;
\r
8916 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
8917 if ( buffers < 2 ) buffers = 3;
\r
// log10(x)/log10(2) == log2(x): the SSSS field wants the base-2 exponent
// of the fragment byte size.
8918 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
8919 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
8920 if ( result == -1 ) {
\r
8922 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
8923 errorText_ = errorStream_.str();
\r
8926 stream_.nBuffers = buffers;
\r
8928 // Save buffer size (in sample frames).
\r
8929 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
8930 stream_.bufferSize = *bufferSize;
\r
8932 // Set the sample rate.
\r
8933 int srate = sampleRate;
\r
8934 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
8935 if ( result == -1 ) {
\r
8937 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
8938 errorText_ = errorStream_.str();
\r
8942 // Verify the sample rate setup worked.
\r
// Accept the device rate if within 100 Hz of the request.
8943 if ( abs( srate - sampleRate ) > 100 ) {
\r
8945 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
8946 errorText_ = errorStream_.str();
\r
8949 stream_.sampleRate = sampleRate;
\r
// Same-device duplex: the single reopened fd now serves both directions,
// so mirror the negotiated format/channels onto the output side.
8951 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8952 // We're doing duplex setup here.
\r
8953 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
8954 stream_.nDeviceChannels[0] = deviceChannels;
\r
8957 // Set interleaving parameters.
\r
8958 stream_.userInterleaved = true;
\r
8959 stream_.deviceInterleaved[mode] = true;
\r
8960 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
8961 stream_.userInterleaved = false;
\r
8963 // Set flags for buffer conversion
\r
8964 stream_.doConvertBuffer[mode] = false;
\r
8965 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8966 stream_.doConvertBuffer[mode] = true;
\r
8967 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8968 stream_.doConvertBuffer[mode] = true;
\r
8969 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
8970 stream_.nUserChannels[mode] > 1 )
\r
8971 stream_.doConvertBuffer[mode] = true;
\r
8973 // Allocate the stream handles if necessary and then save.
\r
8974 if ( stream_.apiHandle == 0 ) {
\r
8976 handle = new OssHandle;
\r
8978 catch ( std::bad_alloc& ) {
\r
8979 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
8983 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
8984 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
8988 stream_.apiHandle = (void *) handle;
\r
8991 handle = (OssHandle *) stream_.apiHandle;
\r
8993 handle->id[mode] = fd;
\r
8995 // Allocate necessary internal buffers.
\r
8996 unsigned long bufferBytes;
\r
8997 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8998 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8999 if ( stream_.userBuffer[mode] == NULL ) {
\r
9000 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
9004 if ( stream_.doConvertBuffer[mode] ) {
\r
9006 bool makeBuffer = true;
\r
9007 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
// Reuse the output-side device buffer for input when it is large enough.
9008 if ( mode == INPUT ) {
\r
9009 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
9010 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
9011 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9015 if ( makeBuffer ) {
\r
9016 bufferBytes *= *bufferSize;
\r
9017 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9018 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9019 if ( stream_.deviceBuffer == NULL ) {
\r
9020 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9026 stream_.device[mode] = device;
\r
9027 stream_.state = STREAM_STOPPED;
\r
9029 // Setup the buffer conversion information structure.
\r
9030 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9032 // Setup thread if necessary.
\r
9033 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9034 // We had already set up an output stream.
\r
9035 stream_.mode = DUPLEX;
\r
9036 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9039 stream_.mode = mode;
\r
9041 // Setup callback thread.
\r
9042 stream_.callbackInfo.object = (void *) this;
\r
9044 // Set the thread attributes for joinable and realtime scheduling
\r
9045 // priority. The higher priority will only take affect if the
\r
9046 // program is run as root or suid.
\r
9047 pthread_attr_t attr;
\r
9048 pthread_attr_init( &attr );
\r
9049 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9050 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
// Clamp the requested priority into the valid SCHED_RR range.
9051 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9052 struct sched_param param;
\r
9053 int priority = options->priority;
\r
9054 int min = sched_get_priority_min( SCHED_RR );
\r
9055 int max = sched_get_priority_max( SCHED_RR );
\r
9056 if ( priority < min ) priority = min;
\r
9057 else if ( priority > max ) priority = max;
\r
9058 param.sched_priority = priority;
\r
9059 pthread_attr_setschedparam( &attr, &param );
\r
9060 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9063 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9065 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9068 stream_.callbackInfo.isRunning = true;
\r
9069 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9070 pthread_attr_destroy( &attr );
\r
9072 stream_.callbackInfo.isRunning = false;
\r
9073 errorText_ = "RtApiOss::error creating callback thread!";
\r
// ---- error cleanup path (reached via gotos dropped by extraction) ----
9082 pthread_cond_destroy( &handle->runnable );
\r
9083 if ( handle->id[0] ) close( handle->id[0] );
\r
9084 if ( handle->id[1] ) close( handle->id[1] );
\r
9086 stream_.apiHandle = 0;
\r
9089 for ( int i=0; i<2; i++ ) {
\r
9090 if ( stream_.userBuffer[i] ) {
\r
9091 free( stream_.userBuffer[i] );
\r
9092 stream_.userBuffer[i] = 0;
\r
9096 if ( stream_.deviceBuffer ) {
\r
9097 free( stream_.deviceBuffer );
\r
9098 stream_.deviceBuffer = 0;
\r
// Close the OSS stream: stop the callback thread (signal it if it's parked
// waiting on `runnable`, then join), halt any running DSP transfers, close
// both device fds, destroy the condition variable, and free all buffers.
// NOTE(review): extraction dropped the error-branch return and closing
// braces (embedded line numbers jump).
9104 void RtApiOss :: closeStream()
\r
9106 if ( stream_.state == STREAM_CLOSED ) {
\r
9107 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9108 error( RtAudioError::WARNING );
\r
9112 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
// Tell the callback thread loop to exit, then wake it if it's blocked.
9113 stream_.callbackInfo.isRunning = false;
\r
9114 MUTEX_LOCK( &stream_.mutex );
\r
9115 if ( stream_.state == STREAM_STOPPED )
\r
9116 pthread_cond_signal( &handle->runnable );
\r
9117 MUTEX_UNLOCK( &stream_.mutex );
\r
9118 pthread_join( stream_.callbackInfo.thread, NULL );
\r
// If still running, halt the active transfer(s) before closing fds.
9120 if ( stream_.state == STREAM_RUNNING ) {
\r
9121 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9122 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9124 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9125 stream_.state = STREAM_STOPPED;
\r
9129 pthread_cond_destroy( &handle->runnable );
\r
9130 if ( handle->id[0] ) close( handle->id[0] );
\r
9131 if ( handle->id[1] ) close( handle->id[1] );
\r
9133 stream_.apiHandle = 0;
\r
9136 for ( int i=0; i<2; i++ ) {
\r
9137 if ( stream_.userBuffer[i] ) {
\r
9138 free( stream_.userBuffer[i] );
\r
9139 stream_.userBuffer[i] = 0;
\r
9143 if ( stream_.deviceBuffer ) {
\r
9144 free( stream_.deviceBuffer );
\r
9145 stream_.deviceBuffer = 0;
\r
9148 stream_.mode = UNINITIALIZED;
\r
9149 stream_.state = STREAM_CLOSED;
\r
// Start the OSS stream.  OSS begins transferring as soon as it is fed
// samples, so this only flips the state and wakes the callback thread
// waiting on handle->runnable.
9152 void RtApiOss :: startStream()
\r
9155 if ( stream_.state == STREAM_RUNNING ) {
\r
9156 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9157 error( RtAudioError::WARNING );
\r
9161 MUTEX_LOCK( &stream_.mutex );
\r
9163 stream_.state = STREAM_RUNNING;
\r
9165 // No need to do anything else here ... OSS automatically starts
\r
9166 // when fed samples.
\r
9168 MUTEX_UNLOCK( &stream_.mutex );
\r
9170 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
// Wake the callback thread parked in callbackEvent().
9171 pthread_cond_signal( &handle->runnable );
\r
// Stop the OSS stream, first flushing the output with nBuffers+1 blocks of
// silence so queued audio plays out, then halting the DSP transfer(s).
// `result` tracks the last syscall outcome; a trailing -1 raises
// SYSTEM_ERROR after the mutex is released.
// NOTE(review): extraction dropped lines (declarations of result/buffer/
// samples, closing braces, returns) — embedded line numbers jump.
9174 void RtApiOss :: stopStream()
\r
9177 if ( stream_.state == STREAM_STOPPED ) {
\r
9178 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9179 error( RtAudioError::WARNING );
\r
9183 MUTEX_LOCK( &stream_.mutex );
\r
9185 // The state might change while waiting on a mutex.
\r
9186 if ( stream_.state == STREAM_STOPPED ) {
\r
9187 MUTEX_UNLOCK( &stream_.mutex );
\r
9192 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9193 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9195 // Flush the output with zeros a few times.
\r
9198 RtAudioFormat format;
\r
// Choose whichever buffer feeds the device directly as the silence source.
9200 if ( stream_.doConvertBuffer[0] ) {
\r
9201 buffer = stream_.deviceBuffer;
\r
9202 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9203 format = stream_.deviceFormat[0];
\r
9206 buffer = stream_.userBuffer[0];
\r
9207 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9208 format = stream_.userFormat;
\r
9211 memset( buffer, 0, samples * formatBytes(format) );
\r
9212 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9213 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9214 if ( result == -1 ) {
\r
9215 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9216 error( RtAudioError::WARNING );
\r
9220 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9221 if ( result == -1 ) {
\r
9222 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9223 errorText_ = errorStream_.str();
\r
9226 handle->triggered = false;
\r
// Halt input separately unless duplex shares a single fd for both.
9229 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9230 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9231 if ( result == -1 ) {
\r
// NOTE(review): this input-side message reports stream_.device[0];
// presumably stream_.device[1] was meant — confirm against upstream.
9232 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9233 errorText_ = errorStream_.str();
\r
9239 stream_.state = STREAM_STOPPED;
\r
9240 MUTEX_UNLOCK( &stream_.mutex );
\r
// Raise SYSTEM_ERROR only after the lock is released.
9242 if ( result != -1 ) return;
\r
9243 error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the OSS stream: identical to stopStream() but without the
// play-out-silence flush — transfers are halted immediately.
// NOTE(review): extraction dropped lines (declaration of result, closing
// braces, early return) — embedded line numbers jump.
9246 void RtApiOss :: abortStream()
\r
9249 if ( stream_.state == STREAM_STOPPED ) {
\r
9250 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9251 error( RtAudioError::WARNING );
\r
9255 MUTEX_LOCK( &stream_.mutex );
\r
9257 // The state might change while waiting on a mutex.
\r
9258 if ( stream_.state == STREAM_STOPPED ) {
\r
9259 MUTEX_UNLOCK( &stream_.mutex );
\r
9264 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9265 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9266 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9267 if ( result == -1 ) {
\r
9268 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9269 errorText_ = errorStream_.str();
\r
9272 handle->triggered = false;
\r
// Halt input separately unless duplex shares a single fd for both.
9275 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9276 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9277 if ( result == -1 ) {
\r
// NOTE(review): input-side message reports stream_.device[0]; presumably
// stream_.device[1] was meant — confirm against upstream.
9278 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9279 errorText_ = errorStream_.str();
\r
9285 stream_.state = STREAM_STOPPED;
\r
9286 MUTEX_UNLOCK( &stream_.mutex );
\r
// Raise SYSTEM_ERROR only after the lock is released.
9288 if ( result != -1 ) return;
\r
9289 error( RtAudioError::SYSTEM_ERROR );
\r
9292 void RtApiOss :: callbackEvent()
\r
9294 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9295 if ( stream_.state == STREAM_STOPPED ) {
\r
9296 MUTEX_LOCK( &stream_.mutex );
\r
9297 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9298 if ( stream_.state != STREAM_RUNNING ) {
\r
9299 MUTEX_UNLOCK( &stream_.mutex );
\r
9302 MUTEX_UNLOCK( &stream_.mutex );
\r
9305 if ( stream_.state == STREAM_CLOSED ) {
\r
9306 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9307 error( RtAudioError::WARNING );
\r
9311 // Invoke user callback to get fresh output data.
\r
9312 int doStopStream = 0;
\r
9313 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9314 double streamTime = getStreamTime();
\r
9315 RtAudioStreamStatus status = 0;
\r
9316 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9317 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9318 handle->xrun[0] = false;
\r
9320 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9321 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9322 handle->xrun[1] = false;
\r
9324 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9325 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9326 if ( doStopStream == 2 ) {
\r
9327 this->abortStream();
\r
9331 MUTEX_LOCK( &stream_.mutex );
\r
9333 // The state might change while waiting on a mutex.
\r
9334 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9339 RtAudioFormat format;
\r
9341 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9343 // Setup parameters and do buffer conversion if necessary.
\r
9344 if ( stream_.doConvertBuffer[0] ) {
\r
9345 buffer = stream_.deviceBuffer;
\r
9346 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9347 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9348 format = stream_.deviceFormat[0];
\r
9351 buffer = stream_.userBuffer[0];
\r
9352 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9353 format = stream_.userFormat;
\r
9356 // Do byte swapping if necessary.
\r
9357 if ( stream_.doByteSwap[0] )
\r
9358 byteSwapBuffer( buffer, samples, format );
\r
9360 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9362 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9363 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9364 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9365 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9366 handle->triggered = true;
\r
9369 // Write samples to device.
\r
9370 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9372 if ( result == -1 ) {
\r
9373 // We'll assume this is an underrun, though there isn't a
\r
9374 // specific means for determining that.
\r
9375 handle->xrun[0] = true;
\r
9376 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9377 error( RtAudioError::WARNING );
\r
9378 // Continue on to input section.
\r
9382 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9384 // Setup parameters.
\r
9385 if ( stream_.doConvertBuffer[1] ) {
\r
9386 buffer = stream_.deviceBuffer;
\r
9387 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9388 format = stream_.deviceFormat[1];
\r
9391 buffer = stream_.userBuffer[1];
\r
9392 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9393 format = stream_.userFormat;
\r
9396 // Read samples from device.
\r
9397 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9399 if ( result == -1 ) {
\r
9400 // We'll assume this is an overrun, though there isn't a
\r
9401 // specific means for determining that.
\r
9402 handle->xrun[1] = true;
\r
9403 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9404 error( RtAudioError::WARNING );
\r
9408 // Do byte swapping if necessary.
\r
9409 if ( stream_.doByteSwap[1] )
\r
9410 byteSwapBuffer( buffer, samples, format );
\r
9412 // Do buffer conversion if necessary.
\r
9413 if ( stream_.doConvertBuffer[1] )
\r
9414 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9418 MUTEX_UNLOCK( &stream_.mutex );
\r
9420 RtApi::tickStreamTime();
\r
9421 if ( doStopStream == 1 ) this->stopStream();
\r
9424 static void *ossCallbackHandler( void *ptr )
\r
9426 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9427 RtApiOss *object = (RtApiOss *) info->object;
\r
9428 bool *isRunning = &info->isRunning;
\r
9430 while ( *isRunning == true ) {
\r
9431 pthread_testcancel();
\r
9432 object->callbackEvent();
\r
9435 pthread_exit( NULL );
\r
9438 //******************** End of __LINUX_OSS__ *********************//
\r
9442 // *************************************************** //
\r
9444 // Protected common (OS-independent) RtAudio methods.
\r
9446 // *************************************************** //
\r
9448 // This method can be modified to control the behavior of error
\r
9449 // message printing.
\r
9450 void RtApi :: error( RtAudioError::Type type )
\r
9452 errorStream_.str(""); // clear the ostringstream
\r
9454 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9455 if ( errorCallback ) {
\r
9456 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9458 if ( firstErrorOccurred_ )
\r
9461 firstErrorOccurred_ = true;
\r
9462 const std::string errorMessage = errorText_;
\r
9464 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9465 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9469 errorCallback( type, errorMessage );
\r
9470 firstErrorOccurred_ = false;
\r
9474 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9475 std::cerr << '\n' << errorText_ << "\n\n";
\r
9476 else if ( type != RtAudioError::WARNING )
\r
9477 throw( RtAudioError( errorText_, type ) );
\r
9480 void RtApi :: verifyStream()
\r
9482 if ( stream_.state == STREAM_CLOSED ) {
\r
9483 errorText_ = "RtApi:: a stream is not open!";
\r
9484 error( RtAudioError::INVALID_USE );
\r
9488 void RtApi :: clearStreamInfo()
\r
9490 stream_.mode = UNINITIALIZED;
\r
9491 stream_.state = STREAM_CLOSED;
\r
9492 stream_.sampleRate = 0;
\r
9493 stream_.bufferSize = 0;
\r
9494 stream_.nBuffers = 0;
\r
9495 stream_.userFormat = 0;
\r
9496 stream_.userInterleaved = true;
\r
9497 stream_.streamTime = 0.0;
\r
9498 stream_.apiHandle = 0;
\r
9499 stream_.deviceBuffer = 0;
\r
9500 stream_.callbackInfo.callback = 0;
\r
9501 stream_.callbackInfo.userData = 0;
\r
9502 stream_.callbackInfo.isRunning = false;
\r
9503 stream_.callbackInfo.errorCallback = 0;
\r
9504 for ( int i=0; i<2; i++ ) {
\r
9505 stream_.device[i] = 11111;
\r
9506 stream_.doConvertBuffer[i] = false;
\r
9507 stream_.deviceInterleaved[i] = true;
\r
9508 stream_.doByteSwap[i] = false;
\r
9509 stream_.nUserChannels[i] = 0;
\r
9510 stream_.nDeviceChannels[i] = 0;
\r
9511 stream_.channelOffset[i] = 0;
\r
9512 stream_.deviceFormat[i] = 0;
\r
9513 stream_.latency[i] = 0;
\r
9514 stream_.userBuffer[i] = 0;
\r
9515 stream_.convertInfo[i].channels = 0;
\r
9516 stream_.convertInfo[i].inJump = 0;
\r
9517 stream_.convertInfo[i].outJump = 0;
\r
9518 stream_.convertInfo[i].inFormat = 0;
\r
9519 stream_.convertInfo[i].outFormat = 0;
\r
9520 stream_.convertInfo[i].inOffset.clear();
\r
9521 stream_.convertInfo[i].outOffset.clear();
\r
9525 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9527 if ( format == RTAUDIO_SINT16 )
\r
9529 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9531 else if ( format == RTAUDIO_FLOAT64 )
\r
9533 else if ( format == RTAUDIO_SINT24 )
\r
9535 else if ( format == RTAUDIO_SINT8 )
\r
9538 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9539 error( RtAudioError::WARNING );
\r
9544 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9546 if ( mode == INPUT ) { // convert device to user buffer
\r
9547 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9548 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9549 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9550 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9552 else { // convert user to device buffer
\r
9553 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9554 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9555 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9556 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9559 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9560 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9562 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9564 // Set up the interleave/deinterleave offsets.
\r
9565 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9566 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9567 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9568 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9569 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9570 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9571 stream_.convertInfo[mode].inJump = 1;
\r
9575 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9576 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9577 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9578 stream_.convertInfo[mode].outJump = 1;
\r
9582 else { // no (de)interleaving
\r
9583 if ( stream_.userInterleaved ) {
\r
9584 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9585 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9586 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9590 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9591 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9592 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9593 stream_.convertInfo[mode].inJump = 1;
\r
9594 stream_.convertInfo[mode].outJump = 1;
\r
9599 // Add channel offset.
\r
9600 if ( firstChannel > 0 ) {
\r
9601 if ( stream_.deviceInterleaved[mode] ) {
\r
9602 if ( mode == OUTPUT ) {
\r
9603 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9604 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9607 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9608 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9612 if ( mode == OUTPUT ) {
\r
9613 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9614 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9617 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9618 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9624 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9626 // This function does format conversion, input/output channel compensation, and
\r
9627 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9628 // the lower three bytes of a 32-bit integer.
\r
9630 // Clear our device buffer when in/out duplex device channels are different
\r
9631 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9632 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9633 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9636 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9638 Float64 *out = (Float64 *)outBuffer;
\r
9640 if (info.inFormat == RTAUDIO_SINT8) {
\r
9641 signed char *in = (signed char *)inBuffer;
\r
9642 scale = 1.0 / 127.5;
\r
9643 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9644 for (j=0; j<info.channels; j++) {
\r
9645 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9646 out[info.outOffset[j]] += 0.5;
\r
9647 out[info.outOffset[j]] *= scale;
\r
9649 in += info.inJump;
\r
9650 out += info.outJump;
\r
9653 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9654 Int16 *in = (Int16 *)inBuffer;
\r
9655 scale = 1.0 / 32767.5;
\r
9656 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9657 for (j=0; j<info.channels; j++) {
\r
9658 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9659 out[info.outOffset[j]] += 0.5;
\r
9660 out[info.outOffset[j]] *= scale;
\r
9662 in += info.inJump;
\r
9663 out += info.outJump;
\r
9666 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9667 Int24 *in = (Int24 *)inBuffer;
\r
9668 scale = 1.0 / 8388607.5;
\r
9669 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9670 for (j=0; j<info.channels; j++) {
\r
9671 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9672 out[info.outOffset[j]] += 0.5;
\r
9673 out[info.outOffset[j]] *= scale;
\r
9675 in += info.inJump;
\r
9676 out += info.outJump;
\r
9679 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9680 Int32 *in = (Int32 *)inBuffer;
\r
9681 scale = 1.0 / 2147483647.5;
\r
9682 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9683 for (j=0; j<info.channels; j++) {
\r
9684 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9685 out[info.outOffset[j]] += 0.5;
\r
9686 out[info.outOffset[j]] *= scale;
\r
9688 in += info.inJump;
\r
9689 out += info.outJump;
\r
9692 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9693 Float32 *in = (Float32 *)inBuffer;
\r
9694 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9695 for (j=0; j<info.channels; j++) {
\r
9696 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9698 in += info.inJump;
\r
9699 out += info.outJump;
\r
9702 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9703 // Channel compensation and/or (de)interleaving only.
\r
9704 Float64 *in = (Float64 *)inBuffer;
\r
9705 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9706 for (j=0; j<info.channels; j++) {
\r
9707 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9709 in += info.inJump;
\r
9710 out += info.outJump;
\r
9714 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9716 Float32 *out = (Float32 *)outBuffer;
\r
9718 if (info.inFormat == RTAUDIO_SINT8) {
\r
9719 signed char *in = (signed char *)inBuffer;
\r
9720 scale = (Float32) ( 1.0 / 127.5 );
\r
9721 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9722 for (j=0; j<info.channels; j++) {
\r
9723 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9724 out[info.outOffset[j]] += 0.5;
\r
9725 out[info.outOffset[j]] *= scale;
\r
9727 in += info.inJump;
\r
9728 out += info.outJump;
\r
9731 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9732 Int16 *in = (Int16 *)inBuffer;
\r
9733 scale = (Float32) ( 1.0 / 32767.5 );
\r
9734 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9735 for (j=0; j<info.channels; j++) {
\r
9736 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9737 out[info.outOffset[j]] += 0.5;
\r
9738 out[info.outOffset[j]] *= scale;
\r
9740 in += info.inJump;
\r
9741 out += info.outJump;
\r
9744 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9745 Int24 *in = (Int24 *)inBuffer;
\r
9746 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9747 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9748 for (j=0; j<info.channels; j++) {
\r
9749 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9750 out[info.outOffset[j]] += 0.5;
\r
9751 out[info.outOffset[j]] *= scale;
\r
9753 in += info.inJump;
\r
9754 out += info.outJump;
\r
9757 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9758 Int32 *in = (Int32 *)inBuffer;
\r
9759 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9760 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9761 for (j=0; j<info.channels; j++) {
\r
9762 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9763 out[info.outOffset[j]] += 0.5;
\r
9764 out[info.outOffset[j]] *= scale;
\r
9766 in += info.inJump;
\r
9767 out += info.outJump;
\r
9770 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9771 // Channel compensation and/or (de)interleaving only.
\r
9772 Float32 *in = (Float32 *)inBuffer;
\r
9773 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9774 for (j=0; j<info.channels; j++) {
\r
9775 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9777 in += info.inJump;
\r
9778 out += info.outJump;
\r
9781 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9782 Float64 *in = (Float64 *)inBuffer;
\r
9783 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9784 for (j=0; j<info.channels; j++) {
\r
9785 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9787 in += info.inJump;
\r
9788 out += info.outJump;
\r
9792 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9793 Int32 *out = (Int32 *)outBuffer;
\r
9794 if (info.inFormat == RTAUDIO_SINT8) {
\r
9795 signed char *in = (signed char *)inBuffer;
\r
9796 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9797 for (j=0; j<info.channels; j++) {
\r
9798 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9799 out[info.outOffset[j]] <<= 24;
\r
9801 in += info.inJump;
\r
9802 out += info.outJump;
\r
9805 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9806 Int16 *in = (Int16 *)inBuffer;
\r
9807 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9808 for (j=0; j<info.channels; j++) {
\r
9809 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9810 out[info.outOffset[j]] <<= 16;
\r
9812 in += info.inJump;
\r
9813 out += info.outJump;
\r
9816 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9817 Int24 *in = (Int24 *)inBuffer;
\r
9818 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9819 for (j=0; j<info.channels; j++) {
\r
9820 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9821 out[info.outOffset[j]] <<= 8;
\r
9823 in += info.inJump;
\r
9824 out += info.outJump;
\r
9827 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9828 // Channel compensation and/or (de)interleaving only.
\r
9829 Int32 *in = (Int32 *)inBuffer;
\r
9830 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9831 for (j=0; j<info.channels; j++) {
\r
9832 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9834 in += info.inJump;
\r
9835 out += info.outJump;
\r
9838 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9839 Float32 *in = (Float32 *)inBuffer;
\r
9840 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9841 for (j=0; j<info.channels; j++) {
\r
9842 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9844 in += info.inJump;
\r
9845 out += info.outJump;
\r
9848 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9849 Float64 *in = (Float64 *)inBuffer;
\r
9850 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9851 for (j=0; j<info.channels; j++) {
\r
9852 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9854 in += info.inJump;
\r
9855 out += info.outJump;
\r
9859 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9860 Int24 *out = (Int24 *)outBuffer;
\r
9861 if (info.inFormat == RTAUDIO_SINT8) {
\r
9862 signed char *in = (signed char *)inBuffer;
\r
9863 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9864 for (j=0; j<info.channels; j++) {
\r
9865 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9866 //out[info.outOffset[j]] <<= 16;
\r
9868 in += info.inJump;
\r
9869 out += info.outJump;
\r
9872 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9873 Int16 *in = (Int16 *)inBuffer;
\r
9874 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9875 for (j=0; j<info.channels; j++) {
\r
9876 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9877 //out[info.outOffset[j]] <<= 8;
\r
9879 in += info.inJump;
\r
9880 out += info.outJump;
\r
9883 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9884 // Channel compensation and/or (de)interleaving only.
\r
9885 Int24 *in = (Int24 *)inBuffer;
\r
9886 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9887 for (j=0; j<info.channels; j++) {
\r
9888 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9890 in += info.inJump;
\r
9891 out += info.outJump;
\r
9894 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9895 Int32 *in = (Int32 *)inBuffer;
\r
9896 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9897 for (j=0; j<info.channels; j++) {
\r
9898 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9899 //out[info.outOffset[j]] >>= 8;
\r
9901 in += info.inJump;
\r
9902 out += info.outJump;
\r
9905 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9906 Float32 *in = (Float32 *)inBuffer;
\r
9907 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9908 for (j=0; j<info.channels; j++) {
\r
9909 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9911 in += info.inJump;
\r
9912 out += info.outJump;
\r
9915 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9916 Float64 *in = (Float64 *)inBuffer;
\r
9917 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9918 for (j=0; j<info.channels; j++) {
\r
9919 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9921 in += info.inJump;
\r
9922 out += info.outJump;
\r
9926 else if (info.outFormat == RTAUDIO_SINT16) {
\r
9927 Int16 *out = (Int16 *)outBuffer;
\r
9928 if (info.inFormat == RTAUDIO_SINT8) {
\r
9929 signed char *in = (signed char *)inBuffer;
\r
9930 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9931 for (j=0; j<info.channels; j++) {
\r
9932 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
9933 out[info.outOffset[j]] <<= 8;
\r
9935 in += info.inJump;
\r
9936 out += info.outJump;
\r
9939 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9940 // Channel compensation and/or (de)interleaving only.
\r
9941 Int16 *in = (Int16 *)inBuffer;
\r
9942 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9943 for (j=0; j<info.channels; j++) {
\r
9944 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9946 in += info.inJump;
\r
9947 out += info.outJump;
\r
9950 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9951 Int24 *in = (Int24 *)inBuffer;
\r
9952 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9953 for (j=0; j<info.channels; j++) {
\r
9954 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
9956 in += info.inJump;
\r
9957 out += info.outJump;
\r
9960 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9961 Int32 *in = (Int32 *)inBuffer;
\r
9962 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9963 for (j=0; j<info.channels; j++) {
\r
9964 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
9966 in += info.inJump;
\r
9967 out += info.outJump;
\r
9970 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9971 Float32 *in = (Float32 *)inBuffer;
\r
9972 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9973 for (j=0; j<info.channels; j++) {
\r
9974 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9976 in += info.inJump;
\r
9977 out += info.outJump;
\r
9980 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9981 Float64 *in = (Float64 *)inBuffer;
\r
9982 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9983 for (j=0; j<info.channels; j++) {
\r
9984 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9986 in += info.inJump;
\r
9987 out += info.outJump;
\r
9991 else if (info.outFormat == RTAUDIO_SINT8) {
\r
9992 signed char *out = (signed char *)outBuffer;
\r
9993 if (info.inFormat == RTAUDIO_SINT8) {
\r
9994 // Channel compensation and/or (de)interleaving only.
\r
9995 signed char *in = (signed char *)inBuffer;
\r
9996 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9997 for (j=0; j<info.channels; j++) {
\r
9998 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10000 in += info.inJump;
\r
10001 out += info.outJump;
\r
10004 if (info.inFormat == RTAUDIO_SINT16) {
\r
10005 Int16 *in = (Int16 *)inBuffer;
\r
10006 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10007 for (j=0; j<info.channels; j++) {
\r
10008 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10010 in += info.inJump;
\r
10011 out += info.outJump;
\r
10014 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10015 Int24 *in = (Int24 *)inBuffer;
\r
10016 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10017 for (j=0; j<info.channels; j++) {
\r
10018 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10020 in += info.inJump;
\r
10021 out += info.outJump;
\r
10024 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10025 Int32 *in = (Int32 *)inBuffer;
\r
10026 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10027 for (j=0; j<info.channels; j++) {
\r
10028 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10030 in += info.inJump;
\r
10031 out += info.outJump;
\r
10034 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10035 Float32 *in = (Float32 *)inBuffer;
\r
10036 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10037 for (j=0; j<info.channels; j++) {
\r
10038 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10040 in += info.inJump;
\r
10041 out += info.outJump;
\r
10044 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10045 Float64 *in = (Float64 *)inBuffer;
\r
10046 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10047 for (j=0; j<info.channels; j++) {
\r
10048 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10050 in += info.inJump;
\r
10051 out += info.outJump;
\r
10057 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10058 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10059 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10061 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10063 register char val;
\r
10064 register char *ptr;
\r
10067 if ( format == RTAUDIO_SINT16 ) {
\r
10068 for ( unsigned int i=0; i<samples; i++ ) {
\r
10069 // Swap 1st and 2nd bytes.
\r
10071 *(ptr) = *(ptr+1);
\r
10074 // Increment 2 bytes.
\r
10078 else if ( format == RTAUDIO_SINT32 ||
\r
10079 format == RTAUDIO_FLOAT32 ) {
\r
10080 for ( unsigned int i=0; i<samples; i++ ) {
\r
10081 // Swap 1st and 4th bytes.
\r
10083 *(ptr) = *(ptr+3);
\r
10086 // Swap 2nd and 3rd bytes.
\r
10089 *(ptr) = *(ptr+1);
\r
10092 // Increment 3 more bytes.
\r
10096 else if ( format == RTAUDIO_SINT24 ) {
\r
10097 for ( unsigned int i=0; i<samples; i++ ) {
\r
10098 // Swap 1st and 3rd bytes.
\r
10100 *(ptr) = *(ptr+2);
\r
10103 // Increment 2 more bytes.
\r
10107 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10108 for ( unsigned int i=0; i<samples; i++ ) {
\r
10109 // Swap 1st and 8th bytes
\r
10111 *(ptr) = *(ptr+7);
\r
10114 // Swap 2nd and 7th bytes
\r
10117 *(ptr) = *(ptr+5);
\r
10120 // Swap 3rd and 6th bytes
\r
10123 *(ptr) = *(ptr+3);
\r
10126 // Swap 4th and 5th bytes
\r
10129 *(ptr) = *(ptr+1);
\r
10132 // Increment 5 more bytes.
\r
10138 // Indentation settings for Vim and Emacs
\r
10140 // Local Variables:
\r
10141 // c-basic-offset: 2
\r
10142 // indent-tabs-mode: nil
\r
10145 // vim: et sts=2 sw=2
\r