/************************************************************************/
/*! \class RtAudio
    \brief Realtime audio i/o C++ classes.

    RtAudio provides a common API (Application Programming Interface)
    for realtime audio input/output across Linux (native ALSA, Jack,
    and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
    (DirectSound, ASIO and WASAPI) operating systems.

    RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/

    RtAudio: realtime audio i/o C++ classes
    Copyright (c) 2001-2014 Gary P. Scavone

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation files
    (the "Software"), to deal in the Software without restriction,
    including without limitation the rights to use, copy, modify, merge,
    publish, distribute, sublicense, and/or sell copies of the Software,
    and to permit persons to whom the Software is furnished to do so,
    subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    Any person wishing to distribute modifications to the Software is
    asked to send the modifications to the original developer so that
    they can be incorporated into the canonical version.  This is,
    however, not a binding provision of this license.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
    ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
    CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/************************************************************************/

// RtAudio: Version 4.1.1pre
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-neutral mutex wrappers: Windows critical sections, POSIX
// pthread mutexes elsewhere, and no-op dummies when no API is compiled in.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 std::string RtAudio :: getVersion( void ) throw()
\r
80 return RTAUDIO_VERSION;
\r
83 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
87 // The order here will control the order of RtAudio's API search in
\r
89 #if defined(__UNIX_JACK__)
\r
90 apis.push_back( UNIX_JACK );
\r
92 #if defined(__LINUX_ALSA__)
\r
93 apis.push_back( LINUX_ALSA );
\r
95 #if defined(__LINUX_PULSE__)
\r
96 apis.push_back( LINUX_PULSE );
\r
98 #if defined(__LINUX_OSS__)
\r
99 apis.push_back( LINUX_OSS );
\r
101 #if defined(__WINDOWS_ASIO__)
\r
102 apis.push_back( WINDOWS_ASIO );
\r
104 #if defined(__WINDOWS_WASAPI__)
\r
105 apis.push_back( WINDOWS_WASAPI );
\r
107 #if defined(__WINDOWS_DS__)
\r
108 apis.push_back( WINDOWS_DS );
\r
110 #if defined(__MACOSX_CORE__)
\r
111 apis.push_back( MACOSX_CORE );
\r
113 #if defined(__RTAUDIO_DUMMY__)
\r
114 apis.push_back( RTAUDIO_DUMMY );
\r
118 void RtAudio :: openRtApi( RtAudio::Api api )
\r
124 #if defined(__UNIX_JACK__)
\r
125 if ( api == UNIX_JACK )
\r
126 rtapi_ = new RtApiJack();
\r
128 #if defined(__LINUX_ALSA__)
\r
129 if ( api == LINUX_ALSA )
\r
130 rtapi_ = new RtApiAlsa();
\r
132 #if defined(__LINUX_PULSE__)
\r
133 if ( api == LINUX_PULSE )
\r
134 rtapi_ = new RtApiPulse();
\r
136 #if defined(__LINUX_OSS__)
\r
137 if ( api == LINUX_OSS )
\r
138 rtapi_ = new RtApiOss();
\r
140 #if defined(__WINDOWS_ASIO__)
\r
141 if ( api == WINDOWS_ASIO )
\r
142 rtapi_ = new RtApiAsio();
\r
144 #if defined(__WINDOWS_WASAPI__)
\r
145 if ( api == WINDOWS_WASAPI )
\r
146 rtapi_ = new RtApiWasapi();
\r
148 #if defined(__WINDOWS_DS__)
\r
149 if ( api == WINDOWS_DS )
\r
150 rtapi_ = new RtApiDs();
\r
152 #if defined(__MACOSX_CORE__)
\r
153 if ( api == MACOSX_CORE )
\r
154 rtapi_ = new RtApiCore();
\r
156 #if defined(__RTAUDIO_DUMMY__)
\r
157 if ( api == RTAUDIO_DUMMY )
\r
158 rtapi_ = new RtApiDummy();
\r
162 RtAudio :: RtAudio( RtAudio::Api api )
\r
166 if ( api != UNSPECIFIED ) {
\r
167 // Attempt to open the specified API.
\r
169 if ( rtapi_ ) return;
\r
171 // No compiled support for specified API value. Issue a debug
\r
172 // warning and continue as if no API was specified.
\r
173 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
176 // Iterate through the compiled APIs and return as soon as we find
\r
177 // one with at least one device or we reach the end of the list.
\r
178 std::vector< RtAudio::Api > apis;
\r
179 getCompiledApi( apis );
\r
180 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
181 openRtApi( apis[i] );
\r
182 if ( rtapi_->getDeviceCount() ) break;
\r
185 if ( rtapi_ ) return;
\r
187 // It should not be possible to get here because the preprocessor
\r
188 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
189 // API-specific definitions are passed to the compiler. But just in
\r
190 // case something weird happens, we'll thow an error.
\r
191 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
192 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
195 RtAudio :: ~RtAudio() throw()
\r
201 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
202 RtAudio::StreamParameters *inputParameters,
\r
203 RtAudioFormat format, unsigned int sampleRate,
\r
204 unsigned int *bufferFrames,
\r
205 RtAudioCallback callback, void *userData,
\r
206 RtAudio::StreamOptions *options,
\r
207 RtAudioErrorCallback errorCallback )
\r
209 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
210 sampleRate, bufferFrames, callback,
\r
211 userData, options, errorCallback );
\r
214 // *************************************************** //
\r
216 // Public RtApi definitions (see end of file for
\r
217 // private or protected utility functions).
\r
219 // *************************************************** //
\r
223 stream_.state = STREAM_CLOSED;
\r
224 stream_.mode = UNINITIALIZED;
\r
225 stream_.apiHandle = 0;
\r
226 stream_.userBuffer[0] = 0;
\r
227 stream_.userBuffer[1] = 0;
\r
228 MUTEX_INITIALIZE( &stream_.mutex );
\r
229 showWarnings_ = true;
\r
230 firstErrorOccurred_ = false;
\r
235 MUTEX_DESTROY( &stream_.mutex );
\r
238 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
239 RtAudio::StreamParameters *iParams,
\r
240 RtAudioFormat format, unsigned int sampleRate,
\r
241 unsigned int *bufferFrames,
\r
242 RtAudioCallback callback, void *userData,
\r
243 RtAudio::StreamOptions *options,
\r
244 RtAudioErrorCallback errorCallback )
\r
246 if ( stream_.state != STREAM_CLOSED ) {
\r
247 errorText_ = "RtApi::openStream: a stream is already open!";
\r
248 error( RtAudioError::INVALID_USE );
\r
252 // Clear stream information potentially left from a previously open stream.
\r
255 if ( oParams && oParams->nChannels < 1 ) {
\r
256 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
257 error( RtAudioError::INVALID_USE );
\r
261 if ( iParams && iParams->nChannels < 1 ) {
\r
262 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
263 error( RtAudioError::INVALID_USE );
\r
267 if ( oParams == NULL && iParams == NULL ) {
\r
268 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
269 error( RtAudioError::INVALID_USE );
\r
273 if ( formatBytes(format) == 0 ) {
\r
274 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
275 error( RtAudioError::INVALID_USE );
\r
279 unsigned int nDevices = getDeviceCount();
\r
280 unsigned int oChannels = 0;
\r
282 oChannels = oParams->nChannels;
\r
283 if ( oParams->deviceId >= nDevices ) {
\r
284 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
285 error( RtAudioError::INVALID_USE );
\r
290 unsigned int iChannels = 0;
\r
292 iChannels = iParams->nChannels;
\r
293 if ( iParams->deviceId >= nDevices ) {
\r
294 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
295 error( RtAudioError::INVALID_USE );
\r
302 if ( oChannels > 0 ) {
\r
304 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
305 sampleRate, format, bufferFrames, options );
\r
306 if ( result == false ) {
\r
307 error( RtAudioError::SYSTEM_ERROR );
\r
312 if ( iChannels > 0 ) {
\r
314 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
315 sampleRate, format, bufferFrames, options );
\r
316 if ( result == false ) {
\r
317 if ( oChannels > 0 ) closeStream();
\r
318 error( RtAudioError::SYSTEM_ERROR );
\r
323 stream_.callbackInfo.callback = (void *) callback;
\r
324 stream_.callbackInfo.userData = userData;
\r
325 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
327 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
328 stream_.state = STREAM_STOPPED;
\r
331 unsigned int RtApi :: getDefaultInputDevice( void )
\r
333 // Should be implemented in subclasses if possible.
\r
337 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
339 // Should be implemented in subclasses if possible.
\r
343 void RtApi :: closeStream( void )
\r
345 // MUST be implemented in subclasses!
\r
349 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
350 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
351 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
352 RtAudio::StreamOptions * /*options*/ )
\r
354 // MUST be implemented in subclasses!
\r
358 void RtApi :: tickStreamTime( void )
\r
360 // Subclasses that do not provide their own implementation of
\r
361 // getStreamTime should call this function once per buffer I/O to
\r
362 // provide basic stream time support.
\r
364 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
366 #if defined( HAVE_GETTIMEOFDAY )
\r
367 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
371 long RtApi :: getStreamLatency( void )
\r
375 long totalLatency = 0;
\r
376 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
377 totalLatency = stream_.latency[0];
\r
378 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
379 totalLatency += stream_.latency[1];
\r
381 return totalLatency;
\r
384 double RtApi :: getStreamTime( void )
\r
388 #if defined( HAVE_GETTIMEOFDAY )
\r
389 // Return a very accurate estimate of the stream time by
\r
390 // adding in the elapsed time since the last tick.
\r
391 struct timeval then;
\r
392 struct timeval now;
\r
394 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
395 return stream_.streamTime;
\r
397 gettimeofday( &now, NULL );
\r
398 then = stream_.lastTickTimestamp;
\r
399 return stream_.streamTime +
\r
400 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
401 (then.tv_sec + 0.000001 * then.tv_usec));
\r
403 return stream_.streamTime;
\r
407 unsigned int RtApi :: getStreamSampleRate( void )
\r
411 return stream_.sampleRate;
\r
415 // *************************************************** //
\r
417 // OS/API-specific methods.
\r
419 // *************************************************** //
\r
421 #if defined(__MACOSX_CORE__)
\r
423 // The OS X CoreAudio API is designed to use a separate callback
\r
424 // procedure for each of its audio devices. A single RtAudio duplex
\r
425 // stream using two different devices is supported here, though it
\r
426 // cannot be guaranteed to always behave correctly because we cannot
\r
427 // synchronize these two callbacks.
\r
429 // A property listener is installed for over/underrun information.
\r
430 // However, no functionality is currently provided to allow property
\r
431 // listeners to trigger user handlers because it is unclear what could
\r
432 // be done if a critical stream parameter (buffer size, sample rate,
\r
433 // device disconnect) notification arrived. The listeners entail
\r
434 // quite a bit of extra code and most likely, a user program wouldn't
\r
435 // be prepared for the result anyway. However, we do provide a flag
\r
436 // to the client callback function to inform of an over/underrun.
\r
438 // A structure to hold various information related to the CoreAudio API
\r
440 struct CoreHandle {
\r
441 AudioDeviceID id[2]; // device ids
\r
442 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
443 AudioDeviceIOProcID procId[2];
\r
445 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
446 UInt32 nStreams[2]; // number of streams to use
\r
448 char *deviceBuffer;
\r
449 pthread_cond_t condition;
\r
450 int drainCounter; // Tracks callback counts when draining
\r
451 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
454 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
457 RtApiCore:: RtApiCore()
\r
459 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
460 // This is a largely undocumented but absolutely necessary
\r
461 // requirement starting with OS-X 10.6. If not called, queries and
\r
462 // updates to various audio device properties are not handled
\r
464 CFRunLoopRef theRunLoop = NULL;
\r
465 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
466 kAudioObjectPropertyScopeGlobal,
\r
467 kAudioObjectPropertyElementMaster };
\r
468 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
469 if ( result != noErr ) {
\r
470 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
471 error( RtAudioError::WARNING );
\r
476 RtApiCore :: ~RtApiCore()
\r
478 // The subclass destructor gets called before the base class
\r
479 // destructor, so close an existing stream before deallocating
\r
480 // apiDeviceId memory.
\r
481 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
484 unsigned int RtApiCore :: getDeviceCount( void )
\r
486 // Find out how many audio devices there are, if any.
\r
488 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
489 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
490 if ( result != noErr ) {
\r
491 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
492 error( RtAudioError::WARNING );
\r
496 return dataSize / sizeof( AudioDeviceID );
\r
499 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
501 unsigned int nDevices = getDeviceCount();
\r
502 if ( nDevices <= 1 ) return 0;
\r
505 UInt32 dataSize = sizeof( AudioDeviceID );
\r
506 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
507 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
508 if ( result != noErr ) {
\r
509 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
510 error( RtAudioError::WARNING );
\r
514 dataSize *= nDevices;
\r
515 AudioDeviceID deviceList[ nDevices ];
\r
516 property.mSelector = kAudioHardwarePropertyDevices;
\r
517 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
518 if ( result != noErr ) {
\r
519 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
520 error( RtAudioError::WARNING );
\r
524 for ( unsigned int i=0; i<nDevices; i++ )
\r
525 if ( id == deviceList[i] ) return i;
\r
527 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
528 error( RtAudioError::WARNING );
\r
532 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
534 unsigned int nDevices = getDeviceCount();
\r
535 if ( nDevices <= 1 ) return 0;
\r
538 UInt32 dataSize = sizeof( AudioDeviceID );
\r
539 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
540 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
541 if ( result != noErr ) {
\r
542 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
543 error( RtAudioError::WARNING );
\r
547 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
548 AudioDeviceID deviceList[ nDevices ];
\r
549 property.mSelector = kAudioHardwarePropertyDevices;
\r
550 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
551 if ( result != noErr ) {
\r
552 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
553 error( RtAudioError::WARNING );
\r
557 for ( unsigned int i=0; i<nDevices; i++ )
\r
558 if ( id == deviceList[i] ) return i;
\r
560 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
561 error( RtAudioError::WARNING );
\r
565 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
567 RtAudio::DeviceInfo info;
\r
568 info.probed = false;
\r
571 unsigned int nDevices = getDeviceCount();
\r
572 if ( nDevices == 0 ) {
\r
573 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
574 error( RtAudioError::INVALID_USE );
\r
578 if ( device >= nDevices ) {
\r
579 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
580 error( RtAudioError::INVALID_USE );
\r
584 AudioDeviceID deviceList[ nDevices ];
\r
585 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
586 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
587 kAudioObjectPropertyScopeGlobal,
\r
588 kAudioObjectPropertyElementMaster };
\r
589 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
590 0, NULL, &dataSize, (void *) &deviceList );
\r
591 if ( result != noErr ) {
\r
592 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
593 error( RtAudioError::WARNING );
\r
597 AudioDeviceID id = deviceList[ device ];
\r
599 // Get the device name.
\r
601 CFStringRef cfname;
\r
602 dataSize = sizeof( CFStringRef );
\r
603 property.mSelector = kAudioObjectPropertyManufacturer;
\r
604 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
605 if ( result != noErr ) {
\r
606 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
607 errorText_ = errorStream_.str();
\r
608 error( RtAudioError::WARNING );
\r
612 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
613 int length = CFStringGetLength(cfname);
\r
614 char *mname = (char *)malloc(length * 3 + 1);
\r
615 #if defined( UNICODE ) || defined( _UNICODE )
\r
616 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
618 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
620 info.name.append( (const char *)mname, strlen(mname) );
\r
621 info.name.append( ": " );
\r
622 CFRelease( cfname );
\r
625 property.mSelector = kAudioObjectPropertyName;
\r
626 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
627 if ( result != noErr ) {
\r
628 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
629 errorText_ = errorStream_.str();
\r
630 error( RtAudioError::WARNING );
\r
634 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
635 length = CFStringGetLength(cfname);
\r
636 char *name = (char *)malloc(length * 3 + 1);
\r
637 #if defined( UNICODE ) || defined( _UNICODE )
\r
638 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
640 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
642 info.name.append( (const char *)name, strlen(name) );
\r
643 CFRelease( cfname );
\r
646 // Get the output stream "configuration".
\r
647 AudioBufferList *bufferList = nil;
\r
648 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
649 property.mScope = kAudioDevicePropertyScopeOutput;
\r
650 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
652 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
653 if ( result != noErr || dataSize == 0 ) {
\r
654 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
655 errorText_ = errorStream_.str();
\r
656 error( RtAudioError::WARNING );
\r
660 // Allocate the AudioBufferList.
\r
661 bufferList = (AudioBufferList *) malloc( dataSize );
\r
662 if ( bufferList == NULL ) {
\r
663 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
664 error( RtAudioError::WARNING );
\r
668 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
669 if ( result != noErr || dataSize == 0 ) {
\r
670 free( bufferList );
\r
671 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
672 errorText_ = errorStream_.str();
\r
673 error( RtAudioError::WARNING );
\r
677 // Get output channel information.
\r
678 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
679 for ( i=0; i<nStreams; i++ )
\r
680 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
681 free( bufferList );
\r
683 // Get the input stream "configuration".
\r
684 property.mScope = kAudioDevicePropertyScopeInput;
\r
685 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
686 if ( result != noErr || dataSize == 0 ) {
\r
687 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
688 errorText_ = errorStream_.str();
\r
689 error( RtAudioError::WARNING );
\r
693 // Allocate the AudioBufferList.
\r
694 bufferList = (AudioBufferList *) malloc( dataSize );
\r
695 if ( bufferList == NULL ) {
\r
696 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
697 error( RtAudioError::WARNING );
\r
701 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
702 if (result != noErr || dataSize == 0) {
\r
703 free( bufferList );
\r
704 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
705 errorText_ = errorStream_.str();
\r
706 error( RtAudioError::WARNING );
\r
710 // Get input channel information.
\r
711 nStreams = bufferList->mNumberBuffers;
\r
712 for ( i=0; i<nStreams; i++ )
\r
713 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
714 free( bufferList );
\r
716 // If device opens for both playback and capture, we determine the channels.
\r
717 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
718 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
720 // Probe the device sample rates.
\r
721 bool isInput = false;
\r
722 if ( info.outputChannels == 0 ) isInput = true;
\r
724 // Determine the supported sample rates.
\r
725 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
726 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
727 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
728 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
736 AudioValueRange rangeList[ nRanges ];
\r
737 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
738 if ( result != kAudioHardwareNoError ) {
\r
739 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
740 errorText_ = errorStream_.str();
\r
741 error( RtAudioError::WARNING );
\r
745 // The sample rate reporting mechanism is a bit of a mystery. It
\r
746 // seems that it can either return individual rates or a range of
\r
747 // rates. I assume that if the min / max range values are the same,
\r
748 // then that represents a single supported rate and if the min / max
\r
749 // range values are different, the device supports an arbitrary
\r
750 // range of values (though there might be multiple ranges, so we'll
\r
751 // use the most conservative range).
\r
752 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
753 bool haveValueRange = false;
\r
754 info.sampleRates.clear();
\r
755 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
756 if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
\r
757 info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
\r
759 haveValueRange = true;
\r
760 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
761 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
765 if ( haveValueRange ) {
\r
766 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
767 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
768 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
772 // Sort and remove any redundant values
\r
773 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
774 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
776 if ( info.sampleRates.size() == 0 ) {
\r
777 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
778 errorText_ = errorStream_.str();
\r
779 error( RtAudioError::WARNING );
\r
783 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
784 // Thus, any other "physical" formats supported by the device are of
\r
785 // no interest to the client.
\r
786 info.nativeFormats = RTAUDIO_FLOAT32;
\r
788 if ( info.outputChannels > 0 )
\r
789 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
790 if ( info.inputChannels > 0 )
\r
791 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
793 info.probed = true;
\r
797 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
798 const AudioTimeStamp* /*inNow*/,
\r
799 const AudioBufferList* inInputData,
\r
800 const AudioTimeStamp* /*inInputTime*/,
\r
801 AudioBufferList* outOutputData,
\r
802 const AudioTimeStamp* /*inOutputTime*/,
\r
803 void* infoPointer )
\r
805 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
807 RtApiCore *object = (RtApiCore *) info->object;
\r
808 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
809 return kAudioHardwareUnspecifiedError;
\r
811 return kAudioHardwareNoError;
\r
814 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
816 const AudioObjectPropertyAddress properties[],
\r
817 void* handlePointer )
\r
819 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
820 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
821 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
822 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
823 handle->xrun[1] = true;
\r
825 handle->xrun[0] = true;
\r
829 return kAudioHardwareNoError;
\r
832 static OSStatus rateListener( AudioObjectID inDevice,
\r
833 UInt32 /*nAddresses*/,
\r
834 const AudioObjectPropertyAddress /*properties*/[],
\r
835 void* ratePointer )
\r
837 Float64 *rate = (Float64 *) ratePointer;
\r
838 UInt32 dataSize = sizeof( Float64 );
\r
839 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
840 kAudioObjectPropertyScopeGlobal,
\r
841 kAudioObjectPropertyElementMaster };
\r
842 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
843 return kAudioHardwareNoError;
\r
846 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
847 unsigned int firstChannel, unsigned int sampleRate,
\r
848 RtAudioFormat format, unsigned int *bufferSize,
\r
849 RtAudio::StreamOptions *options )
\r
852 unsigned int nDevices = getDeviceCount();
\r
853 if ( nDevices == 0 ) {
\r
854 // This should not happen because a check is made before this function is called.
\r
855 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
859 if ( device >= nDevices ) {
\r
860 // This should not happen because a check is made before this function is called.
\r
861 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
865 AudioDeviceID deviceList[ nDevices ];
\r
866 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
867 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
868 kAudioObjectPropertyScopeGlobal,
\r
869 kAudioObjectPropertyElementMaster };
\r
870 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
871 0, NULL, &dataSize, (void *) &deviceList );
\r
872 if ( result != noErr ) {
\r
873 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
877 AudioDeviceID id = deviceList[ device ];
\r
879 // Setup for stream mode.
\r
880 bool isInput = false;
\r
881 if ( mode == INPUT ) {
\r
883 property.mScope = kAudioDevicePropertyScopeInput;
\r
886 property.mScope = kAudioDevicePropertyScopeOutput;
\r
888 // Get the stream "configuration".
\r
889 AudioBufferList *bufferList = nil;
\r
891 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
892 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
893 if ( result != noErr || dataSize == 0 ) {
\r
894 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
895 errorText_ = errorStream_.str();
\r
899 // Allocate the AudioBufferList.
\r
900 bufferList = (AudioBufferList *) malloc( dataSize );
\r
901 if ( bufferList == NULL ) {
\r
902 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
906 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
907 if (result != noErr || dataSize == 0) {
\r
908 free( bufferList );
\r
909 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
910 errorText_ = errorStream_.str();
\r
914 // Search for one or more streams that contain the desired number of
\r
915 // channels. CoreAudio devices can have an arbitrary number of
\r
916 // streams and each stream can have an arbitrary number of channels.
\r
917 // For each stream, a single buffer of interleaved samples is
\r
918 // provided. RtAudio prefers the use of one stream of interleaved
\r
919 // data or multiple consecutive single-channel streams. However, we
\r
920 // now support multiple consecutive multi-channel streams of
\r
921 // interleaved data as well.
\r
922 UInt32 iStream, offsetCounter = firstChannel;
\r
923 UInt32 nStreams = bufferList->mNumberBuffers;
\r
924 bool monoMode = false;
\r
925 bool foundStream = false;
\r
927 // First check that the device supports the requested number of
\r
929 UInt32 deviceChannels = 0;
\r
930 for ( iStream=0; iStream<nStreams; iStream++ )
\r
931 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
933 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
934 free( bufferList );
\r
935 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
936 errorText_ = errorStream_.str();
\r
940 // Look for a single stream meeting our needs.
\r
941 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
942 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
943 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
944 if ( streamChannels >= channels + offsetCounter ) {
\r
945 firstStream = iStream;
\r
946 channelOffset = offsetCounter;
\r
947 foundStream = true;
\r
950 if ( streamChannels > offsetCounter ) break;
\r
951 offsetCounter -= streamChannels;
\r
954 // If we didn't find a single stream above, then we should be able
\r
955 // to meet the channel specification with multiple streams.
\r
956 if ( foundStream == false ) {
\r
958 offsetCounter = firstChannel;
\r
959 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
960 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
961 if ( streamChannels > offsetCounter ) break;
\r
962 offsetCounter -= streamChannels;
\r
965 firstStream = iStream;
\r
966 channelOffset = offsetCounter;
\r
967 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
969 if ( streamChannels > 1 ) monoMode = false;
\r
970 while ( channelCounter > 0 ) {
\r
971 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
972 if ( streamChannels > 1 ) monoMode = false;
\r
973 channelCounter -= streamChannels;
\r
978 free( bufferList );
\r
980 // Determine the buffer size.
\r
981 AudioValueRange bufferRange;
\r
982 dataSize = sizeof( AudioValueRange );
\r
983 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
984 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
986 if ( result != noErr ) {
\r
987 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
988 errorText_ = errorStream_.str();
\r
992 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
993 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
994 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
996 // Set the buffer size. For multiple streams, I'm assuming we only
\r
997 // need to make this setting for the master channel.
\r
998 UInt32 theSize = (UInt32) *bufferSize;
\r
999 dataSize = sizeof( UInt32 );
\r
1000 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1001 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1003 if ( result != noErr ) {
\r
1004 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1005 errorText_ = errorStream_.str();
\r
1009 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1010 // MUST be the same in both directions!
\r
1011 *bufferSize = theSize;
\r
1012 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1013 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1014 errorText_ = errorStream_.str();
\r
1018 stream_.bufferSize = *bufferSize;
\r
1019 stream_.nBuffers = 1;
\r
1021 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1022 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1024 dataSize = sizeof( hog_pid );
\r
1025 property.mSelector = kAudioDevicePropertyHogMode;
\r
1026 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1027 if ( result != noErr ) {
\r
1028 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1029 errorText_ = errorStream_.str();
\r
1033 if ( hog_pid != getpid() ) {
\r
1034 hog_pid = getpid();
\r
1035 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1036 if ( result != noErr ) {
\r
1037 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1038 errorText_ = errorStream_.str();
\r
1044 // Check and if necessary, change the sample rate for the device.
\r
1045 Float64 nominalRate;
\r
1046 dataSize = sizeof( Float64 );
\r
1047 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1048 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1049 if ( result != noErr ) {
\r
1050 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1051 errorText_ = errorStream_.str();
\r
1055 // Only change the sample rate if off by more than 1 Hz.
\r
1056 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1058 // Set a property listener for the sample rate change
\r
1059 Float64 reportedRate = 0.0;
\r
1060 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1061 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1062 if ( result != noErr ) {
\r
1063 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1064 errorText_ = errorStream_.str();
\r
1068 nominalRate = (Float64) sampleRate;
\r
1069 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1070 if ( result != noErr ) {
\r
1071 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1072 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1073 errorText_ = errorStream_.str();
\r
1077 // Now wait until the reported nominal rate is what we just set.
\r
1078 UInt32 microCounter = 0;
\r
1079 while ( reportedRate != nominalRate ) {
\r
1080 microCounter += 5000;
\r
1081 if ( microCounter > 5000000 ) break;
\r
1085 // Remove the property listener.
\r
1086 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1088 if ( microCounter > 5000000 ) {
\r
1089 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1090 errorText_ = errorStream_.str();
\r
1095 // Now set the stream format for all streams. Also, check the
\r
1096 // physical format of the device and change that if necessary.
\r
1097 AudioStreamBasicDescription description;
\r
1098 dataSize = sizeof( AudioStreamBasicDescription );
\r
1099 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1100 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1101 if ( result != noErr ) {
\r
1102 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1103 errorText_ = errorStream_.str();
\r
1107 // Set the sample rate and data format id. However, only make the
\r
1108 // change if the sample rate is not within 1.0 of the desired
\r
1109 // rate and the format is not linear pcm.
\r
1110 bool updateFormat = false;
\r
1111 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1112 description.mSampleRate = (Float64) sampleRate;
\r
1113 updateFormat = true;
\r
1116 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1117 description.mFormatID = kAudioFormatLinearPCM;
\r
1118 updateFormat = true;
\r
1121 if ( updateFormat ) {
\r
1122 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1123 if ( result != noErr ) {
\r
1124 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1125 errorText_ = errorStream_.str();
\r
1130 // Now check the physical format.
\r
1131 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1132 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1133 if ( result != noErr ) {
\r
1134 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1135 errorText_ = errorStream_.str();
\r
1139 //std::cout << "Current physical stream format:" << std::endl;
\r
1140 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1141 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1142 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1143 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1145 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1146 description.mFormatID = kAudioFormatLinearPCM;
\r
1147 //description.mSampleRate = (Float64) sampleRate;
\r
1148 AudioStreamBasicDescription testDescription = description;
\r
1149 UInt32 formatFlags;
\r
1151 // We'll try higher bit rates first and then work our way down.
\r
1152 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1153 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1154 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1155 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1156 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1157 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1158 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1159 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1160 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1161 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1162 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1163 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1164 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1166 bool setPhysicalFormat = false;
\r
1167 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1168 testDescription = description;
\r
1169 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1170 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1171 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1172 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1174 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1175 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1176 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1177 if ( result == noErr ) {
\r
1178 setPhysicalFormat = true;
\r
1179 //std::cout << "Updated physical stream format:" << std::endl;
\r
1180 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1181 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1182 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1183 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1188 if ( !setPhysicalFormat ) {
\r
1189 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1190 errorText_ = errorStream_.str();
\r
1193 } // done setting virtual/physical formats.
\r
1195 // Get the stream / device latency.
\r
1197 dataSize = sizeof( UInt32 );
\r
1198 property.mSelector = kAudioDevicePropertyLatency;
\r
1199 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1200 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1201 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1203 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1204 errorText_ = errorStream_.str();
\r
1205 error( RtAudioError::WARNING );
\r
1209 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1210 // always be presented in native-endian format, so we should never
\r
1211 // need to byte swap.
\r
1212 stream_.doByteSwap[mode] = false;
\r
1214 // From the CoreAudio documentation, PCM data must be supplied as
\r
1216 stream_.userFormat = format;
\r
1217 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1219 if ( streamCount == 1 )
\r
1220 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1221 else // multiple streams
\r
1222 stream_.nDeviceChannels[mode] = channels;
\r
1223 stream_.nUserChannels[mode] = channels;
\r
1224 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1225 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1226 else stream_.userInterleaved = true;
\r
1227 stream_.deviceInterleaved[mode] = true;
\r
1228 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1230 // Set flags for buffer conversion.
\r
1231 stream_.doConvertBuffer[mode] = false;
\r
1232 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1233 stream_.doConvertBuffer[mode] = true;
\r
1234 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1235 stream_.doConvertBuffer[mode] = true;
\r
1236 if ( streamCount == 1 ) {
\r
1237 if ( stream_.nUserChannels[mode] > 1 &&
\r
1238 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1239 stream_.doConvertBuffer[mode] = true;
\r
1241 else if ( monoMode && stream_.userInterleaved )
\r
1242 stream_.doConvertBuffer[mode] = true;
\r
1244 // Allocate our CoreHandle structure for the stream.
\r
1245 CoreHandle *handle = 0;
\r
1246 if ( stream_.apiHandle == 0 ) {
\r
1248 handle = new CoreHandle;
\r
1250 catch ( std::bad_alloc& ) {
\r
1251 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1255 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1256 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1259 stream_.apiHandle = (void *) handle;
\r
1262 handle = (CoreHandle *) stream_.apiHandle;
\r
1263 handle->iStream[mode] = firstStream;
\r
1264 handle->nStreams[mode] = streamCount;
\r
1265 handle->id[mode] = id;
\r
1267 // Allocate necessary internal buffers.
\r
1268 unsigned long bufferBytes;
\r
1269 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1270 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1271 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1272 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1273 if ( stream_.userBuffer[mode] == NULL ) {
\r
1274 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1278 // If possible, we will make use of the CoreAudio stream buffers as
\r
1279 // "device buffers". However, we can't do this if using multiple
\r
1281 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1283 bool makeBuffer = true;
\r
1284 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1285 if ( mode == INPUT ) {
\r
1286 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1287 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1288 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1292 if ( makeBuffer ) {
\r
1293 bufferBytes *= *bufferSize;
\r
1294 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1295 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1296 if ( stream_.deviceBuffer == NULL ) {
\r
1297 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1303 stream_.sampleRate = sampleRate;
\r
1304 stream_.device[mode] = device;
\r
1305 stream_.state = STREAM_STOPPED;
\r
1306 stream_.callbackInfo.object = (void *) this;
\r
1308 // Setup the buffer conversion information structure.
\r
1309 if ( stream_.doConvertBuffer[mode] ) {
\r
1310 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1311 else setConvertInfo( mode, channelOffset );
\r
1314 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1315 // Only one callback procedure per device.
\r
1316 stream_.mode = DUPLEX;
\r
1318 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1319 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1321 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1322 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1324 if ( result != noErr ) {
\r
1325 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1326 errorText_ = errorStream_.str();
\r
1329 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1330 stream_.mode = DUPLEX;
\r
1332 stream_.mode = mode;
\r
1335 // Setup the device property listener for over/underload.
\r
1336 property.mSelector = kAudioDeviceProcessorOverload;
\r
1337 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1338 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1344 pthread_cond_destroy( &handle->condition );
\r
1346 stream_.apiHandle = 0;
\r
1349 for ( int i=0; i<2; i++ ) {
\r
1350 if ( stream_.userBuffer[i] ) {
\r
1351 free( stream_.userBuffer[i] );
\r
1352 stream_.userBuffer[i] = 0;
\r
1356 if ( stream_.deviceBuffer ) {
\r
1357 free( stream_.deviceBuffer );
\r
1358 stream_.deviceBuffer = 0;
\r
1361 stream_.state = STREAM_CLOSED;
\r
1365 void RtApiCore :: closeStream( void )
\r
// Tear down an open stream: stop the device(s) if running, destroy the
// IOProc(s), release all internal buffers, and reset stream bookkeeping.
// NOTE(review): this chunk is a lossy extract — braces, "#else"/"#endif"
// and early-return lines are missing between the numbered lines.
1367 if ( stream_.state == STREAM_CLOSED ) {
\r
1368 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
// Closing a never-opened stream is only a warning, not a hard error.
1369 error( RtAudioError::WARNING );
\r
1373 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
// Output side (index 0): stop the device before destroying its IOProc.
1374 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1375 if ( stream_.state == STREAM_RUNNING )
\r
1376 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1377 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1378 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1380 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1381 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
// Input side (index 1): only torn down separately when it is a distinct
// device — a duplex stream on one device shares a single IOProc.
1385 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1386 if ( stream_.state == STREAM_RUNNING )
\r
1387 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1388 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1389 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1391 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1392 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
// Free the per-direction user buffers (output = 0, input = 1).
1396 for ( int i=0; i<2; i++ ) {
\r
1397 if ( stream_.userBuffer[i] ) {
\r
1398 free( stream_.userBuffer[i] );
\r
1399 stream_.userBuffer[i] = 0;
\r
// Free the shared device-format conversion buffer, if one was allocated.
1403 if ( stream_.deviceBuffer ) {
\r
1404 free( stream_.deviceBuffer );
\r
1405 stream_.deviceBuffer = 0;
\r
1408 // Destroy pthread condition variable.
\r
1409 pthread_cond_destroy( &handle->condition );
\r
1411 stream_.apiHandle = 0;
\r
// Mark the stream fully closed so subsequent calls are rejected above.
1413 stream_.mode = UNINITIALIZED;
\r
1414 stream_.state = STREAM_CLOSED;
\r
1417 void RtApiCore :: startStream( void )
\r
// Start the CoreAudio callback procedure(s) for the open stream and mark
// it running. NOTE(review): lossy extract — error-path braces/returns are
// missing between the numbered lines.
1420 if ( stream_.state == STREAM_RUNNING ) {
\r
1421 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
// Double-start is only a warning; nothing is restarted.
1422 error( RtAudioError::WARNING );
\r
1426 OSStatus result = noErr;
\r
1427 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
// Start the output (or shared duplex) device's callback.
1428 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1430 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1431 if ( result != noErr ) {
\r
1432 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1433 errorText_ = errorStream_.str();
\r
// Start the input device too, but only when it differs from the output
// device (same-device duplex shares one callback).
1438 if ( stream_.mode == INPUT ||
\r
1439 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1441 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1442 if ( result != noErr ) {
\r
1443 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1444 errorText_ = errorStream_.str();
\r
// Reset drain bookkeeping used by stopStream()/callbackEvent(), then
// flag the stream as running.
1449 handle->drainCounter = 0;
\r
1450 handle->internalDrain = false;
\r
1451 stream_.state = STREAM_RUNNING;
\r
// Common exit: raise SYSTEM_ERROR only if one of the starts failed.
1454 if ( result == noErr ) return;
\r
1455 error( RtAudioError::SYSTEM_ERROR );
\r
1458 void RtApiCore :: stopStream( void )
\r
// Stop a running stream, letting queued output drain first, then halt the
// CoreAudio callback procedure(s). NOTE(review): lossy extract — braces
// and early-return lines are missing between the numbered lines.
1461 if ( stream_.state == STREAM_STOPPED ) {
\r
1462 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1463 error( RtAudioError::WARNING );
\r
1467 OSStatus result = noErr;
\r
1468 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1469 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// Drain handshake: if the callback hasn't been asked to drain yet
// (counter == 0), request it (counter = 2) and block on the condition
// variable until callbackEvent() signals the output has been played out.
// NOTE(review): pthread_cond_wait requires stream_.mutex to be held by
// this thread — the lock acquisition is not visible in this extract;
// confirm against the full source.
1471 if ( handle->drainCounter == 0 ) {
\r
1472 handle->drainCounter = 2;
\r
1473 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1476 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1477 if ( result != noErr ) {
\r
1478 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1479 errorText_ = errorStream_.str();
\r
// Stop the input device too when it is distinct from the output device.
1484 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1486 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1487 if ( result != noErr ) {
\r
1488 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1489 errorText_ = errorStream_.str();
\r
1494 stream_.state = STREAM_STOPPED;
\r
// Common exit: raise SYSTEM_ERROR only if one of the stops failed.
1497 if ( result == noErr ) return;
\r
1498 error( RtAudioError::SYSTEM_ERROR );
\r
1501 void RtApiCore :: abortStream( void )
\r
// Stop the stream immediately, without draining queued output.
// NOTE(review): lossy extract — the tail of this function (presumably the
// stopStream() call and closing brace) is missing from view.
1504 if ( stream_.state == STREAM_STOPPED ) {
\r
1505 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1506 error( RtAudioError::WARNING );
\r
1510 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
// Pre-setting drainCounter to 2 makes the subsequent stop path skip the
// "wait for output to drain" handshake in stopStream().
1511 handle->drainCounter = 2;
\r
1516 // This function will be called by a spawned thread when the user
\r
1517 // callback function signals that the stream should be stopped or
\r
1518 // aborted. It is better to handle it this way because the
\r
1519 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1520 // function is called.
\r
// Thread entry point: unpack the RtApiCore instance from the opaque
// CallbackInfo pointer and perform the actual stop off the audio thread.
1521 static void *coreStopStream( void *ptr )
\r
1523 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1524 RtApiCore *object = (RtApiCore *) info->object;
\r
1526 object->stopStream();
\r
// Terminate this helper thread; no return value is produced.
1527 pthread_exit( NULL );
\r
1530 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1531 const AudioBufferList *inBufferList,
\r
1532 const AudioBufferList *outBufferList )
\r
1534 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1535 if ( stream_.state == STREAM_CLOSED ) {
\r
1536 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1537 error( RtAudioError::WARNING );
\r
1541 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1542 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1544 // Check if we were draining the stream and signal is finished.
\r
1545 if ( handle->drainCounter > 3 ) {
\r
1546 ThreadHandle threadId;
\r
1548 stream_.state = STREAM_STOPPING;
\r
1549 if ( handle->internalDrain == true )
\r
1550 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1551 else // external call to stopStream()
\r
1552 pthread_cond_signal( &handle->condition );
\r
1556 AudioDeviceID outputDevice = handle->id[0];
\r
1558 // Invoke user callback to get fresh output data UNLESS we are
\r
1559 // draining stream or duplex mode AND the input/output devices are
\r
1560 // different AND this function is called for the input device.
\r
1561 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1562 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1563 double streamTime = getStreamTime();
\r
1564 RtAudioStreamStatus status = 0;
\r
1565 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1566 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1567 handle->xrun[0] = false;
\r
1569 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1570 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1571 handle->xrun[1] = false;
\r
1574 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1575 stream_.bufferSize, streamTime, status, info->userData );
\r
1576 if ( cbReturnValue == 2 ) {
\r
1577 stream_.state = STREAM_STOPPING;
\r
1578 handle->drainCounter = 2;
\r
1582 else if ( cbReturnValue == 1 ) {
\r
1583 handle->drainCounter = 1;
\r
1584 handle->internalDrain = true;
\r
1588 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1590 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1592 if ( handle->nStreams[0] == 1 ) {
\r
1593 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1595 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1597 else { // fill multiple streams with zeros
\r
1598 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1599 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1601 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1605 else if ( handle->nStreams[0] == 1 ) {
\r
1606 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1607 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1608 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1610 else { // copy from user buffer
\r
1611 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1612 stream_.userBuffer[0],
\r
1613 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1616 else { // fill multiple streams
\r
1617 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1618 if ( stream_.doConvertBuffer[0] ) {
\r
1619 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1620 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1623 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1624 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1625 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1626 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1627 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1630 else { // fill multiple multi-channel streams with interleaved data
\r
1631 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1632 Float32 *out, *in;
\r
1634 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1635 UInt32 inChannels = stream_.nUserChannels[0];
\r
1636 if ( stream_.doConvertBuffer[0] ) {
\r
1637 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1638 inChannels = stream_.nDeviceChannels[0];
\r
1641 if ( inInterleaved ) inOffset = 1;
\r
1642 else inOffset = stream_.bufferSize;
\r
1644 channelsLeft = inChannels;
\r
1645 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1647 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1648 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1651 // Account for possible channel offset in first stream
\r
1652 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1653 streamChannels -= stream_.channelOffset[0];
\r
1654 outJump = stream_.channelOffset[0];
\r
1658 // Account for possible unfilled channels at end of the last stream
\r
1659 if ( streamChannels > channelsLeft ) {
\r
1660 outJump = streamChannels - channelsLeft;
\r
1661 streamChannels = channelsLeft;
\r
1664 // Determine input buffer offsets and skips
\r
1665 if ( inInterleaved ) {
\r
1666 inJump = inChannels;
\r
1667 in += inChannels - channelsLeft;
\r
1671 in += (inChannels - channelsLeft) * inOffset;
\r
1674 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1675 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1676 *out++ = in[j*inOffset];
\r
1681 channelsLeft -= streamChannels;
\r
1687 // Don't bother draining input
\r
1688 if ( handle->drainCounter ) {
\r
1689 handle->drainCounter++;
\r
1693 AudioDeviceID inputDevice;
\r
1694 inputDevice = handle->id[1];
\r
1695 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1697 if ( handle->nStreams[1] == 1 ) {
\r
1698 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1699 convertBuffer( stream_.userBuffer[1],
\r
1700 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1701 stream_.convertInfo[1] );
\r
1703 else { // copy to user buffer
\r
1704 memcpy( stream_.userBuffer[1],
\r
1705 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1706 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1709 else { // read from multiple streams
\r
1710 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1711 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1713 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1714 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1715 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1716 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1717 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1720 else { // read from multiple multi-channel streams
\r
1721 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1722 Float32 *out, *in;
\r
1724 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1725 UInt32 outChannels = stream_.nUserChannels[1];
\r
1726 if ( stream_.doConvertBuffer[1] ) {
\r
1727 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1728 outChannels = stream_.nDeviceChannels[1];
\r
1731 if ( outInterleaved ) outOffset = 1;
\r
1732 else outOffset = stream_.bufferSize;
\r
1734 channelsLeft = outChannels;
\r
1735 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1737 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1738 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1741 // Account for possible channel offset in first stream
\r
1742 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1743 streamChannels -= stream_.channelOffset[1];
\r
1744 inJump = stream_.channelOffset[1];
\r
1748 // Account for possible unread channels at end of the last stream
\r
1749 if ( streamChannels > channelsLeft ) {
\r
1750 inJump = streamChannels - channelsLeft;
\r
1751 streamChannels = channelsLeft;
\r
1754 // Determine output buffer offsets and skips
\r
1755 if ( outInterleaved ) {
\r
1756 outJump = outChannels;
\r
1757 out += outChannels - channelsLeft;
\r
1761 out += (outChannels - channelsLeft) * outOffset;
\r
1764 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1765 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1766 out[j*outOffset] = *in++;
\r
1771 channelsLeft -= streamChannels;
\r
1775 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1776 convertBuffer( stream_.userBuffer[1],
\r
1777 stream_.deviceBuffer,
\r
1778 stream_.convertInfo[1] );
\r
1784 //MUTEX_UNLOCK( &stream_.mutex );
\r
1786 RtApi::tickStreamTime();
\r
// Map a CoreAudio OSStatus error code to its symbolic constant name, for use
// in human-readable error messages. Unknown codes yield a generic string.
// NOTE(review): the switch header / closing braces (original lines ~1791-1793,
// 1826-1829) were lost in extraction; only the case labels survive here.
1790 const char* RtApiCore :: getErrorCode( OSStatus code )

1794 case kAudioHardwareNotRunningError:

1795 return "kAudioHardwareNotRunningError";

1797 case kAudioHardwareUnspecifiedError:

1798 return "kAudioHardwareUnspecifiedError";

1800 case kAudioHardwareUnknownPropertyError:

1801 return "kAudioHardwareUnknownPropertyError";

1803 case kAudioHardwareBadPropertySizeError:

1804 return "kAudioHardwareBadPropertySizeError";

1806 case kAudioHardwareIllegalOperationError:

1807 return "kAudioHardwareIllegalOperationError";

1809 case kAudioHardwareBadObjectError:

1810 return "kAudioHardwareBadObjectError";

1812 case kAudioHardwareBadDeviceError:

1813 return "kAudioHardwareBadDeviceError";

1815 case kAudioHardwareBadStreamError:

1816 return "kAudioHardwareBadStreamError";

1818 case kAudioHardwareUnsupportedOperationError:

1819 return "kAudioHardwareUnsupportedOperationError";

1821 case kAudioDeviceUnsupportedFormatError:

1822 return "kAudioDeviceUnsupportedFormatError";

1824 case kAudioDevicePermissionsError:

1825 return "kAudioDevicePermissionsError";

// Fallback for any code not enumerated above.
1828 return "CoreAudio unknown error";
\r
1832 //******************** End of __MACOSX_CORE__ *********************//
\r
1835 #if defined(__UNIX_JACK__)
\r
1837 // JACK is a low-latency audio server, originally written for the
\r
1838 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1839 // connect a number of different applications to an audio device, as
\r
1840 // well as allowing them to share audio between themselves.
\r
1842 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1843 // have ports connected to the server. The JACK server is typically
\r
1844 // started in a terminal as follows:
\r
1846 // .jackd -d alsa -d hw:0
\r
1848 // or through an interface program such as qjackctl. Many of the
\r
1849 // parameters normally set for a stream are fixed by the JACK server
\r
1850 // and can be specified when the JACK server is started. In
\r
1853 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1855 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1856 // frames, and number of buffers = 4. Once the server is running, it
\r
1857 // is not possible to override these values. If the values are not
\r
1858 // specified in the command-line, the JACK server uses default values.
\r
1860 // The JACK server does not have to be running when an instance of
\r
1861 // RtApiJack is created, though the function getDeviceCount() will
\r
1862 // report 0 devices found until JACK has been started. When no
\r
1863 // devices are available (i.e., the JACK server is not running), a
\r
1864 // stream cannot be opened.
\r
1866 #include <jack/jack.h>
\r
1867 #include <unistd.h>
\r
1870 // A structure to hold various information related to the Jack API
\r
1871 // implementation.
\r
// Per-stream bookkeeping for the Jack backend: the Jack client handle, the
// registered ports for each direction ([0]=output, [1]=input), and the
// drain/stop synchronization state shared between stopStream() and the
// process callback.
// NOTE(review): the member declaration `bool xrun[2];` (original line 1876)
// was lost in extraction — the constructor below still initializes it, and
// jackXrun()/callbackEvent() read and write it.
1872 struct JackHandle {

1873 jack_client_t *client;

// One jack_port_t* array per direction; freed in closeStream().
1874 jack_port_t **ports[2];

1875 std::string deviceName[2];

// Signaled by the callback when output draining completes (see stopStream()).
1877 pthread_cond_t condition;

1878 int drainCounter; // Tracks callback counts when draining

1879 bool internalDrain; // Indicates if stop is initiated from callback or not.

1882 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op Jack error handler: installed via jack_set_error_function() in the
// RtApiJack constructor (non-debug builds) to suppress Jack's internal
// error reporting to stderr.
1885 static void jackSilentError( const char * ) {};
\r
// Constructor: no member state to set up; in non-debug builds it installs the
// silent error handler above so Jack's diagnostics do not clutter stderr.
// NOTE(review): the opening brace and closing `#endif`/brace lines were lost
// in extraction.
1887 RtApiJack :: RtApiJack()

1889 // Nothing to do here.

1890 #if !defined(__RTAUDIO_DEBUG__)

1891 // Turn off Jack's internal error reporting.

1892 jack_set_error_function( &jackSilentError );
\r
// Destructor: if a stream is still open, closeStream() tears down the Jack
// client, ports, condition variable and buffers before the object dies.
1896 RtApiJack :: ~RtApiJack()

1898 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// Count Jack "devices": distinct client-name prefixes (the text before the
// first ':') among all ports visible on the Jack server. Returns 0 when the
// server is not running or the connection fails; JackNoStartServer prevents
// this probe from spawning a server as a side effect.
// NOTE(review): the do/while header, nDevices increment and `return nDevices;`
// lines were lost in extraction; the surviving lines show the loop body.
1901 unsigned int RtApiJack :: getDeviceCount( void )

1903 // See if we can become a jack client.

1904 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;

1905 jack_status_t *status = NULL;

// Temporary client used only for this probe; closed before returning.
1906 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );

1907 if ( client == 0 ) return 0;

1909 const char **ports;

1910 std::string port, previousPort;

1911 unsigned int nChannels = 0, nDevices = 0;

1912 ports = jack_get_ports( client, NULL, NULL, 0 );

1914 // Parse the port names up to the first colon (:).

1915 size_t iColon = 0;

1917 port = (char *) ports[ nChannels ];

1918 iColon = port.find(":");

1919 if ( iColon != std::string::npos ) {

// Keeps the trailing ':' in the prefix (iColon + 1); getDeviceInfo()'s
// parallel loop strips it — harmless here since only equality of adjacent
// prefixes matters, but worth keeping consistent. TODO confirm upstream.
1920 port = port.substr( 0, iColon + 1 );

1921 if ( port != previousPort ) {

1923 previousPort = port;

1926 } while ( ports[++nChannels] );

1930 jack_client_close( client );
\r
1934 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1936 RtAudio::DeviceInfo info;
\r
1937 info.probed = false;
\r
1939 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1940 jack_status_t *status = NULL;
\r
1941 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1942 if ( client == 0 ) {
\r
1943 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1944 error( RtAudioError::WARNING );
\r
1948 const char **ports;
\r
1949 std::string port, previousPort;
\r
1950 unsigned int nPorts = 0, nDevices = 0;
\r
1951 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1953 // Parse the port names up to the first colon (:).
\r
1954 size_t iColon = 0;
\r
1956 port = (char *) ports[ nPorts ];
\r
1957 iColon = port.find(":");
\r
1958 if ( iColon != std::string::npos ) {
\r
1959 port = port.substr( 0, iColon );
\r
1960 if ( port != previousPort ) {
\r
1961 if ( nDevices == device ) info.name = port;
\r
1963 previousPort = port;
\r
1966 } while ( ports[++nPorts] );
\r
1970 if ( device >= nDevices ) {
\r
1971 jack_client_close( client );
\r
1972 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1973 error( RtAudioError::INVALID_USE );
\r
1977 // Get the current jack server sample rate.
\r
1978 info.sampleRates.clear();
\r
1979 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1981 // Count the available ports containing the client name as device
\r
1982 // channels. Jack "input ports" equal RtAudio output channels.
\r
1983 unsigned int nChannels = 0;
\r
1984 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1986 while ( ports[ nChannels ] ) nChannels++;
\r
1988 info.outputChannels = nChannels;
\r
1991 // Jack "output ports" equal RtAudio input channels.
\r
1993 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1995 while ( ports[ nChannels ] ) nChannels++;
\r
1997 info.inputChannels = nChannels;
\r
2000 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2001 jack_client_close(client);
\r
2002 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2003 error( RtAudioError::WARNING );
\r
2007 // If device opens for both playback and capture, we determine the channels.
\r
2008 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2009 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2011 // Jack always uses 32-bit floats.
\r
2012 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2014 // Jack doesn't provide default devices so we'll use the first available one.
\r
2015 if ( device == 0 && info.outputChannels > 0 )
\r
2016 info.isDefaultOutput = true;
\r
2017 if ( device == 0 && info.inputChannels > 0 )
\r
2018 info.isDefaultInput = true;
\r
2020 jack_client_close(client);
\r
2021 info.probed = true;
\r
// Jack process callback (registered in probeDeviceOpen): forwards each audio
// cycle to RtApiJack::callbackEvent(). Returning nonzero tells the Jack
// server to remove this client from the process graph, which happens when
// callbackEvent() reports failure.
2025 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )

2027 CallbackInfo *info = (CallbackInfo *) infoPointer;

2029 RtApiJack *object = (RtApiJack *) info->object;

2030 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2035 // This function will be called by a spawned thread when the Jack
\r
2036 // server signals that it is shutting down. It is necessary to handle
\r
2037 // it this way because the jackShutdown() function must return before
\r
2038 // the jack_deactivate() function (in closeStream()) will return.
\r
// Thread entry point spawned by jackShutdown(): closes the stream from a
// separate thread. Necessary because jackShutdown() must return before
// jack_deactivate() (called inside closeStream()) can complete — closing
// directly from the shutdown callback would deadlock.
2039 static void *jackCloseStream( void *ptr )

2041 CallbackInfo *info = (CallbackInfo *) ptr;

2042 RtApiJack *object = (RtApiJack *) info->object;

2044 object->closeStream();

2046 pthread_exit( NULL );
\r
// Jack on-shutdown callback (registered in probeDeviceOpen). Distinguishes a
// deliberate stop (stream not running: deactivating a client also fires this
// callback) from a real server shutdown, in which case it closes the stream
// from a freshly spawned thread (see jackCloseStream above).
2048 static void jackShutdown( void *infoPointer )

2050 CallbackInfo *info = (CallbackInfo *) infoPointer;

2051 RtApiJack *object = (RtApiJack *) info->object;

2053 // Check current stream state. If stopped, then we'll assume this

2054 // was called as a result of a call to RtApiJack::stopStream (the

2055 // deactivation of a client handle causes this function to be called).

2056 // If not, we'll assume the Jack server is shutting down or some

2057 // other problem occurred and we should close the stream.

2058 if ( object->isStreamRunning() == false ) return;

// The spawned thread is not joined here; it exits via pthread_exit().
2060 ThreadHandle threadId;

2061 pthread_create( &threadId, NULL, jackCloseStream, info );

2062 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
// Jack xrun callback: records an over/underflow flag per direction so the
// next callbackEvent() can report RTAUDIO_OUTPUT_UNDERFLOW /
// RTAUDIO_INPUT_OVERFLOW to the user callback.
// NOTE(review): probeDeviceOpen registers this with `(void *) &handle` — the
// address of a LOCAL JackHandle* variable — instead of `(void *) handle`
// (see original line 2271). infoPointer therefore likely dangles once
// probeDeviceOpen returns; this was a known upstream RtAudio bug, fixed in
// later releases. Confirm against the canonical source before relying on
// xrun reporting.
2065 static int jackXrun( void *infoPointer )

2067 JackHandle *handle = (JackHandle *) infoPointer;

2069 if ( handle->ports[0] ) handle->xrun[0] = true;

2070 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2075 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2076 unsigned int firstChannel, unsigned int sampleRate,
\r
2077 RtAudioFormat format, unsigned int *bufferSize,
\r
2078 RtAudio::StreamOptions *options )
\r
2080 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2082 // Look for jack server and try to become a client (only do once per stream).
\r
2083 jack_client_t *client = 0;
\r
2084 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2085 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2086 jack_status_t *status = NULL;
\r
2087 if ( options && !options->streamName.empty() )
\r
2088 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2090 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2091 if ( client == 0 ) {
\r
2092 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2093 error( RtAudioError::WARNING );
\r
2098 // The handle must have been created on an earlier pass.
\r
2099 client = handle->client;
\r
2102 const char **ports;
\r
2103 std::string port, previousPort, deviceName;
\r
2104 unsigned int nPorts = 0, nDevices = 0;
\r
2105 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2107 // Parse the port names up to the first colon (:).
\r
2108 size_t iColon = 0;
\r
2110 port = (char *) ports[ nPorts ];
\r
2111 iColon = port.find(":");
\r
2112 if ( iColon != std::string::npos ) {
\r
2113 port = port.substr( 0, iColon );
\r
2114 if ( port != previousPort ) {
\r
2115 if ( nDevices == device ) deviceName = port;
\r
2117 previousPort = port;
\r
2120 } while ( ports[++nPorts] );
\r
2124 if ( device >= nDevices ) {
\r
2125 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2129 // Count the available ports containing the client name as device
\r
2130 // channels. Jack "input ports" equal RtAudio output channels.
\r
2131 unsigned int nChannels = 0;
\r
2132 unsigned long flag = JackPortIsInput;
\r
2133 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2134 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2136 while ( ports[ nChannels ] ) nChannels++;
\r
2140 // Compare the jack ports for specified client to the requested number of channels.
\r
2141 if ( nChannels < (channels + firstChannel) ) {
\r
2142 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2143 errorText_ = errorStream_.str();
\r
2147 // Check the jack server sample rate.
\r
2148 unsigned int jackRate = jack_get_sample_rate( client );
\r
2149 if ( sampleRate != jackRate ) {
\r
2150 jack_client_close( client );
\r
2151 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2152 errorText_ = errorStream_.str();
\r
2155 stream_.sampleRate = jackRate;
\r
2157 // Get the latency of the JACK port.
\r
2158 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2159 if ( ports[ firstChannel ] ) {
\r
2160 // Added by Ge Wang
\r
2161 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2162 // the range (usually the min and max are equal)
\r
2163 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2164 // get the latency range
\r
2165 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2166 // be optimistic, use the min!
\r
2167 stream_.latency[mode] = latrange.min;
\r
2168 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2172 // The jack server always uses 32-bit floating-point data.
\r
2173 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2174 stream_.userFormat = format;
\r
2176 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2177 else stream_.userInterleaved = true;
\r
2179 // Jack always uses non-interleaved buffers.
\r
2180 stream_.deviceInterleaved[mode] = false;
\r
2182 // Jack always provides host byte-ordered data.
\r
2183 stream_.doByteSwap[mode] = false;
\r
2185 // Get the buffer size. The buffer size and number of buffers
\r
2186 // (periods) is set when the jack server is started.
\r
2187 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2188 *bufferSize = stream_.bufferSize;
\r
2190 stream_.nDeviceChannels[mode] = channels;
\r
2191 stream_.nUserChannels[mode] = channels;
\r
2193 // Set flags for buffer conversion.
\r
2194 stream_.doConvertBuffer[mode] = false;
\r
2195 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2196 stream_.doConvertBuffer[mode] = true;
\r
2197 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2198 stream_.nUserChannels[mode] > 1 )
\r
2199 stream_.doConvertBuffer[mode] = true;
\r
2201 // Allocate our JackHandle structure for the stream.
\r
2202 if ( handle == 0 ) {
\r
2204 handle = new JackHandle;
\r
2206 catch ( std::bad_alloc& ) {
\r
2207 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2211 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2212 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2215 stream_.apiHandle = (void *) handle;
\r
2216 handle->client = client;
\r
2218 handle->deviceName[mode] = deviceName;
\r
2220 // Allocate necessary internal buffers.
\r
2221 unsigned long bufferBytes;
\r
2222 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2223 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2224 if ( stream_.userBuffer[mode] == NULL ) {
\r
2225 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2229 if ( stream_.doConvertBuffer[mode] ) {
\r
2231 bool makeBuffer = true;
\r
2232 if ( mode == OUTPUT )
\r
2233 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2234 else { // mode == INPUT
\r
2235 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2236 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2237 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2238 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2242 if ( makeBuffer ) {
\r
2243 bufferBytes *= *bufferSize;
\r
2244 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2245 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2246 if ( stream_.deviceBuffer == NULL ) {
\r
2247 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2253 // Allocate memory for the Jack ports (channels) identifiers.
\r
2254 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2255 if ( handle->ports[mode] == NULL ) {
\r
2256 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2260 stream_.device[mode] = device;
\r
2261 stream_.channelOffset[mode] = firstChannel;
\r
2262 stream_.state = STREAM_STOPPED;
\r
2263 stream_.callbackInfo.object = (void *) this;
\r
2265 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2266 // We had already set up the stream for output.
\r
2267 stream_.mode = DUPLEX;
\r
2269 stream_.mode = mode;
\r
2270 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2271 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2272 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2275 // Register our ports.
\r
2277 if ( mode == OUTPUT ) {
\r
2278 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2279 snprintf( label, 64, "outport %d", i );
\r
2280 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2281 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2285 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2286 snprintf( label, 64, "inport %d", i );
\r
2287 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2288 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2292 // Setup the buffer conversion information structure. We don't use
\r
2293 // buffers to do channel offsets, so we override that parameter
\r
2295 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2301 pthread_cond_destroy( &handle->condition );
\r
2302 jack_client_close( handle->client );
\r
2304 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2305 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2308 stream_.apiHandle = 0;
\r
2311 for ( int i=0; i<2; i++ ) {
\r
2312 if ( stream_.userBuffer[i] ) {
\r
2313 free( stream_.userBuffer[i] );
\r
2314 stream_.userBuffer[i] = 0;
\r
2318 if ( stream_.deviceBuffer ) {
\r
2319 free( stream_.deviceBuffer );
\r
2320 stream_.deviceBuffer = 0;
\r
// Close the stream and release every Jack resource: deactivate the client if
// still running, close the client connection, free the per-direction port
// arrays, destroy the drain condition variable, and free the user/device
// buffers. Leaves stream_ in the UNINITIALIZED/STREAM_CLOSED state.
// Warns (no-op) if no stream is open.
2326 void RtApiJack :: closeStream( void )

2328 if ( stream_.state == STREAM_CLOSED ) {

2329 errorText_ = "RtApiJack::closeStream(): no open stream to close!";

2330 error( RtAudioError::WARNING );

2334 JackHandle *handle = (JackHandle *) stream_.apiHandle;

// Deactivate before closing so the process callback stops firing first.
2337 if ( stream_.state == STREAM_RUNNING )

2338 jack_deactivate( handle->client );

2340 jack_client_close( handle->client );

2344 if ( handle->ports[0] ) free( handle->ports[0] );

2345 if ( handle->ports[1] ) free( handle->ports[1] );

2346 pthread_cond_destroy( &handle->condition );

2348 stream_.apiHandle = 0;

// Release both user buffers ([0]=output, [1]=input) and the shared
// conversion buffer; pointers are zeroed so a re-open starts clean.
2351 for ( int i=0; i<2; i++ ) {

2352 if ( stream_.userBuffer[i] ) {

2353 free( stream_.userBuffer[i] );

2354 stream_.userBuffer[i] = 0;

2358 if ( stream_.deviceBuffer ) {

2359 free( stream_.deviceBuffer );

2360 stream_.deviceBuffer = 0;

2363 stream_.mode = UNINITIALIZED;

2364 stream_.state = STREAM_CLOSED;
\r
2367 void RtApiJack :: startStream( void )
\r
2370 if ( stream_.state == STREAM_RUNNING ) {
\r
2371 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2372 error( RtAudioError::WARNING );
\r
2376 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2377 int result = jack_activate( handle->client );
\r
2379 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2383 const char **ports;
\r
2385 // Get the list of available ports.
\r
2386 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2388 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2389 if ( ports == NULL) {
\r
2390 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2394 // Now make the port connections. Since RtAudio wasn't designed to
\r
2395 // allow the user to select particular channels of a device, we'll
\r
2396 // just open the first "nChannels" ports with offset.
\r
2397 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2399 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2400 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2403 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2410 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2412 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2413 if ( ports == NULL) {
\r
2414 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2418 // Now make the port connections. See note above.
\r
2419 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2421 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2422 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2425 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2432 handle->drainCounter = 0;
\r
2433 handle->internalDrain = false;
\r
2434 stream_.state = STREAM_RUNNING;
\r
2437 if ( result == 0 ) return;
\r
2438 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream gracefully. For output/duplex streams, if a drain is not
// already in progress, drainCounter is set to 2 and the caller blocks on the
// handle's condition variable until the process callback has flushed the
// output (callbackEvent signals the condition once drainCounter > 3). The
// client is then deactivated and the state set to STREAM_STOPPED.
// Warns (no-op) if already stopped.
// NOTE(review): pthread_cond_wait() is called with stream_.mutex but no lock
// acquisition is visible in this chunk (the MUTEX calls appear commented out
// elsewhere in the file) — confirm the locking protocol against the full
// source.
2441 void RtApiJack :: stopStream( void )

2444 if ( stream_.state == STREAM_STOPPED ) {

2445 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";

2446 error( RtAudioError::WARNING );

2450 JackHandle *handle = (JackHandle *) stream_.apiHandle;

2451 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

2453 if ( handle->drainCounter == 0 ) {

2454 handle->drainCounter = 2;

2455 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled

2459 jack_deactivate( handle->client );

2460 stream_.state = STREAM_STOPPED;
\r
// Abort the stream without draining: drainCounter is forced to 2 so the next
// process callback writes silence instead of flushing pending output (see
// callbackEvent's "write zeros" branch). Warns (no-op) if already stopped.
// NOTE(review): the tail of this function (presumably a call into the common
// stop path) was lost in extraction.
2463 void RtApiJack :: abortStream( void )

2466 if ( stream_.state == STREAM_STOPPED ) {

2467 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";

2468 error( RtAudioError::WARNING );

2472 JackHandle *handle = (JackHandle *) stream_.apiHandle;

2473 handle->drainCounter = 2;
\r
2478 // This function will be called by a spawned thread when the user
\r
2479 // callback function signals that the stream should be stopped or
\r
2480 // aborted. It is necessary to handle it this way because the
\r
2481 // callbackEvent() function must return before the jack_deactivate()
\r
2482 // function will return.
\r
// Thread entry point spawned from callbackEvent() when the user callback
// requests a stop/abort: stopStream() must run on a separate thread because
// callbackEvent() has to return before jack_deactivate() can complete.
2483 static void *jackStopStream( void *ptr )

2485 CallbackInfo *info = (CallbackInfo *) ptr;

2486 RtApiJack *object = (RtApiJack *) info->object;

2488 object->stopStream();

2489 pthread_exit( NULL );
\r
2492 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2494 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2495 if ( stream_.state == STREAM_CLOSED ) {
\r
2496 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2497 error( RtAudioError::WARNING );
\r
2500 if ( stream_.bufferSize != nframes ) {
\r
2501 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2502 error( RtAudioError::WARNING );
\r
2506 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2507 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2509 // Check if we were draining the stream and signal is finished.
\r
2510 if ( handle->drainCounter > 3 ) {
\r
2511 ThreadHandle threadId;
\r
2513 stream_.state = STREAM_STOPPING;
\r
2514 if ( handle->internalDrain == true )
\r
2515 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2517 pthread_cond_signal( &handle->condition );
\r
2521 // Invoke user callback first, to get fresh output data.
\r
2522 if ( handle->drainCounter == 0 ) {
\r
2523 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2524 double streamTime = getStreamTime();
\r
2525 RtAudioStreamStatus status = 0;
\r
2526 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2527 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2528 handle->xrun[0] = false;
\r
2530 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2531 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2532 handle->xrun[1] = false;
\r
2534 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2535 stream_.bufferSize, streamTime, status, info->userData );
\r
2536 if ( cbReturnValue == 2 ) {
\r
2537 stream_.state = STREAM_STOPPING;
\r
2538 handle->drainCounter = 2;
\r
2540 pthread_create( &id, NULL, jackStopStream, info );
\r
2543 else if ( cbReturnValue == 1 ) {
\r
2544 handle->drainCounter = 1;
\r
2545 handle->internalDrain = true;
\r
2549 jack_default_audio_sample_t *jackbuffer;
\r
2550 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2551 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2553 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2555 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2556 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2557 memset( jackbuffer, 0, bufferBytes );
\r
2561 else if ( stream_.doConvertBuffer[0] ) {
\r
2563 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2565 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2566 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2567 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2570 else { // no buffer conversion
\r
2571 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2572 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2573 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2578 // Don't bother draining input
\r
2579 if ( handle->drainCounter ) {
\r
2580 handle->drainCounter++;
\r
2584 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2586 if ( stream_.doConvertBuffer[1] ) {
\r
2587 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2588 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2589 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2591 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2593 else { // no buffer conversion
\r
2594 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2595 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2596 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2602 RtApi::tickStreamTime();
\r
2605 //******************** End of __UNIX_JACK__ *********************//
\r
2608 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2610 // The ASIO API is designed around a callback scheme, so this
\r
2611 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2612 // Jack. The primary constraint with ASIO is that it only allows
\r
2613 // access to a single driver at a time. Thus, it is not possible to
\r
2614 // have more than one simultaneous RtAudio stream.
\r
2616 // This implementation also requires a number of external ASIO files
\r
2617 // and a few global variables. The ASIO callback scheme does not
\r
2618 // allow for the passing of user data, so we must create a global
\r
2619 // pointer to our callbackInfo structure.
\r
2621 // On unix systems, we make use of a pthread condition variable.
\r
2622 // Since there is no equivalent in Windows, I hacked something based
\r
2623 // on information found in
\r
2624 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2626 #include "asiosys.h"
\r
2628 #include "iasiothiscallresolver.h"
\r
2629 #include "asiodrivers.h"
\r
2632 static AsioDrivers drivers;
\r
2633 static ASIOCallbacks asioCallbacks;
\r
2634 static ASIODriverInfo driverInfo;
\r
2635 static CallbackInfo *asioCallbackInfo;
\r
2636 static bool asioXRun;
\r
2638 struct AsioHandle {
\r
2639 int drainCounter; // Tracks callback counts when draining
\r
2640 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2641 ASIOBufferInfo *bufferInfos;
\r
2645 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2648 // Function declarations (definitions at end of section)
\r
2649 static const char* getAsioErrorString( ASIOError result );
\r
2650 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2651 static long asioMessages( long selector, long value, void* message, double* opt );
\r
// Constructor: initialize COM in a single-threaded apartment (required
// by ASIO) and reset the file-scope driver/driverInfo state.
RtApiAsio :: RtApiAsio()
  // ASIO cannot run on a multi-threaded apartment. You can call
  // CoInitialize beforehand, but it must be for apartment threading
  // (in which case, CoInitialize will return S_FALSE here).
  coInitialized_ = false;
  HRESULT hr = CoInitialize( NULL );
  if ( FAILED(hr) ) {
    errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
    error( RtAudioError::WARNING );
  // Reached on success; the matching brace/else for the block above is
  // elided in this excerpt.  coInitialized_ gates CoUninitialize() in
  // the destructor.
  coInitialized_ = true;
  drivers.removeCurrentDriver();
  driverInfo.asioVersion = 2;
  // See note in DirectSound implementation about GetDesktopWindow().
  driverInfo.sysRef = GetForegroundWindow();
\r
// Destructor: close any open stream and balance the CoInitialize()
// performed in the constructor.
RtApiAsio :: ~RtApiAsio()
  if ( stream_.state != STREAM_CLOSED ) closeStream();
  if ( coInitialized_ ) CoUninitialize();
\r
// Returns the number of ASIO drivers registered on the system; each
// registered driver is treated as one "device".
unsigned int RtApiAsio :: getDeviceCount( void )
  return (unsigned int) drivers.asioGetNumDev();
\r
// Probe one ASIO driver and fill an RtAudio::DeviceInfo (channel counts,
// supported sample rates, native formats, default-device flags).
// Because ASIO allows only one loaded driver at a time, cached results
// from saveDeviceInfo() are returned instead while a stream is open.
// Error paths set errorText_ and call error(); the early `return`s after
// those calls are elided in this excerpt.
RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = false;

  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );

  if ( device >= nDevices ) {
    errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
  if ( stream_.state != STREAM_CLOSED ) {
    if ( device >= devices_.size() ) {
      errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
      error( RtAudioError::WARNING );
    return devices_[ device ];

  char driverName[32];
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  info.name = driverName;

  if ( !drivers.loadDriver( driverName ) ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  result = ASIOInit( &driverInfo );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Determine the device channel information.
  long inputChannels, outputChannels;
  result = ASIOGetChannels( &inputChannels, &outputChannels );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  info.outputChannels = outputChannels;
  info.inputChannels = inputChannels;
  // Duplex capability is the min of the input and output channel counts.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Determine the supported sample rates by querying each candidate rate.
  info.sampleRates.clear();
  for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
    result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
    if ( result == ASE_OK )
      info.sampleRates.push_back( SAMPLE_RATES[i] );

  // Determine supported data types ... just check first channel and assume rest are the same.
  ASIOChannelInfo channelInfo;
  channelInfo.channel = 0;
  channelInfo.isInput = true;
  if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
  result = ASIOGetChannelInfo( &channelInfo );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Map the driver's ASIO sample type onto an RtAudioFormat bit.
  info.nativeFormats = 0;
  if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
    info.nativeFormats |= RTAUDIO_SINT16;
  else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
    info.nativeFormats |= RTAUDIO_SINT32;
  else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
    info.nativeFormats |= RTAUDIO_FLOAT64;
  else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
    info.nativeFormats |= RTAUDIO_SINT24;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  info.probed = true;
  // Unload the driver so other devices can be probed / opened.
  drivers.removeCurrentDriver();
\r
// ASIO buffer-switch callback: the driver tells us which half (index) of
// the double buffer to process.  ASIO passes no user data, so the stream
// object is recovered through the global asioCallbackInfo pointer.
static void bufferSwitch( long index, ASIOBool /*processNow*/ )
  RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
  object->callbackEvent( index );
\r
// Snapshot DeviceInfo for every driver into devices_ so that
// getDeviceInfo() can answer queries while a stream is open (ASIO
// permits only one loaded driver at a time).
void RtApiAsio :: saveDeviceInfo( void )
  unsigned int nDevices = getDeviceCount();
  devices_.resize( nDevices );
  for ( unsigned int i=0; i<nDevices; i++ )
    devices_[i] = getDeviceInfo( i );
\r
2815 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2816 unsigned int firstChannel, unsigned int sampleRate,
\r
2817 RtAudioFormat format, unsigned int *bufferSize,
\r
2818 RtAudio::StreamOptions *options )
\r
2820 // For ASIO, a duplex stream MUST use the same driver.
\r
2821 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2822 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2826 char driverName[32];
\r
2827 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2828 if ( result != ASE_OK ) {
\r
2829 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2830 errorText_ = errorStream_.str();
\r
2834 // Only load the driver once for duplex stream.
\r
2835 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2836 // The getDeviceInfo() function will not work when a stream is open
\r
2837 // because ASIO does not allow multiple devices to run at the same
\r
2838 // time. Thus, we'll probe the system before opening a stream and
\r
2839 // save the results for use by getDeviceInfo().
\r
2840 this->saveDeviceInfo();
\r
2842 if ( !drivers.loadDriver( driverName ) ) {
\r
2843 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2844 errorText_ = errorStream_.str();
\r
2848 result = ASIOInit( &driverInfo );
\r
2849 if ( result != ASE_OK ) {
\r
2850 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2851 errorText_ = errorStream_.str();
\r
2856 // Check the device channel count.
\r
2857 long inputChannels, outputChannels;
\r
2858 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2859 if ( result != ASE_OK ) {
\r
2860 drivers.removeCurrentDriver();
\r
2861 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2862 errorText_ = errorStream_.str();
\r
2866 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2867 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2868 drivers.removeCurrentDriver();
\r
2869 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2870 errorText_ = errorStream_.str();
\r
2873 stream_.nDeviceChannels[mode] = channels;
\r
2874 stream_.nUserChannels[mode] = channels;
\r
2875 stream_.channelOffset[mode] = firstChannel;
\r
2877 // Verify the sample rate is supported.
\r
2878 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2879 if ( result != ASE_OK ) {
\r
2880 drivers.removeCurrentDriver();
\r
2881 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2882 errorText_ = errorStream_.str();
\r
2886 // Get the current sample rate
\r
2887 ASIOSampleRate currentRate;
\r
2888 result = ASIOGetSampleRate( ¤tRate );
\r
2889 if ( result != ASE_OK ) {
\r
2890 drivers.removeCurrentDriver();
\r
2891 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2892 errorText_ = errorStream_.str();
\r
2896 // Set the sample rate only if necessary
\r
2897 if ( currentRate != sampleRate ) {
\r
2898 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2899 if ( result != ASE_OK ) {
\r
2900 drivers.removeCurrentDriver();
\r
2901 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2902 errorText_ = errorStream_.str();
\r
2907 // Determine the driver data type.
\r
2908 ASIOChannelInfo channelInfo;
\r
2909 channelInfo.channel = 0;
\r
2910 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2911 else channelInfo.isInput = true;
\r
2912 result = ASIOGetChannelInfo( &channelInfo );
\r
2913 if ( result != ASE_OK ) {
\r
2914 drivers.removeCurrentDriver();
\r
2915 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2916 errorText_ = errorStream_.str();
\r
2920 // Assuming WINDOWS host is always little-endian.
\r
2921 stream_.doByteSwap[mode] = false;
\r
2922 stream_.userFormat = format;
\r
2923 stream_.deviceFormat[mode] = 0;
\r
2924 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2925 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2926 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2928 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2929 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2930 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2932 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2933 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2934 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2936 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2937 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2938 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2940 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2941 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2942 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2945 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2946 drivers.removeCurrentDriver();
\r
2947 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2948 errorText_ = errorStream_.str();
\r
2952 // Set the buffer size. For a duplex stream, this will end up
\r
2953 // setting the buffer size based on the input constraints, which
\r
2955 long minSize, maxSize, preferSize, granularity;
\r
2956 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2957 if ( result != ASE_OK ) {
\r
2958 drivers.removeCurrentDriver();
\r
2959 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2960 errorText_ = errorStream_.str();
\r
2964 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2965 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2966 else if ( granularity == -1 ) {
\r
2967 // Make sure bufferSize is a power of two.
\r
2968 int log2_of_min_size = 0;
\r
2969 int log2_of_max_size = 0;
\r
2971 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2972 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2973 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2976 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2977 int min_delta_num = log2_of_min_size;
\r
2979 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2980 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2981 if (current_delta < min_delta) {
\r
2982 min_delta = current_delta;
\r
2983 min_delta_num = i;
\r
2987 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2988 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2989 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2991 else if ( granularity != 0 ) {
\r
2992 // Set to an even multiple of granularity, rounding up.
\r
2993 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
2996 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2997 drivers.removeCurrentDriver();
\r
2998 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3002 stream_.bufferSize = *bufferSize;
\r
3003 stream_.nBuffers = 2;
\r
3005 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3006 else stream_.userInterleaved = true;
\r
3008 // ASIO always uses non-interleaved buffers.
\r
3009 stream_.deviceInterleaved[mode] = false;
\r
3011 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3012 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3013 if ( handle == 0 ) {
\r
3015 handle = new AsioHandle;
\r
3017 catch ( std::bad_alloc& ) {
\r
3018 //if ( handle == NULL ) {
\r
3019 drivers.removeCurrentDriver();
\r
3020 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3023 handle->bufferInfos = 0;
\r
3025 // Create a manual-reset event.
\r
3026 handle->condition = CreateEvent( NULL, // no security
\r
3027 TRUE, // manual-reset
\r
3028 FALSE, // non-signaled initially
\r
3029 NULL ); // unnamed
\r
3030 stream_.apiHandle = (void *) handle;
\r
3033 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3034 // and output separately, we'll have to dispose of previously
\r
3035 // created output buffers for a duplex stream.
\r
3036 long inputLatency, outputLatency;
\r
3037 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3038 ASIODisposeBuffers();
\r
3039 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3042 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3043 bool buffersAllocated = false;
\r
3044 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3045 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3046 if ( handle->bufferInfos == NULL ) {
\r
3047 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3048 errorText_ = errorStream_.str();
\r
3052 ASIOBufferInfo *infos;
\r
3053 infos = handle->bufferInfos;
\r
3054 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3055 infos->isInput = ASIOFalse;
\r
3056 infos->channelNum = i + stream_.channelOffset[0];
\r
3057 infos->buffers[0] = infos->buffers[1] = 0;
\r
3059 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3060 infos->isInput = ASIOTrue;
\r
3061 infos->channelNum = i + stream_.channelOffset[1];
\r
3062 infos->buffers[0] = infos->buffers[1] = 0;
\r
3065 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3066 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3067 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3068 asioCallbacks.asioMessage = &asioMessages;
\r
3069 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3070 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3071 if ( result != ASE_OK ) {
\r
3072 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3073 errorText_ = errorStream_.str();
\r
3076 buffersAllocated = true;
\r
3078 // Set flags for buffer conversion.
\r
3079 stream_.doConvertBuffer[mode] = false;
\r
3080 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3081 stream_.doConvertBuffer[mode] = true;
\r
3082 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3083 stream_.nUserChannels[mode] > 1 )
\r
3084 stream_.doConvertBuffer[mode] = true;
\r
3086 // Allocate necessary internal buffers
\r
3087 unsigned long bufferBytes;
\r
3088 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3089 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3090 if ( stream_.userBuffer[mode] == NULL ) {
\r
3091 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3095 if ( stream_.doConvertBuffer[mode] ) {
\r
3097 bool makeBuffer = true;
\r
3098 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3099 if ( mode == INPUT ) {
\r
3100 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3101 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3102 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3106 if ( makeBuffer ) {
\r
3107 bufferBytes *= *bufferSize;
\r
3108 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3109 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3110 if ( stream_.deviceBuffer == NULL ) {
\r
3111 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3117 stream_.sampleRate = sampleRate;
\r
3118 stream_.device[mode] = device;
\r
3119 stream_.state = STREAM_STOPPED;
\r
3120 asioCallbackInfo = &stream_.callbackInfo;
\r
3121 stream_.callbackInfo.object = (void *) this;
\r
3122 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3123 // We had already set up an output stream.
\r
3124 stream_.mode = DUPLEX;
\r
3126 stream_.mode = mode;
\r
3128 // Determine device latencies
\r
3129 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3130 if ( result != ASE_OK ) {
\r
3131 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3132 errorText_ = errorStream_.str();
\r
3133 error( RtAudioError::WARNING); // warn but don't fail
\r
3136 stream_.latency[0] = outputLatency;
\r
3137 stream_.latency[1] = inputLatency;
\r
3140 // Setup the buffer conversion information structure. We don't use
\r
3141 // buffers to do channel offsets, so we override that parameter
\r
3143 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3148 if ( buffersAllocated )
\r
3149 ASIODisposeBuffers();
\r
3150 drivers.removeCurrentDriver();
\r
3153 CloseHandle( handle->condition );
\r
3154 if ( handle->bufferInfos )
\r
3155 free( handle->bufferInfos );
\r
3157 stream_.apiHandle = 0;
\r
3160 for ( int i=0; i<2; i++ ) {
\r
3161 if ( stream_.userBuffer[i] ) {
\r
3162 free( stream_.userBuffer[i] );
\r
3163 stream_.userBuffer[i] = 0;
\r
3167 if ( stream_.deviceBuffer ) {
\r
3168 free( stream_.deviceBuffer );
\r
3169 stream_.deviceBuffer = 0;
\r
// Close the open stream: stop it if running, release the ASIO buffers
// and driver, then free the AsioHandle, user buffers, and device buffer.
void RtApiAsio :: closeStream()
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );

  if ( stream_.state == STREAM_RUNNING ) {
    stream_.state = STREAM_STOPPED;
    // NOTE(review): an ASIOStop() call inside this branch appears to be
    // elided in this excerpt.

  ASIODisposeBuffers();
  drivers.removeCurrentDriver();

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
    CloseHandle( handle->condition );
    if ( handle->bufferInfos )
      free( handle->bufferInfos );
    stream_.apiHandle = 0;

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
\r
// Cleared by startStream(); NOTE(review): the code that sets it true is
// not visible in this excerpt -- presumably guards duplicate stop
// requests from the callback thread.  Confirm against the full file.
bool stopThreadCalled = false;
\r
// Start the open stream: reset the drain state and the drain-complete
// event, call ASIOStart(), and mark the stream running.
void RtApiAsio :: startStream()
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiAsio::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  ASIOError result = ASIOStart();
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
    errorText_ = errorStream_.str();

  handle->drainCounter = 0;
  handle->internalDrain = false;
  ResetEvent( handle->condition );   // manual-reset event; re-armed for the next drain
  stream_.state = STREAM_RUNNING;

  stopThreadCalled = false;

  if ( result == ASE_OK ) return;
  error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream, first letting any queued output drain: set
// drainCounter and block on the handle's event until callbackEvent()
// signals that draining finished, then call ASIOStop().
void RtApiAsio :: stopStream()
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      WaitForSingleObject( handle->condition, INFINITE ); // block until signaled

  stream_.state = STREAM_STOPPED;

  ASIOError result = ASIOStop();
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
    errorText_ = errorStream_.str();

  if ( result == ASE_OK ) return;
  error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the stream.  Deliberately identical to stopStream() (the
// fast-abort path is commented out below); see the explanation in the
// retained comment.
void RtApiAsio :: abortStream()
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  // The following lines were commented-out because some behavior was
  // noted where the device buffers need to be zeroed to avoid
  // continuing sound, even when the device buffers are completely
  // disposed.  So now, calling abort is the same as calling stop.
  // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  // handle->drainCounter = 2;
  // NOTE(review): the stopStream() call that should follow here is
  // elided in this excerpt.
\r
// This function will be called by a spawned thread when the user
// callback function signals that the stream should be stopped or
// aborted.  It is necessary to handle it this way because the
// callbackEvent() function must return before the ASIOStop()
// function will return.
static unsigned __stdcall asioStopStream( void *ptr )
  CallbackInfo *info = (CallbackInfo *) ptr;
  RtApiAsio *object = (RtApiAsio *) info->object;

  object->stopStream();
  _endthreadex( 0 );   // terminate this helper thread
\r
// Per-buffer processing invoked from bufferSwitch(): run the user
// callback, copy/convert/byte-swap user data into the ASIO output
// channel buffers for half `bufferIndex`, and gather ASIO input channel
// buffers back to the user.  Also drives the drain protocol used by
// stopStream().  Early `return`s and some closing braces are elided in
// this excerpt.
bool RtApiAsio :: callbackEvent( long bufferIndex )
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal if finished.
  if ( handle->drainCounter > 3 ) {
    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == false )
      // stopStream() is blocked on this event; wake it.
      SetEvent( handle->condition );
    else { // spawn a thread to stop the stream
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    if ( stream_.mode != INPUT && asioXRun == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
    if ( stream_.mode != OUTPUT && asioXRun == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    // Return value 2 = abort immediately; 1 = drain then stop.
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;

  unsigned int nChannels, bufferBytes, i, j;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );

    else if ( stream_.doConvertBuffer[0] ) {
      // Convert user format/interleaving into the device buffer first.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[0],
                        stream_.deviceFormat[0] );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );

      // No conversion needed: copy straight from the user buffer.
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.userBuffer[0],
                        stream_.bufferSize * stream_.nUserChannels[0],
                        stream_.userFormat );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    // NOTE(review): a jump past the input section appears to be elided
    // here in this excerpt.

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

    if (stream_.doConvertBuffer[1]) {

      // Always interleave ASIO input data.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue )
          memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                  handle->bufferInfos[i].buffers[bufferIndex],

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[1],
                        stream_.deviceFormat[1] );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
          memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                  handle->bufferInfos[i].buffers[bufferIndex],

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.userBuffer[1],
                        stream_.bufferSize * stream_.nUserChannels[1],
                        stream_.userFormat );

  // The following call was suggested by Malte Clasen.  While the API
  // documentation indicates it should not be required, some device
  // drivers apparently do not function correctly without it.
  ASIOOutputReady();

  RtApi::tickStreamTime();
\r
// ASIO sampleRateDidChange callback: stop the stream and tell the user,
// since RtAudio cannot continue at an unexpected rate.  The `try {`
// matching the catch below is elided in this excerpt.
static void sampleRateChanged( ASIOSampleRate sRate )
  // The ASIO documentation says that this usually only happens during
  // external sync.  Audio processing is not stopped by the driver,
  // actual sample rate might not have even changed, maybe only the
  // sample rate status of an AES/EBU or S/PDIF digital input at the
  // audio device.

  RtApi *object = (RtApi *) asioCallbackInfo->object;
    object->stopStream();
  catch ( RtAudioError &exception ) {
    std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;

  std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
// ASIO asioMessage callback: answers driver queries about which
// notifications the host supports and reacts to reset/resync/latency
// notifications.  The per-case `ret = ...` / `break;` lines are elided
// in this excerpt.
static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
  switch( selector ) {
  case kAsioSelectorSupported:
    if ( value == kAsioResetRequest
         || value == kAsioEngineVersion
         || value == kAsioResyncRequest
         || value == kAsioLatenciesChanged
         // The following three were added for ASIO 2.0, you don't
         // necessarily have to support them.
         || value == kAsioSupportsTimeInfo
         || value == kAsioSupportsTimeCode
         || value == kAsioSupportsInputMonitor)
  case kAsioResetRequest:
    // Defer the task and perform the reset of the driver during the
    // next "safe" situation.  You cannot reset the driver right now,
    // as this code is called from the driver.  Resetting the driver is
    // done by completely destructing it, i.e. ASIOStop(),
    // ASIODisposeBuffers(), destruction; afterwards you re-initialize
    // the driver.
    std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
  case kAsioResyncRequest:
    // This informs the application that the driver encountered some
    // non-fatal data loss.  It is used for synchronization purposes
    // of different media.  Added mainly to work around the Win16Mutex
    // problems in Windows 95/98 with the Windows Multimedia system,
    // which could lose data because the Mutex was held too long by
    // another thread.  However a driver can issue it in other
    // situations, too.
    // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
  case kAsioLatenciesChanged:
    // This will inform the host application that the driver's
    // latencies changed.  Beware, this does not mean that the
    // buffer sizes have changed!  You might need to update internal
    // delay data.
    std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
  case kAsioEngineVersion:
    // Return the supported ASIO version of the host application.  If
    // a host application does not implement this selector, ASIO 1.0
    // is assumed by the driver.
  case kAsioSupportsTimeInfo:
    // Informs the driver whether the
    // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
    // For compatibility with ASIO 1.0 drivers the host application
    // should always support the "old" bufferSwitch method, too.
  case kAsioSupportsTimeCode:
    // Informs the driver whether application is interested in time
    // code info.  If an application does not need to know about time
    // code, the driver has less work to do.
\r
// Map an ASIOError code to a human-readable message; returns a static
// string ("Unknown error." for unrecognized codes).
static const char* getAsioErrorString( ASIOError result )
  // NOTE(review): the `struct Messages { ASIOError value; ... };`
  // declaration these lines belong to is partially elided in this
  // excerpt -- only the `message` member line survives.
  const char*message;

  static const Messages m[] =
    { ASE_NotPresent, "Hardware input or output is not present or available." },
    { ASE_HWMalfunction, "Hardware is malfunctioning." },
    { ASE_InvalidParameter, "Invalid input parameter." },
    { ASE_InvalidMode, "Invalid mode." },
    { ASE_SPNotAdvancing, "Sample position not advancing." },
    { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
    { ASE_NoMemory, "Not enough memory to complete the request." }

  // Linear scan of the small fixed table.
  for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
    if ( m[i].value == result ) return m[i].message;

  return "Unknown error.";
\r
3578 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3582 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3584 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3585 // - Introduces support for the Windows WASAPI API
\r
3586 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3587 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3588 // - Includes automatic internal conversion of sample rate, buffer size and channel count
\r
3593 #include <audioclient.h>
\r
3595 #include <mmdeviceapi.h>
\r
3596 #include <functiondiscoverykeys_devpkey.h>
\r
3598 //=============================================================================
\r
// Release a COM interface pointer and reset it to NULL.
// The guard makes it a no-op when objectPtr is already NULL, so it is safe
// to invoke repeatedly (used throughout the WASAPI cleanup paths below,
// deliberately without a trailing semicolon at call sites).
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
\r
3607 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3609 //-----------------------------------------------------------------------------
\r
3611 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3612 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3613 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3614 // provide intermediate storage for read / write synchronization.
\r
3615 class WasapiBuffer
\r
3619 : buffer_( NULL ),
\r
3628 // sets the length of the internal ring buffer
\r
3629 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3632 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3634 bufferSize_ = bufferSize;
\r
3639 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3640 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3642 if ( !buffer || // incoming buffer is NULL
\r
3643 bufferSize == 0 || // incoming buffer has no data
\r
3644 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3649 unsigned int relOutIndex = outIndex_;
\r
3650 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3651 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3652 relOutIndex += bufferSize_;
\r
3655 // "in" index can end on the "out" index but cannot begin at it
\r
3656 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3657 return false; // not enough space between "in" index and "out" index
\r
3660 // copy buffer from external to internal
\r
3661 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3662 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3663 int fromInSize = bufferSize - fromZeroSize;
\r
3667 case RTAUDIO_SINT8:
\r
3668 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3669 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3671 case RTAUDIO_SINT16:
\r
3672 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3673 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3675 case RTAUDIO_SINT24:
\r
3676 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3677 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3679 case RTAUDIO_SINT32:
\r
3680 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3681 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3683 case RTAUDIO_FLOAT32:
\r
3684 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3685 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3687 case RTAUDIO_FLOAT64:
\r
3688 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3689 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3693 // update "in" index
\r
3694 inIndex_ += bufferSize;
\r
3695 inIndex_ %= bufferSize_;
\r
3700 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3701 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3703 if ( !buffer || // incoming buffer is NULL
\r
3704 bufferSize == 0 || // incoming buffer has no data
\r
3705 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3710 unsigned int relInIndex = inIndex_;
\r
3711 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3712 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3713 relInIndex += bufferSize_;
\r
3716 // "out" index can begin at and end on the "in" index
\r
3717 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3718 return false; // not enough space between "out" index and "in" index
\r
3721 // copy buffer from internal to external
\r
3722 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3723 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3724 int fromOutSize = bufferSize - fromZeroSize;
\r
3728 case RTAUDIO_SINT8:
\r
3729 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3730 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3732 case RTAUDIO_SINT16:
\r
3733 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3734 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3736 case RTAUDIO_SINT24:
\r
3737 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3738 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3740 case RTAUDIO_SINT32:
\r
3741 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3742 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3744 case RTAUDIO_FLOAT32:
\r
3745 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3746 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3748 case RTAUDIO_FLOAT64:
\r
3749 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3750 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3754 // update "out" index
\r
3755 outIndex_ += bufferSize;
\r
3756 outIndex_ %= bufferSize_;
\r
3763 unsigned int bufferSize_;
\r
3764 unsigned int inIndex_;
\r
3765 unsigned int outIndex_;
\r
3768 //-----------------------------------------------------------------------------
\r
3770 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate and
\r
3771 // channel counts between HW and the user. The convertBufferWasapi function is used to perform
\r
3772 // these conversions between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3773 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3774 // one rate and its multiple.
\r
3775 void convertBufferWasapi( char* outBuffer,
\r
3776 const char* inBuffer,
\r
3777 const unsigned int& inChannelCount,
\r
3778 const unsigned int& outChannelCount,
\r
3779 const unsigned int& inSampleRate,
\r
3780 const unsigned int& outSampleRate,
\r
3781 const unsigned int& inSampleCount,
\r
3782 unsigned int& outSampleCount,
\r
3783 const RtAudioFormat& format )
\r
3785 // calculate the new outSampleCount and relative sampleStep
\r
3786 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3787 float sampleStep = 1.0f / sampleRatio;
\r
3788 float inSampleFraction = 0.0f;
\r
3789 unsigned int commonChannelCount = std::min( inChannelCount, outChannelCount );
\r
3791 outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio );
\r
3793 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3794 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3796 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3800 case RTAUDIO_SINT8:
\r
3801 memcpy( &( ( char* ) outBuffer )[ outSample * outChannelCount ], &( ( char* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( char ) );
\r
3803 case RTAUDIO_SINT16:
\r
3804 memcpy( &( ( short* ) outBuffer )[ outSample * outChannelCount ], &( ( short* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( short ) );
\r
3806 case RTAUDIO_SINT24:
\r
3807 memcpy( &( ( S24* ) outBuffer )[ outSample * outChannelCount ], &( ( S24* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( S24 ) );
\r
3809 case RTAUDIO_SINT32:
\r
3810 memcpy( &( ( int* ) outBuffer )[ outSample * outChannelCount ], &( ( int* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( int ) );
\r
3812 case RTAUDIO_FLOAT32:
\r
3813 memcpy( &( ( float* ) outBuffer )[ outSample * outChannelCount ], &( ( float* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( float ) );
\r
3815 case RTAUDIO_FLOAT64:
\r
3816 memcpy( &( ( double* ) outBuffer )[ outSample * outChannelCount ], &( ( double* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( double ) );
\r
3820 // jump to next in sample
\r
3821 inSampleFraction += sampleStep;
\r
3825 //-----------------------------------------------------------------------------
\r
3827 // A structure to hold various information related to the WASAPI implementation.
\r
3828 struct WasapiHandle
\r
3830 IAudioClient* captureAudioClient;
\r
3831 IAudioClient* renderAudioClient;
\r
3832 IAudioCaptureClient* captureClient;
\r
3833 IAudioRenderClient* renderClient;
\r
3834 HANDLE captureEvent;
\r
3835 HANDLE renderEvent;
\r
3838 : captureAudioClient( NULL ),
\r
3839 renderAudioClient( NULL ),
\r
3840 captureClient( NULL ),
\r
3841 renderClient( NULL ),
\r
3842 captureEvent( NULL ),
\r
3843 renderEvent( NULL ) {}
\r
3846 //=============================================================================
\r
3848 RtApiWasapi::RtApiWasapi()
\r
3849 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3851 // WASAPI can run either apartment or multi-threaded
\r
3852 HRESULT hr = CoInitialize( NULL );
\r
3854 if ( !FAILED( hr ) )
\r
3855 coInitialized_ = true;
\r
3857 // Instantiate device enumerator
\r
3858 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3859 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3860 ( void** ) &deviceEnumerator_ );
\r
3862 if ( FAILED( hr ) ) {
\r
3863 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3864 error( RtAudioError::DRIVER_ERROR );
\r
3868 //-----------------------------------------------------------------------------
\r
3870 RtApiWasapi::~RtApiWasapi()
\r
3872 // if this object previously called CoInitialize()
\r
3873 if ( coInitialized_ ) {
\r
3877 if ( stream_.state != STREAM_CLOSED ) {
\r
3881 SAFE_RELEASE( deviceEnumerator_ );
\r
3884 //=============================================================================
\r
3886 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3888 unsigned int captureDeviceCount = 0;
\r
3889 unsigned int renderDeviceCount = 0;
\r
3891 IMMDeviceCollection* captureDevices = NULL;
\r
3892 IMMDeviceCollection* renderDevices = NULL;
\r
3894 // Count capture devices
\r
3895 errorText_.clear();
\r
3896 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3897 if ( FAILED( hr ) ) {
\r
3898 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3902 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3903 if ( FAILED( hr ) ) {
\r
3904 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3908 // Count render devices
\r
3909 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3910 if ( FAILED( hr ) ) {
\r
3911 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
3915 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3916 if ( FAILED( hr ) ) {
\r
3917 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
3922 // release all references
\r
3923 SAFE_RELEASE( captureDevices );
\r
3924 SAFE_RELEASE( renderDevices );
\r
3926 if ( errorText_.empty() )
\r
3927 return captureDeviceCount + renderDeviceCount;
\r
3929 error( RtAudioError::DRIVER_ERROR );
\r
3933 //-----------------------------------------------------------------------------
\r
3935 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
3937 RtAudio::DeviceInfo info;
\r
3938 unsigned int captureDeviceCount = 0;
\r
3939 unsigned int renderDeviceCount = 0;
\r
3940 std::wstring deviceName;
\r
3941 std::string defaultDeviceName;
\r
3942 bool isCaptureDevice = false;
\r
3944 PROPVARIANT deviceNameProp;
\r
3945 PROPVARIANT defaultDeviceNameProp;
\r
3947 IMMDeviceCollection* captureDevices = NULL;
\r
3948 IMMDeviceCollection* renderDevices = NULL;
\r
3949 IMMDevice* devicePtr = NULL;
\r
3950 IMMDevice* defaultDevicePtr = NULL;
\r
3951 IAudioClient* audioClient = NULL;
\r
3952 IPropertyStore* devicePropStore = NULL;
\r
3953 IPropertyStore* defaultDevicePropStore = NULL;
\r
3955 WAVEFORMATEX* deviceFormat = NULL;
\r
3956 WAVEFORMATEX* closestMatchFormat = NULL;
\r
3959 info.probed = false;
\r
3961 // Count capture devices
\r
3962 errorText_.clear();
\r
3963 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
3964 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3965 if ( FAILED( hr ) ) {
\r
3966 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
3970 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3971 if ( FAILED( hr ) ) {
\r
3972 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
3976 // Count render devices
\r
3977 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3978 if ( FAILED( hr ) ) {
\r
3979 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
3983 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3984 if ( FAILED( hr ) ) {
\r
3985 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
3989 // validate device index
\r
3990 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
3991 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
3992 errorType = RtAudioError::INVALID_USE;
\r
3996 // determine whether index falls within capture or render devices
\r
3997 if ( device >= renderDeviceCount ) {
\r
3998 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
3999 if ( FAILED( hr ) ) {
\r
4000 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4003 isCaptureDevice = true;
\r
4006 hr = renderDevices->Item( device, &devicePtr );
\r
4007 if ( FAILED( hr ) ) {
\r
4008 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4011 isCaptureDevice = false;
\r
4014 // get default device name
\r
4015 if ( isCaptureDevice ) {
\r
4016 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4017 if ( FAILED( hr ) ) {
\r
4018 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4023 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4024 if ( FAILED( hr ) ) {
\r
4025 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4030 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4031 if ( FAILED( hr ) ) {
\r
4032 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4035 PropVariantInit( &defaultDeviceNameProp );
\r
4037 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4038 if ( FAILED( hr ) ) {
\r
4039 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4043 deviceName = defaultDeviceNameProp.pwszVal;
\r
4044 defaultDeviceName = std::string( deviceName.begin(), deviceName.end() );
\r
4047 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4048 if ( FAILED( hr ) ) {
\r
4049 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4053 PropVariantInit( &deviceNameProp );
\r
4055 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4056 if ( FAILED( hr ) ) {
\r
4057 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4061 deviceName = deviceNameProp.pwszVal;
\r
4062 info.name = std::string( deviceName.begin(), deviceName.end() );
\r
4065 if ( isCaptureDevice ) {
\r
4066 info.isDefaultInput = info.name == defaultDeviceName;
\r
4067 info.isDefaultOutput = false;
\r
4070 info.isDefaultInput = false;
\r
4071 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4075 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4076 if ( FAILED( hr ) ) {
\r
4077 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4081 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4082 if ( FAILED( hr ) ) {
\r
4083 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4087 if ( isCaptureDevice ) {
\r
4088 info.inputChannels = deviceFormat->nChannels;
\r
4089 info.outputChannels = 0;
\r
4090 info.duplexChannels = 0;
\r
4093 info.inputChannels = 0;
\r
4094 info.outputChannels = deviceFormat->nChannels;
\r
4095 info.duplexChannels = 0;
\r
4099 info.sampleRates.clear();
\r
4101 // allow support for all sample rates as we have a built-in sample rate converter
\r
4102 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4103 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4107 info.nativeFormats = 0;
\r
4109 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4110 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4111 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4113 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4114 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4116 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4117 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4120 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4121 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4122 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4124 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4125 info.nativeFormats |= RTAUDIO_SINT8;
\r
4127 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4128 info.nativeFormats |= RTAUDIO_SINT16;
\r
4130 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4131 info.nativeFormats |= RTAUDIO_SINT24;
\r
4133 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4134 info.nativeFormats |= RTAUDIO_SINT32;
\r
4139 info.probed = true;
\r
4142 // release all references
\r
4143 PropVariantClear( &deviceNameProp );
\r
4144 PropVariantClear( &defaultDeviceNameProp );
\r
4146 SAFE_RELEASE( captureDevices );
\r
4147 SAFE_RELEASE( renderDevices );
\r
4148 SAFE_RELEASE( devicePtr );
\r
4149 SAFE_RELEASE( defaultDevicePtr );
\r
4150 SAFE_RELEASE( audioClient );
\r
4151 SAFE_RELEASE( devicePropStore );
\r
4152 SAFE_RELEASE( defaultDevicePropStore );
\r
4154 CoTaskMemFree( deviceFormat );
\r
4155 CoTaskMemFree( closestMatchFormat );
\r
4157 if ( !errorText_.empty() )
\r
4158 error( errorType );
\r
4162 //-----------------------------------------------------------------------------
\r
4164 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4166 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4167 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4175 //-----------------------------------------------------------------------------
\r
4177 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4179 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4180 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4188 //-----------------------------------------------------------------------------
\r
4190 void RtApiWasapi::closeStream( void )
\r
4192 if ( stream_.state == STREAM_CLOSED ) {
\r
4193 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4194 error( RtAudioError::WARNING );
\r
4198 if ( stream_.state != STREAM_STOPPED )
\r
4201 // clean up stream memory
\r
4202 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4203 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4205 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4206 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4208 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4209 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4211 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4212 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4214 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4215 stream_.apiHandle = NULL;
\r
4217 for ( int i = 0; i < 2; i++ ) {
\r
4218 if ( stream_.userBuffer[i] ) {
\r
4219 free( stream_.userBuffer[i] );
\r
4220 stream_.userBuffer[i] = 0;
\r
4224 if ( stream_.deviceBuffer ) {
\r
4225 free( stream_.deviceBuffer );
\r
4226 stream_.deviceBuffer = 0;
\r
4229 // update stream state
\r
4230 stream_.state = STREAM_CLOSED;
\r
4233 //-----------------------------------------------------------------------------
\r
4235 void RtApiWasapi::startStream( void )
\r
4239 if ( stream_.state == STREAM_RUNNING ) {
\r
4240 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4241 error( RtAudioError::WARNING );
\r
4245 // update stream state
\r
4246 stream_.state = STREAM_RUNNING;
\r
4248 // create WASAPI stream thread
\r
4249 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4251 if ( !stream_.callbackInfo.thread ) {
\r
4252 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4253 error( RtAudioError::THREAD_ERROR );
\r
4256 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4257 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4261 //-----------------------------------------------------------------------------
\r
4263 void RtApiWasapi::stopStream( void )
\r
4267 if ( stream_.state == STREAM_STOPPED ) {
\r
4268 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4269 error( RtAudioError::WARNING );
\r
4273 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4274 stream_.state = STREAM_STOPPING;
\r
4276 // wait until stream thread is stopped
\r
4277 while( stream_.state != STREAM_STOPPED ) {
\r
4281 // Wait for the last buffer to play before stopping.
\r
4282 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4284 // stop capture client if applicable
\r
4285 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4286 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4287 if ( FAILED( hr ) ) {
\r
4288 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4289 error( RtAudioError::DRIVER_ERROR );
\r
4294 // stop render client if applicable
\r
4295 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4296 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4297 if ( FAILED( hr ) ) {
\r
4298 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4299 error( RtAudioError::DRIVER_ERROR );
\r
4304 // close thread handle
\r
4305 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4306 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4307 error( RtAudioError::THREAD_ERROR );
\r
4311 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4314 //-----------------------------------------------------------------------------
\r
4316 void RtApiWasapi::abortStream( void )
\r
4320 if ( stream_.state == STREAM_STOPPED ) {
\r
4321 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4322 error( RtAudioError::WARNING );
\r
4326 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4327 stream_.state = STREAM_STOPPING;
\r
4329 // wait until stream thread is stopped
\r
4330 while ( stream_.state != STREAM_STOPPED ) {
\r
4334 // stop capture client if applicable
\r
4335 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4336 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4337 if ( FAILED( hr ) ) {
\r
4338 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4339 error( RtAudioError::DRIVER_ERROR );
\r
4344 // stop render client if applicable
\r
4345 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4346 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4347 if ( FAILED( hr ) ) {
\r
4348 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4349 error( RtAudioError::DRIVER_ERROR );
\r
4354 // close thread handle
\r
4355 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4356 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4357 error( RtAudioError::THREAD_ERROR );
\r
4361 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4364 //-----------------------------------------------------------------------------
\r
4366 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4367 unsigned int firstChannel, unsigned int sampleRate,
\r
4368 RtAudioFormat format, unsigned int* bufferSize,
\r
4369 RtAudio::StreamOptions* options )
\r
4371 bool methodResult = FAILURE;
\r
4372 unsigned int captureDeviceCount = 0;
\r
4373 unsigned int renderDeviceCount = 0;
\r
4375 IMMDeviceCollection* captureDevices = NULL;
\r
4376 IMMDeviceCollection* renderDevices = NULL;
\r
4377 IMMDevice* devicePtr = NULL;
\r
4378 WAVEFORMATEX* deviceFormat = NULL;
\r
4379 unsigned int bufferBytes;
\r
4380 stream_.state = STREAM_STOPPED;
\r
4382 // create API Handle if not already created
\r
4383 if ( !stream_.apiHandle )
\r
4384 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4386 // Count capture devices
\r
4387 errorText_.clear();
\r
4388 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4389 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4390 if ( FAILED( hr ) ) {
\r
4391 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4395 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4396 if ( FAILED( hr ) ) {
\r
4397 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4401 // Count render devices
\r
4402 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4403 if ( FAILED( hr ) ) {
\r
4404 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4408 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4409 if ( FAILED( hr ) ) {
\r
4410 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4414 // validate device index
\r
4415 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4416 errorType = RtAudioError::INVALID_USE;
\r
4417 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4421 // determine whether index falls within capture or render devices
\r
4422 if ( device >= renderDeviceCount ) {
\r
4423 if ( mode != INPUT ) {
\r
4424 errorType = RtAudioError::INVALID_USE;
\r
4425 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4429 // retrieve captureAudioClient from devicePtr
\r
4430 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4432 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4433 if ( FAILED( hr ) ) {
\r
4434 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4438 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4439 NULL, ( void** ) &captureAudioClient );
\r
4440 if ( FAILED( hr ) ) {
\r
4441 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4445 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4446 if ( FAILED( hr ) ) {
\r
4447 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4451 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4452 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4455 if ( mode != OUTPUT ) {
\r
4456 errorType = RtAudioError::INVALID_USE;
\r
4457 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4461 // retrieve renderAudioClient from devicePtr
\r
4462 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4464 hr = renderDevices->Item( device, &devicePtr );
\r
4465 if ( FAILED( hr ) ) {
\r
4466 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4470 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4471 NULL, ( void** ) &renderAudioClient );
\r
4472 if ( FAILED( hr ) ) {
\r
4473 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4477 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4478 if ( FAILED( hr ) ) {
\r
4479 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4483 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4484 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4487 // fill stream data
\r
4488 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4489 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4490 stream_.mode = DUPLEX;
\r
4493 stream_.mode = mode;
\r
4496 stream_.device[mode] = device;
\r
4497 stream_.doByteSwap[mode] = false;
\r
4498 stream_.sampleRate = sampleRate;
\r
4499 stream_.bufferSize = *bufferSize;
\r
4500 stream_.nBuffers = 1;
\r
4501 stream_.nUserChannels[mode] = channels;
\r
4502 stream_.channelOffset[mode] = firstChannel;
\r
4503 stream_.userFormat = format;
\r
4504 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4506 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4507 stream_.userInterleaved = false;
\r
4509 stream_.userInterleaved = true;
\r
4510 stream_.deviceInterleaved[mode] = true;
\r
4512 // Set flags for buffer conversion.
\r
4513 stream_.doConvertBuffer[mode] = false;
\r
4514 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
4515 stream_.doConvertBuffer[mode] = true;
\r
4516 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4517 stream_.nUserChannels[mode] > 1 )
\r
4518 stream_.doConvertBuffer[mode] = true;
\r
4520 if ( stream_.doConvertBuffer[mode] )
\r
4521 setConvertInfo( mode, 0 );
\r
4523 // Allocate necessary internal buffers
\r
4524 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4526 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4527 if ( !stream_.userBuffer[mode] ) {
\r
4528 errorType = RtAudioError::MEMORY_ERROR;
\r
4529 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4533 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4534 stream_.callbackInfo.priority = 15;
\r
4536 stream_.callbackInfo.priority = 0;
\r
4538 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4539 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4541 methodResult = SUCCESS;
\r
4545 SAFE_RELEASE( captureDevices );
\r
4546 SAFE_RELEASE( renderDevices );
\r
4547 SAFE_RELEASE( devicePtr );
\r
4548 CoTaskMemFree( deviceFormat );
\r
4550 // if method failed, close the stream
\r
4551 if ( methodResult == FAILURE )
\r
4554 if ( !errorText_.empty() )
\r
4555 error( errorType );
\r
4556 return methodResult;
\r
4559 //=============================================================================
\r
4561 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4564 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4569 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4572 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4577 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4580 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4585 //-----------------------------------------------------------------------------
\r
// Core WASAPI stream-processing loop.  Runs on a dedicated thread (entered
// through runWasapiThread).  On first entry it lazily creates/initializes the
// shared-mode capture and/or render IAudioClient services, sizes the
// intermediate ring buffers, then loops until stream_.state becomes
// STREAM_STOPPING.  Each iteration:
//   1. pulls captured device frames, resamples/format-converts them into the
//      user input buffer,
//   2. invokes the user callback (return 1 -> spawn stopWasapiThread,
//      return 2 -> spawn abortWasapiThread; this thread cannot join itself),
//   3. converts the user output buffer to the device rate/format and pushes
//      it to the render client.
// On exit it frees the mix formats and conversion buffer and marks the stream
// STREAM_STOPPED, reporting any recorded error.
// NOTE(review): this extract is missing many original lines (opening/closing
// braces, the "goto Exit" error paths after each errorText_ assignment, the
// Exit: cleanup label, and several else-clauses); the code below is kept
// verbatim from the extract and is NOT compilable as shown.
4587 void RtApiWasapi::wasapiThread()
4589 // as this is a new thread, we must CoInitialize it
4590 CoInitialize( NULL );
// Unpack the per-API handle populated by probeDeviceOpen().
4594 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4595 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4596 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4597 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4598 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4599 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4601 WAVEFORMATEX* captureFormat = NULL;
4602 WAVEFORMATEX* renderFormat = NULL;
// Device-rate / user-rate ratios used to size and drive the resampler.
4603 float captureSrRatio = 0.0f;
4604 float renderSrRatio = 0.0f;
4605 WasapiBuffer captureBuffer;
4606 WasapiBuffer renderBuffer;
4608 // declare local stream variables
4609 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4610 BYTE* streamBuffer = NULL;
4611 unsigned long captureFlags = 0;
4612 unsigned int bufferFrameCount = 0;
4613 unsigned int numFramesPadding = 0;
// NOTE(review): convBufferSize is never assigned in the visible extract yet is
// used at original line 4956 below — either the assignment was dropped with
// the missing lines, or this is a latent bug (cf. the similarly named
// convBuffSize) — TODO confirm against the canonical RtAudio source.
4614 unsigned int convBufferSize = 0;
4615 bool callbackPushed = false;
4616 bool callbackPulled = false;
4617 bool callbackStopped = false;
4618 int callbackResult = 0;
4620 // convBuffer is used to store converted buffers between WASAPI and the user
4621 char* convBuffer = NULL;
4622 unsigned int convBuffSize = 0;
4623 unsigned int deviceBuffSize = 0;
4625 errorText_.clear();
4626 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4628 // Attempt to assign "Pro Audio" characteristic to thread
// NOTE(review): GetProcAddress/AvSetMmThreadCharacteristicsPtr results are
// used unchecked here; a NULL AvrtDll or missing export would fault —
// presumably acceptable on the supported Windows versions, but verify.
4629 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4631 DWORD taskIndex = 0;
4632 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4633 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4634 FreeLibrary( AvrtDll );
4637 // start capture stream if applicable
4638 if ( captureAudioClient ) {
4639 hr = captureAudioClient->GetMixFormat( &captureFormat );
4640 if ( FAILED( hr ) ) {
4641 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4645 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4647 // initialize capture stream according to desire buffer size
4648 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
// REFERENCE_TIME is expressed in 100-ns units, hence the 1e7 factor.
4649 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
// Lazy one-time initialization: only the first pass through this thread
// creates the capture client/event; they are cached in the WasapiHandle.
4651 if ( !captureClient ) {
4652 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4653 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4654 desiredBufferPeriod,
4655 desiredBufferPeriod,
4658 if ( FAILED( hr ) ) {
4659 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4663 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4664 ( void** ) &captureClient );
4665 if ( FAILED( hr ) ) {
4666 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4670 // configure captureEvent to trigger on every available capture buffer
4671 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4672 if ( !captureEvent ) {
4673 errorType = RtAudioError::SYSTEM_ERROR;
4674 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4678 hr = captureAudioClient->SetEventHandle( captureEvent );
4679 if ( FAILED( hr ) ) {
4680 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
4684 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4685 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4688 unsigned int inBufferSize = 0;
4689 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4690 if ( FAILED( hr ) ) {
4691 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4695 // scale outBufferSize according to stream->user sample rate ratio
4696 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
4697 inBufferSize *= stream_.nDeviceChannels[INPUT];
4699 // set captureBuffer size
4700 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4702 // reset the capture stream
4703 hr = captureAudioClient->Reset();
4704 if ( FAILED( hr ) ) {
4705 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4709 // start the capture stream
4710 hr = captureAudioClient->Start();
4711 if ( FAILED( hr ) ) {
4712 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4717 // start render stream if applicable
// Mirror image of the capture setup above, for the output direction.
4718 if ( renderAudioClient ) {
4719 hr = renderAudioClient->GetMixFormat( &renderFormat );
4720 if ( FAILED( hr ) ) {
4721 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4725 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
4727 // initialize render stream according to desire buffer size
4728 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
4729 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
4731 if ( !renderClient ) {
4732 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4733 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4734 desiredBufferPeriod,
4735 desiredBufferPeriod,
4738 if ( FAILED( hr ) ) {
4739 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4743 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4744 ( void** ) &renderClient );
4745 if ( FAILED( hr ) ) {
4746 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4750 // configure renderEvent to trigger on every available render buffer
4751 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4752 if ( !renderEvent ) {
4753 errorType = RtAudioError::SYSTEM_ERROR;
4754 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4758 hr = renderAudioClient->SetEventHandle( renderEvent );
4759 if ( FAILED( hr ) ) {
4760 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4764 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4765 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4768 unsigned int outBufferSize = 0;
4769 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4770 if ( FAILED( hr ) ) {
4771 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4775 // scale inBufferSize according to user->stream sample rate ratio
4776 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
4777 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4779 // set renderBuffer size
4780 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4782 // reset the render stream
4783 hr = renderAudioClient->Reset();
4784 if ( FAILED( hr ) ) {
4785 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4789 // start the render stream
4790 hr = renderAudioClient->Start();
4791 if ( FAILED( hr ) ) {
4792 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
// Size the intermediate conversion/device buffers for the active direction(s);
// DUPLEX takes the max of both so one allocation serves either side.
4797 if ( stream_.mode == INPUT ) {
4798 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4799 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4801 else if ( stream_.mode == OUTPUT ) {
4802 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4803 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4805 else if ( stream_.mode == DUPLEX ) {
4806 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4807 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4808 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4809 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4812 convBuffer = ( char* ) malloc( convBuffSize );
4813 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4814 if ( !convBuffer || !stream_.deviceBuffer ) {
4815 errorType = RtAudioError::MEMORY_ERROR;
4816 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
4820 // stream process loop
4821 while ( stream_.state != STREAM_STOPPING ) {
4822 if ( !callbackPulled ) {
4825 // 1. Pull callback buffer from inputBuffer
4826 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
4827 // Convert callback buffer to user format
4829 if ( captureAudioClient ) {
4830 // Pull callback buffer from inputBuffer
4831 callbackPulled = captureBuffer.pullBuffer( convBuffer,
4832 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
4833 stream_.deviceFormat[INPUT] );
4835 if ( callbackPulled ) {
4836 // Convert callback buffer to user sample rate and channel count
4837 convertBufferWasapi( stream_.deviceBuffer,
4839 stream_.nDeviceChannels[INPUT],
4840 stream_.nUserChannels[INPUT],
4841 captureFormat->nSamplesPerSec,
4842 stream_.sampleRate,
4843 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
4845 stream_.deviceFormat[INPUT] );
4847 if ( stream_.doConvertBuffer[INPUT] ) {
4848 // Convert callback buffer to user format
4849 convertBuffer( stream_.userBuffer[INPUT],
4850 stream_.deviceBuffer,
4851 stream_.convertInfo[INPUT] );
4854 // no conversion, simple copy deviceBuffer to userBuffer
4855 memcpy( stream_.userBuffer[INPUT],
4856 stream_.deviceBuffer,
4857 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
4862 // if there is no capture stream, set callbackPulled flag
4863 callbackPulled = true;
4866 // Execute Callback
4867 // ================
4868 // 1. Execute user callback method
4869 // 2. Handle return value from callback
4871 // if callback has not requested the stream to stop
4872 if ( callbackPulled && !callbackStopped ) {
4873 // Execute user callback method
4874 callbackResult = callback( stream_.userBuffer[OUTPUT],
4875 stream_.userBuffer[INPUT],
4876 stream_.bufferSize,
4878 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
4879 stream_.callbackInfo.userData );
4881 // Handle return value from callback
4882 if ( callbackResult == 1 ) {
4883 // instantiate a thread to stop this thread
4884 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
4885 if ( !threadHandle ) {
4886 errorType = RtAudioError::THREAD_ERROR;
4887 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
4890 else if ( !CloseHandle( threadHandle ) ) {
4891 errorType = RtAudioError::THREAD_ERROR;
4892 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
4896 callbackStopped = true;
4898 else if ( callbackResult == 2 ) {
4899 // instantiate a thread to stop this thread
4900 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
4901 if ( !threadHandle ) {
4902 errorType = RtAudioError::THREAD_ERROR;
4903 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
4906 else if ( !CloseHandle( threadHandle ) ) {
4907 errorType = RtAudioError::THREAD_ERROR;
4908 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
4912 callbackStopped = true;
4917 // Callback Output
4918 // ===============
4919 // 1. Convert callback buffer to stream format
4920 // 2. Convert callback buffer to stream sample rate and channel count
4921 // 3. Push callback buffer into outputBuffer
4923 if ( renderAudioClient && callbackPulled ) {
4924 if ( stream_.doConvertBuffer[OUTPUT] ) {
4925 // Convert callback buffer to stream format
4926 convertBuffer( stream_.deviceBuffer,
4927 stream_.userBuffer[OUTPUT],
4928 stream_.convertInfo[OUTPUT] );
4930 // Convert callback buffer to stream sample rate and channel count
4931 convertBufferWasapi( convBuffer,
4932 stream_.deviceBuffer,
4933 stream_.nUserChannels[OUTPUT],
4934 stream_.nDeviceChannels[OUTPUT],
4935 stream_.sampleRate,
4936 renderFormat->nSamplesPerSec,
4937 stream_.bufferSize,
4939 stream_.deviceFormat[OUTPUT] );
4942 // Convert callback buffer to stream sample rate and channel count
4943 convertBufferWasapi( convBuffer,
4944 stream_.userBuffer[OUTPUT],
4945 stream_.nUserChannels[OUTPUT],
4946 stream_.nDeviceChannels[OUTPUT],
4947 stream_.sampleRate,
4948 renderFormat->nSamplesPerSec,
4949 stream_.bufferSize,
4951 stream_.deviceFormat[OUTPUT] );
4954 // Push callback buffer into outputBuffer
4955 callbackPushed = renderBuffer.pushBuffer( convBuffer,
4956 convBufferSize * stream_.nDeviceChannels[OUTPUT],
4957 stream_.deviceFormat[OUTPUT] );
4962 // 1. Get capture buffer from stream
4963 // 2. Push capture buffer into inputBuffer
4964 // 3. If 2. was successful: Release capture buffer
4966 if ( captureAudioClient ) {
4967 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
4968 if ( !callbackPulled ) {
4969 WaitForSingleObject( captureEvent, INFINITE );
4972 // Get capture buffer from stream
4973 hr = captureClient->GetBuffer( &streamBuffer,
4974 &bufferFrameCount,
4975 &captureFlags, NULL, NULL );
4976 if ( FAILED( hr ) ) {
4977 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
4981 if ( bufferFrameCount != 0 ) {
4982 // Push capture buffer into inputBuffer
4983 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
4984 bufferFrameCount * stream_.nDeviceChannels[INPUT],
4985 stream_.deviceFormat[INPUT] ) )
4987 // Release capture buffer
4988 hr = captureClient->ReleaseBuffer( bufferFrameCount );
4989 if ( FAILED( hr ) ) {
4990 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
4996 // Inform WASAPI that capture was unsuccessful
4997 hr = captureClient->ReleaseBuffer( 0 );
4998 if ( FAILED( hr ) ) {
4999 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5006 // Inform WASAPI that capture was unsuccessful
5007 hr = captureClient->ReleaseBuffer( 0 );
5008 if ( FAILED( hr ) ) {
5009 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5017 // 1. Get render buffer from stream
5018 // 2. Pull next buffer from outputBuffer
5019 // 3. If 2. was successful: Fill render buffer with next buffer
5020 // Release render buffer
5022 if ( renderAudioClient ) {
5023 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5024 if ( callbackPulled && !callbackPushed ) {
5025 WaitForSingleObject( renderEvent, INFINITE );
5028 // Get render buffer from stream
5029 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5030 if ( FAILED( hr ) ) {
5031 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5035 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5036 if ( FAILED( hr ) ) {
5037 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable space = total buffer minus frames still queued for playback.
5041 bufferFrameCount -= numFramesPadding;
5043 if ( bufferFrameCount != 0 ) {
5044 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5045 if ( FAILED( hr ) ) {
5046 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5050 // Pull next buffer from outputBuffer
5051 // Fill render buffer with next buffer
5052 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5053 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5054 stream_.deviceFormat[OUTPUT] ) )
5056 // Release render buffer
5057 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5058 if ( FAILED( hr ) ) {
5059 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5065 // Inform WASAPI that render was unsuccessful
5066 hr = renderClient->ReleaseBuffer( 0, 0 );
5067 if ( FAILED( hr ) ) {
5068 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5075 // Inform WASAPI that render was unsuccessful
5076 hr = renderClient->ReleaseBuffer( 0, 0 );
5077 if ( FAILED( hr ) ) {
5078 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5084 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5085 if ( callbackPushed ) {
5086 callbackPulled = false;
5089 // tick stream time
5090 RtApi::tickStreamTime();
// Cleanup (the original Exit: label and some surrounding lines are missing
// from this extract): free the COM-allocated mix formats and the conversion
// buffer, then publish the stopped state and report any recorded error.
5095 CoTaskMemFree( captureFormat );
5096 CoTaskMemFree( renderFormat );
5098 //delete convBuffer;
5099 free ( convBuffer );
5103 // update stream state
5104 stream_.state = STREAM_STOPPED;
5106 if ( errorText_.empty() )
5109 error( errorType );
\r
5112 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5116 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5118 // Modified by Robin Davies, October 2005
\r
5119 // - Improvements to DirectX pointer chasing.
\r
5120 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5121 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5122 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5123 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5125 #include <dsound.h>
\r
5126 #include <assert.h>
\r
5127 #include <algorithm>
\r
5129 #if defined(__MINGW32__)
\r
5130 // missing from latest mingw winapi
\r
5131 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5132 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5133 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5134 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5137 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5139 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5140 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5143 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5145 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5146 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5147 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5148 return pointer >= earlierPointer && pointer < laterPointer;
\r
5151 // A structure to hold various information related to the DirectSound
5152 // API implementation.
// NOTE(review): the "struct DsHandle {" line and several members (id, buffer,
// xrun arrays referenced by the constructor below) were dropped by this
// extraction; the fields shown are indexed [2] as {OUTPUT, INPUT} pairs,
// matching the StreamMode convention used elsewhere in the file.
5154 unsigned int drainCounter; // Tracks callback counts when draining
5155 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Per-direction DirectSound buffer state: current write/read offset, total
// ring-buffer size, and the lead distance kept ahead of the safe pointer.
5159 UINT bufferPointer[2];
5160 DWORD dsBufferSize[2];
5161 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero/false-initialize all per-direction state.
5165 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5168 // Declarations for utility functions, callbacks, and structures
5169 // specific to the DirectSound implementation.
// Enumeration callback passed to DirectSound(Capture)Enumerate; records each
// discovered device into the DsProbeData the caller supplies as lpContext.
// (A parameter line was dropped by this extraction between 5171 and 5173.)
5170 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5171 LPCTSTR description,
5173 LPVOID lpContext );
// Maps a DirectSound HRESULT-style error code to a human-readable string.
5175 static const char* getErrorString( int code );
// Entry point for the DirectSound callback/processing thread.
5177 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice default-constructor fragment (the struct's member declarations
// were dropped by this extraction): device starts not-found with neither the
// output [0] nor input [1] GUID marked valid.
5186 : found(false) { validId[0] = false; validId[1] = false; }
// Context passed through the enumeration callbacks: which direction is being
// probed and the device list to fill.  (The isInput member line and closing
// brace were dropped by this extraction.)
5189 struct DsProbeData {
5191 std::vector<struct DsDevice>* dsDevices;
\r
// Constructor: initialize COM for this thread so DirectSound can be used.
// coInitialized_ remembers whether we own the CoInitialize so the destructor
// can issue the balancing CoUninitialize only when appropriate.
5194 RtApiDs :: RtApiDs()
5196 // Dsound will run both-threaded. If CoInitialize fails, then just
5197 // accept whatever the mainline chose for a threading model.
5198 coInitialized_ = false;
5199 HRESULT hr = CoInitialize( NULL );
5200 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
// Destructor: release COM only if this object's constructor acquired it, and
// make sure any still-open stream is closed before teardown.
5203 RtApiDs :: ~RtApiDs()
5205 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5206 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5209 // The DirectSound default output is always the first device.
// Returns the index of the default output device (index 0 per the comment
// above).  NOTE(review): the function body was dropped by this extraction.
5210 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5215 // The DirectSound default input is always the first input device,
5216 // which is the first capture device enumerated.
// Returns the index of the default input device (index 0 per the comment
// above).  NOTE(review): the function body was dropped by this extraction.
5217 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
// Re-enumerates DirectSound output and capture devices, reconciles the cached
// dsDevices list (dropping devices that have disappeared), and returns the
// resulting device count.  Enumeration failures are reported as WARNINGs and
// enumeration continues with whatever was found.
5222 unsigned int RtApiDs :: getDeviceCount( void )
5224 // Set query flag for previously found devices to false, so that we
5225 // can check for any devices that have disappeared.
5226 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5227 dsDevices[i].found = false;
5229 // Query DirectSound devices.
5230 struct DsProbeData probeInfo;
5231 probeInfo.isInput = false;
5232 probeInfo.dsDevices = &dsDevices;
// deviceQueryCallback marks existing entries found=true and appends new ones.
5233 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5234 if ( FAILED( result ) ) {
5235 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5236 errorText_ = errorStream_.str();
5237 error( RtAudioError::WARNING );
5240 // Query DirectSoundCapture devices.
5241 probeInfo.isInput = true;
5242 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5243 if ( FAILED( result ) ) {
5244 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5245 errorText_ = errorStream_.str();
5246 error( RtAudioError::WARNING );
5249 // Clean out any devices that may have disappeared.
5250 std::vector< int > indices;
5251 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5252 if ( dsDevices[i].found == false ) indices.push_back( i );
// NOTE(review): erasing by the originally recorded indices shifts later
// elements left, so when more than one device disappears the second and
// subsequent erasures hit the wrong positions.  The commented-out nErased
// lines suggest a fix was being sketched — confirm against the canonical
// RtAudio source before relying on multi-device removal.
5253 //unsigned int nErased = 0;
5254 for ( unsigned int i=0; i<indices.size(); i++ )
5255 dsDevices.erase( dsDevices.begin()+indices[i] );
5256 //dsDevices.erase( dsDevices.begin()-nErased++ );
5258 return static_cast<unsigned int>(dsDevices.size());
\r
// Probes one cached DirectSound device and fills an RtAudio::DeviceInfo:
// opens the output interface (if the device has a valid output GUID) to read
// channel/rate/format capabilities, then falls through to probeInput to do
// the same for capture.  On any failure a WARNING is raised and the partially
// filled info (probed == false) is returned.
// NOTE(review): many original lines are missing from this extract (closing
// braces, "return info;" statements, the probeInput: label, DSCAPS/DSCCAPS
// declarations); the code below is kept verbatim and is NOT compilable as
// shown.
5261 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5263 RtAudio::DeviceInfo info;
5264 info.probed = false;
5266 if ( dsDevices.size() == 0 ) {
5267 // Force a query of all devices
// Presumably getDeviceCount() is called on the dropped line here — confirm.
5269 if ( dsDevices.size() == 0 ) {
5270 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5271 error( RtAudioError::INVALID_USE );
5276 if ( device >= dsDevices.size() ) {
5277 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5278 error( RtAudioError::INVALID_USE );
// No output GUID for this device: skip straight to the capture probe.
5283 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5285 LPDIRECTSOUND output;
5287 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5288 if ( FAILED( result ) ) {
5289 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5290 errorText_ = errorStream_.str();
5291 error( RtAudioError::WARNING );
5295 outCaps.dwSize = sizeof( outCaps );
5296 result = output->GetCaps( &outCaps );
5297 if ( FAILED( result ) ) {
5298 output->Release();
5299 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5300 errorText_ = errorStream_.str();
5301 error( RtAudioError::WARNING );
5305 // Get output channel information.
5306 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5308 // Get sample rate information.
// Keep only the library's canonical rates that fall inside the device's
// supported secondary-buffer rate range.
5309 info.sampleRates.clear();
5310 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5311 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5312 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
5313 info.sampleRates.push_back( SAMPLE_RATES[k] );
5316 // Get format information.
5317 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5318 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5320 output->Release();
5322 if ( getDefaultOutputDevice() == device )
5323 info.isDefaultOutput = true;
// Output-only device: finalize and return without probing capture.
5325 if ( dsDevices[ device ].validId[1] == false ) {
5326 info.name = dsDevices[ device ].name;
5327 info.probed = true;
// probeInput: label (dropped by extraction) — capture-side probing follows.
5333 LPDIRECTSOUNDCAPTURE input;
5334 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5335 if ( FAILED( result ) ) {
5336 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5337 errorText_ = errorStream_.str();
5338 error( RtAudioError::WARNING );
5343 inCaps.dwSize = sizeof( inCaps );
5344 result = input->GetCaps( &inCaps );
5345 if ( FAILED( result ) ) {
5347 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5348 errorText_ = errorStream_.str();
5349 error( RtAudioError::WARNING );
5353 // Get input channel information.
5354 info.inputChannels = inCaps.dwChannels;
5356 // Get sample rate and format information.
// DSCCAPS reports capabilities as WAVE_FORMAT_* bit flags, one per
// rate/channel/width combination; translate them into native formats and a
// list of supported rates (stereo branch first, then mono).
5357 std::vector<unsigned int> rates;
5358 if ( inCaps.dwChannels >= 2 ) {
5359 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5360 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5361 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5362 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5363 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5364 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5365 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5366 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5368 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5369 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5370 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5371 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5372 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5374 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5375 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5376 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5377 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5378 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5381 else if ( inCaps.dwChannels == 1 ) {
5382 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5383 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5384 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5385 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5386 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5387 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5388 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5389 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5391 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5392 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5393 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5394 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5395 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5397 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5398 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5399 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5400 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5401 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5404 else info.inputChannels = 0; // technically, this would be an error
5408 if ( info.inputChannels == 0 ) return info;
5410 // Copy the supported rates to the info structure but avoid duplication.
5412 for ( unsigned int i=0; i<rates.size(); i++ ) {
5414 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5415 if ( rates[i] == info.sampleRates[j] ) {
5420 if ( found == false ) info.sampleRates.push_back( rates[i] );
5422 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5424 // If device opens for both playback and capture, we determine the channels.
5425 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5426 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5428 if ( device == 0 ) info.isDefaultInput = true;
5430 // Copy name and return.
5431 info.name = dsDevices[ device ].name;
5432 info.probed = true;
\r
5436 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5437 unsigned int firstChannel, unsigned int sampleRate,
\r
5438 RtAudioFormat format, unsigned int *bufferSize,
\r
5439 RtAudio::StreamOptions *options )
\r
5441 if ( channels + firstChannel > 2 ) {
\r
5442 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
5446 size_t nDevices = dsDevices.size();
\r
5447 if ( nDevices == 0 ) {
\r
5448 // This should not happen because a check is made before this function is called.
\r
5449 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
5453 if ( device >= nDevices ) {
\r
5454 // This should not happen because a check is made before this function is called.
\r
5455 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
5459 if ( mode == OUTPUT ) {
\r
5460 if ( dsDevices[ device ].validId[0] == false ) {
\r
5461 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
5462 errorText_ = errorStream_.str();
\r
5466 else { // mode == INPUT
\r
5467 if ( dsDevices[ device ].validId[1] == false ) {
\r
5468 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
5469 errorText_ = errorStream_.str();
\r
5474 // According to a note in PortAudio, using GetDesktopWindow()
\r
5475 // instead of GetForegroundWindow() is supposed to avoid problems
\r
5476 // that occur when the application's window is not the foreground
\r
5477 // window. Also, if the application window closes before the
\r
5478 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
5479 // problems when using GetDesktopWindow() but it seems fine now
\r
5480 // (January 2010). I'll leave it commented here.
\r
5481 // HWND hWnd = GetForegroundWindow();
\r
5482 HWND hWnd = GetDesktopWindow();
\r
5484 // Check the numberOfBuffers parameter and limit the lowest value to
\r
5485 // two. This is a judgement call and a value of two is probably too
\r
5486 // low for capture, but it should work for playback.
\r
5488 if ( options ) nBuffers = options->numberOfBuffers;
\r
5489 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
5490 if ( nBuffers < 2 ) nBuffers = 3;
\r
5492 // Check the lower range of the user-specified buffer size and set
\r
5493 // (arbitrarily) to a lower bound of 32.
\r
5494 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
5496 // Create the wave format structure. The data format setting will
\r
5497 // be determined later.
\r
5498 WAVEFORMATEX waveFormat;
\r
5499 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
5500 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
5501 waveFormat.nChannels = channels + firstChannel;
\r
5502 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
5504 // Determine the device buffer size. By default, we'll use the value
\r
5505 // defined above (32K), but we will grow it to make allowances for
\r
5506 // very large software buffer sizes.
\r
5507 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
5508 DWORD dsPointerLeadTime = 0;
\r
5510 void *ohandle = 0, *bhandle = 0;
\r
5512 if ( mode == OUTPUT ) {
\r
5514 LPDIRECTSOUND output;
\r
5515 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5516 if ( FAILED( result ) ) {
\r
5517 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5518 errorText_ = errorStream_.str();
\r
5523 outCaps.dwSize = sizeof( outCaps );
\r
5524 result = output->GetCaps( &outCaps );
\r
5525 if ( FAILED( result ) ) {
\r
5526 output->Release();
\r
5527 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
5528 errorText_ = errorStream_.str();
\r
5532 // Check channel information.
\r
5533 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
5534 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
5535 errorText_ = errorStream_.str();
\r
5539 // Check format information. Use 16-bit format unless not
\r
5540 // supported or user requests 8-bit.
\r
5541 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
5542 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
5543 waveFormat.wBitsPerSample = 16;
\r
5544 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5547 waveFormat.wBitsPerSample = 8;
\r
5548 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5550 stream_.userFormat = format;
\r
5552 // Update wave format structure and buffer information.
\r
5553 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5554 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5555 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5557 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5558 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5559 dsBufferSize *= 2;
\r
5561 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
5562 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
5563 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
5564 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
5565 if ( FAILED( result ) ) {
\r
5566 output->Release();
\r
5567 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
5568 errorText_ = errorStream_.str();
\r
5572 // Even though we will write to the secondary buffer, we need to
\r
5573 // access the primary buffer to set the correct output format
\r
5574 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
5575 // buffer description.
\r
5576 DSBUFFERDESC bufferDescription;
\r
5577 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5578 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5579 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
5581 // Obtain the primary buffer
\r
5582 LPDIRECTSOUNDBUFFER buffer;
\r
5583 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5584 if ( FAILED( result ) ) {
\r
5585 output->Release();
\r
5586 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
5587 errorText_ = errorStream_.str();
\r
5591 // Set the primary DS buffer sound format.
\r
5592 result = buffer->SetFormat( &waveFormat );
\r
5593 if ( FAILED( result ) ) {
\r
5594 output->Release();
\r
5595 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
5596 errorText_ = errorStream_.str();
\r
5600 // Setup the secondary DS buffer description.
\r
5601 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5602 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5603 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5604 DSBCAPS_GLOBALFOCUS |
\r
5605 DSBCAPS_GETCURRENTPOSITION2 |
\r
5606 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
5607 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5608 bufferDescription.lpwfxFormat = &waveFormat;
\r
5610 // Try to create the secondary DS buffer. If that doesn't work,
\r
5611 // try to use software mixing. Otherwise, there's a problem.
\r
5612 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5613 if ( FAILED( result ) ) {
\r
5614 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5615 DSBCAPS_GLOBALFOCUS |
\r
5616 DSBCAPS_GETCURRENTPOSITION2 |
\r
5617 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
5618 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5619 if ( FAILED( result ) ) {
\r
5620 output->Release();
\r
5621 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
5622 errorText_ = errorStream_.str();
\r
5627 // Get the buffer size ... might be different from what we specified.
\r
5629 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
5630 result = buffer->GetCaps( &dsbcaps );
\r
5631 if ( FAILED( result ) ) {
\r
5632 output->Release();
\r
5633 buffer->Release();
\r
5634 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5635 errorText_ = errorStream_.str();
\r
5639 dsBufferSize = dsbcaps.dwBufferBytes;
\r
5641 // Lock the DS buffer
\r
5644 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5645 if ( FAILED( result ) ) {
\r
5646 output->Release();
\r
5647 buffer->Release();
\r
5648 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
5649 errorText_ = errorStream_.str();
\r
5653 // Zero the DS buffer
\r
5654 ZeroMemory( audioPtr, dataLen );
\r
5656 // Unlock the DS buffer
\r
5657 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5658 if ( FAILED( result ) ) {
\r
5659 output->Release();
\r
5660 buffer->Release();
\r
5661 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
5662 errorText_ = errorStream_.str();
\r
5666 ohandle = (void *) output;
\r
5667 bhandle = (void *) buffer;
\r
5670 if ( mode == INPUT ) {
\r
5672 LPDIRECTSOUNDCAPTURE input;
\r
5673 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5674 if ( FAILED( result ) ) {
\r
5675 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5676 errorText_ = errorStream_.str();
\r
5681 inCaps.dwSize = sizeof( inCaps );
\r
5682 result = input->GetCaps( &inCaps );
\r
5683 if ( FAILED( result ) ) {
\r
5685 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
5686 errorText_ = errorStream_.str();
\r
5690 // Check channel information.
\r
5691 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
5692 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
5696 // Check format information. Use 16-bit format unless user
\r
5697 // requests 8-bit.
\r
5698 DWORD deviceFormats;
\r
5699 if ( channels + firstChannel == 2 ) {
\r
5700 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
5701 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5702 waveFormat.wBitsPerSample = 8;
\r
5703 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5705 else { // assume 16-bit is supported
\r
5706 waveFormat.wBitsPerSample = 16;
\r
5707 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5710 else { // channel == 1
\r
5711 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
5712 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5713 waveFormat.wBitsPerSample = 8;
\r
5714 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5716 else { // assume 16-bit is supported
\r
5717 waveFormat.wBitsPerSample = 16;
\r
5718 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5721 stream_.userFormat = format;
\r
5723 // Update wave format structure and buffer information.
\r
5724 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5725 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5726 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5728 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5729 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5730 dsBufferSize *= 2;
\r
5732 // Setup the secondary DS buffer description.
\r
5733 DSCBUFFERDESC bufferDescription;
\r
5734 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
5735 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
5736 bufferDescription.dwFlags = 0;
\r
5737 bufferDescription.dwReserved = 0;
\r
5738 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5739 bufferDescription.lpwfxFormat = &waveFormat;
\r
5741 // Create the capture buffer.
\r
5742 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
5743 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
5744 if ( FAILED( result ) ) {
\r
5746 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
5747 errorText_ = errorStream_.str();
\r
5751 // Get the buffer size ... might be different from what we specified.
\r
5752 DSCBCAPS dscbcaps;
\r
5753 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
5754 result = buffer->GetCaps( &dscbcaps );
\r
5755 if ( FAILED( result ) ) {
\r
5757 buffer->Release();
\r
5758 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5759 errorText_ = errorStream_.str();
\r
5763 dsBufferSize = dscbcaps.dwBufferBytes;
\r
5765 // NOTE: We could have a problem here if this is a duplex stream
\r
5766 // and the play and capture hardware buffer sizes are different
\r
5767 // (I'm actually not sure if that is a problem or not).
\r
5768 // Currently, we are not verifying that.
\r
5770 // Lock the capture buffer
\r
5773 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5774 if ( FAILED( result ) ) {
\r
5776 buffer->Release();
\r
5777 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
5778 errorText_ = errorStream_.str();
\r
5782 // Zero the buffer
\r
5783 ZeroMemory( audioPtr, dataLen );
\r
5785 // Unlock the buffer
\r
5786 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5787 if ( FAILED( result ) ) {
\r
5789 buffer->Release();
\r
5790 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
5791 errorText_ = errorStream_.str();
\r
5795 ohandle = (void *) input;
\r
5796 bhandle = (void *) buffer;
\r
5799 // Set various stream parameters
\r
5800 DsHandle *handle = 0;
\r
5801 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
5802 stream_.nUserChannels[mode] = channels;
\r
5803 stream_.bufferSize = *bufferSize;
\r
5804 stream_.channelOffset[mode] = firstChannel;
\r
5805 stream_.deviceInterleaved[mode] = true;
\r
5806 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
5807 else stream_.userInterleaved = true;
\r
5809 // Set flag for buffer conversion
\r
5810 stream_.doConvertBuffer[mode] = false;
\r
5811 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
5812 stream_.doConvertBuffer[mode] = true;
\r
5813 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
5814 stream_.doConvertBuffer[mode] = true;
\r
5815 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5816 stream_.nUserChannels[mode] > 1 )
\r
5817 stream_.doConvertBuffer[mode] = true;
\r
5819 // Allocate necessary internal buffers
\r
5820 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5821 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5822 if ( stream_.userBuffer[mode] == NULL ) {
\r
5823 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
5827 if ( stream_.doConvertBuffer[mode] ) {
\r
5829 bool makeBuffer = true;
\r
5830 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5831 if ( mode == INPUT ) {
\r
5832 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5833 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5834 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
5838 if ( makeBuffer ) {
\r
5839 bufferBytes *= *bufferSize;
\r
5840 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5841 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5842 if ( stream_.deviceBuffer == NULL ) {
\r
5843 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
5849 // Allocate our DsHandle structures for the stream.
\r
5850 if ( stream_.apiHandle == 0 ) {
\r
5852 handle = new DsHandle;
\r
5854 catch ( std::bad_alloc& ) {
\r
5855 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
5859 // Create a manual-reset event.
\r
5860 handle->condition = CreateEvent( NULL, // no security
\r
5861 TRUE, // manual-reset
\r
5862 FALSE, // non-signaled initially
\r
5863 NULL ); // unnamed
\r
5864 stream_.apiHandle = (void *) handle;
\r
5867 handle = (DsHandle *) stream_.apiHandle;
\r
5868 handle->id[mode] = ohandle;
\r
5869 handle->buffer[mode] = bhandle;
\r
5870 handle->dsBufferSize[mode] = dsBufferSize;
\r
5871 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
5873 stream_.device[mode] = device;
\r
5874 stream_.state = STREAM_STOPPED;
\r
5875 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
5876 // We had already set up an output stream.
\r
5877 stream_.mode = DUPLEX;
\r
5879 stream_.mode = mode;
\r
5880 stream_.nBuffers = nBuffers;
\r
5881 stream_.sampleRate = sampleRate;
\r
5883 // Setup the buffer conversion information structure.
\r
5884 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5886 // Setup the callback thread.
\r
5887 if ( stream_.callbackInfo.isRunning == false ) {
\r
5888 unsigned threadId;
\r
5889 stream_.callbackInfo.isRunning = true;
\r
5890 stream_.callbackInfo.object = (void *) this;
\r
5891 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
5892 &stream_.callbackInfo, 0, &threadId );
\r
5893 if ( stream_.callbackInfo.thread == 0 ) {
\r
5894 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
5898 // Boost DS thread priority
\r
5899 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
5905 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5906 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5907 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5908 if ( buffer ) buffer->Release();
\r
5909 object->Release();
\r
5911 if ( handle->buffer[1] ) {
\r
5912 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5913 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5914 if ( buffer ) buffer->Release();
\r
5915 object->Release();
\r
5917 CloseHandle( handle->condition );
\r
5919 stream_.apiHandle = 0;
\r
5922 for ( int i=0; i<2; i++ ) {
\r
5923 if ( stream_.userBuffer[i] ) {
\r
5924 free( stream_.userBuffer[i] );
\r
5925 stream_.userBuffer[i] = 0;
\r
5929 if ( stream_.deviceBuffer ) {
\r
5930 free( stream_.deviceBuffer );
\r
5931 stream_.deviceBuffer = 0;
\r
5934 stream_.state = STREAM_CLOSED;
\r
5938 void RtApiDs :: closeStream()
\r
5940 if ( stream_.state == STREAM_CLOSED ) {
\r
5941 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
5942 error( RtAudioError::WARNING );
\r
5946 // Stop the callback thread.
\r
5947 stream_.callbackInfo.isRunning = false;
\r
5948 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
5949 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
5951 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
5953 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5954 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5955 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5958 buffer->Release();
\r
5960 object->Release();
\r
5962 if ( handle->buffer[1] ) {
\r
5963 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5964 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5967 buffer->Release();
\r
5969 object->Release();
\r
5971 CloseHandle( handle->condition );
\r
5973 stream_.apiHandle = 0;
\r
5976 for ( int i=0; i<2; i++ ) {
\r
5977 if ( stream_.userBuffer[i] ) {
\r
5978 free( stream_.userBuffer[i] );
\r
5979 stream_.userBuffer[i] = 0;
\r
5983 if ( stream_.deviceBuffer ) {
\r
5984 free( stream_.deviceBuffer );
\r
5985 stream_.deviceBuffer = 0;
\r
5988 stream_.mode = UNINITIALIZED;
\r
5989 stream_.state = STREAM_CLOSED;
\r
5992 void RtApiDs :: startStream()
\r
5995 if ( stream_.state == STREAM_RUNNING ) {
\r
5996 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
5997 error( RtAudioError::WARNING );
\r
6001 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6003 // Increase scheduler frequency on lesser windows (a side-effect of
\r
6004 // increasing timer accuracy). On greater windows (Win2K or later),
\r
6005 // this is already in effect.
\r
6006 timeBeginPeriod( 1 );
\r
6008 buffersRolling = false;
\r
6009 duplexPrerollBytes = 0;
\r
6011 if ( stream_.mode == DUPLEX ) {
\r
6012 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
6013 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
6016 HRESULT result = 0;
\r
6017 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6019 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6020 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
6021 if ( FAILED( result ) ) {
\r
6022 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
6023 errorText_ = errorStream_.str();
\r
6028 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6030 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6031 result = buffer->Start( DSCBSTART_LOOPING );
\r
6032 if ( FAILED( result ) ) {
\r
6033 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
6034 errorText_ = errorStream_.str();
\r
6039 handle->drainCounter = 0;
\r
6040 handle->internalDrain = false;
\r
6041 ResetEvent( handle->condition );
\r
6042 stream_.state = STREAM_RUNNING;
\r
6045 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6048 void RtApiDs :: stopStream()
\r
6051 if ( stream_.state == STREAM_STOPPED ) {
\r
6052 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
6053 error( RtAudioError::WARNING );
\r
6057 HRESULT result = 0;
\r
6060 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6061 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6062 if ( handle->drainCounter == 0 ) {
\r
6063 handle->drainCounter = 2;
\r
6064 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
6067 stream_.state = STREAM_STOPPED;
\r
6069 MUTEX_LOCK( &stream_.mutex );
\r
6071 // Stop the buffer and clear memory
\r
6072 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6073 result = buffer->Stop();
\r
6074 if ( FAILED( result ) ) {
\r
6075 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
6076 errorText_ = errorStream_.str();
\r
6080 // Lock the buffer and clear it so that if we start to play again,
\r
6081 // we won't have old data playing.
\r
6082 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6083 if ( FAILED( result ) ) {
\r
6084 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
6085 errorText_ = errorStream_.str();
\r
6089 // Zero the DS buffer
\r
6090 ZeroMemory( audioPtr, dataLen );
\r
6092 // Unlock the DS buffer
\r
6093 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6094 if ( FAILED( result ) ) {
\r
6095 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
6096 errorText_ = errorStream_.str();
\r
6100 // If we start playing again, we must begin at beginning of buffer.
\r
6101 handle->bufferPointer[0] = 0;
\r
6104 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6105 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6109 stream_.state = STREAM_STOPPED;
\r
6111 if ( stream_.mode != DUPLEX )
\r
6112 MUTEX_LOCK( &stream_.mutex );
\r
6114 result = buffer->Stop();
\r
6115 if ( FAILED( result ) ) {
\r
6116 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
6117 errorText_ = errorStream_.str();
\r
6121 // Lock the buffer and clear it so that if we start to play again,
\r
6122 // we won't have old data playing.
\r
6123 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6124 if ( FAILED( result ) ) {
\r
6125 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
6126 errorText_ = errorStream_.str();
\r
6130 // Zero the DS buffer
\r
6131 ZeroMemory( audioPtr, dataLen );
\r
6133 // Unlock the DS buffer
\r
6134 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6135 if ( FAILED( result ) ) {
\r
6136 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
6137 errorText_ = errorStream_.str();
\r
6141 // If we start recording again, we must begin at beginning of buffer.
\r
6142 handle->bufferPointer[1] = 0;
\r
6146 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
6147 MUTEX_UNLOCK( &stream_.mutex );
\r
6149 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6152 void RtApiDs :: abortStream()
\r
6155 if ( stream_.state == STREAM_STOPPED ) {
\r
6156 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
6157 error( RtAudioError::WARNING );
\r
6161 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6162 handle->drainCounter = 2;
\r
6167 void RtApiDs :: callbackEvent()
\r
6169 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6170 Sleep( 50 ); // sleep 50 milliseconds
\r
6174 if ( stream_.state == STREAM_CLOSED ) {
\r
6175 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6176 error( RtAudioError::WARNING );
\r
6180 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6181 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6183 // Check if we were draining the stream and signal is finished.
\r
6184 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6186 stream_.state = STREAM_STOPPING;
\r
6187 if ( handle->internalDrain == false )
\r
6188 SetEvent( handle->condition );
\r
6194 // Invoke user callback to get fresh output data UNLESS we are
\r
6195 // draining stream.
\r
6196 if ( handle->drainCounter == 0 ) {
\r
6197 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6198 double streamTime = getStreamTime();
\r
6199 RtAudioStreamStatus status = 0;
\r
6200 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6201 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6202 handle->xrun[0] = false;
\r
6204 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6205 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6206 handle->xrun[1] = false;
\r
6208 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6209 stream_.bufferSize, streamTime, status, info->userData );
\r
6210 if ( cbReturnValue == 2 ) {
\r
6211 stream_.state = STREAM_STOPPING;
\r
6212 handle->drainCounter = 2;
\r
6216 else if ( cbReturnValue == 1 ) {
\r
6217 handle->drainCounter = 1;
\r
6218 handle->internalDrain = true;
\r
6223 DWORD currentWritePointer, safeWritePointer;
\r
6224 DWORD currentReadPointer, safeReadPointer;
\r
6225 UINT nextWritePointer;
\r
6227 LPVOID buffer1 = NULL;
\r
6228 LPVOID buffer2 = NULL;
\r
6229 DWORD bufferSize1 = 0;
\r
6230 DWORD bufferSize2 = 0;
\r
6235 MUTEX_LOCK( &stream_.mutex );
\r
6236 if ( stream_.state == STREAM_STOPPED ) {
\r
6237 MUTEX_UNLOCK( &stream_.mutex );
\r
6241 if ( buffersRolling == false ) {
\r
6242 if ( stream_.mode == DUPLEX ) {
\r
6243 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6245 // It takes a while for the devices to get rolling. As a result,
\r
6246 // there's no guarantee that the capture and write device pointers
\r
6247 // will move in lockstep. Wait here for both devices to start
\r
6248 // rolling, and then set our buffer pointers accordingly.
\r
6249 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6250 // bytes later than the write buffer.
\r
6252 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6253 // take place between the two GetCurrentPosition calls... but I'm
\r
6254 // really not sure how to solve the problem. Temporarily boost to
\r
6255 // Realtime priority, maybe; but I'm not sure what priority the
\r
6256 // DirectSound service threads run at. We *should* be roughly
\r
6257 // within a ms or so of correct.
\r
6259 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6260 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6262 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6264 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6265 if ( FAILED( result ) ) {
\r
6266 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6267 errorText_ = errorStream_.str();
\r
6268 error( RtAudioError::SYSTEM_ERROR );
\r
6271 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6272 if ( FAILED( result ) ) {
\r
6273 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6274 errorText_ = errorStream_.str();
\r
6275 error( RtAudioError::SYSTEM_ERROR );
\r
6279 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6280 if ( FAILED( result ) ) {
\r
6281 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6282 errorText_ = errorStream_.str();
\r
6283 error( RtAudioError::SYSTEM_ERROR );
\r
6286 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6287 if ( FAILED( result ) ) {
\r
6288 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6289 errorText_ = errorStream_.str();
\r
6290 error( RtAudioError::SYSTEM_ERROR );
\r
6293 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6297 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6299 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6300 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6301 handle->bufferPointer[1] = safeReadPointer;
\r
6303 else if ( stream_.mode == OUTPUT ) {
\r
6305 // Set the proper nextWritePosition after initial startup.
\r
6306 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6307 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6308 if ( FAILED( result ) ) {
\r
6309 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6310 errorText_ = errorStream_.str();
\r
6311 error( RtAudioError::SYSTEM_ERROR );
\r
6314 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6315 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6318 buffersRolling = true;
\r
6321 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6323 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6325 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6326 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6327 bufferBytes *= formatBytes( stream_.userFormat );
\r
6328 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6331 // Setup parameters and do buffer conversion if necessary.
\r
6332 if ( stream_.doConvertBuffer[0] ) {
\r
6333 buffer = stream_.deviceBuffer;
\r
6334 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6335 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6336 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6339 buffer = stream_.userBuffer[0];
\r
6340 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6341 bufferBytes *= formatBytes( stream_.userFormat );
\r
6344 // No byte swapping necessary in DirectSound implementation.
\r
6346 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6347 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6349 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6350 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6352 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6353 nextWritePointer = handle->bufferPointer[0];
\r
6355 DWORD endWrite, leadPointer;
\r
6357 // Find out where the read and "safe write" pointers are.
\r
6358 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6359 if ( FAILED( result ) ) {
\r
6360 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6361 errorText_ = errorStream_.str();
\r
6362 error( RtAudioError::SYSTEM_ERROR );
\r
6366 // We will copy our output buffer into the region between
\r
6367 // safeWritePointer and leadPointer. If leadPointer is not
\r
6368 // beyond the next endWrite position, wait until it is.
\r
6369 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6370 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6371 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6372 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6373 endWrite = nextWritePointer + bufferBytes;
\r
6375 // Check whether the entire write region is behind the play pointer.
\r
6376 if ( leadPointer >= endWrite ) break;
\r
6378 // If we are here, then we must wait until the leadPointer advances
\r
6379 // beyond the end of our next write region. We use the
\r
6380 // Sleep() function to suspend operation until that happens.
\r
6381 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6382 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6383 if ( millis < 1.0 ) millis = 1.0;
\r
6384 Sleep( (DWORD) millis );
\r
6387 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6388 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6389 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6390 handle->xrun[0] = true;
\r
6391 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6392 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6393 handle->bufferPointer[0] = nextWritePointer;
\r
6394 endWrite = nextWritePointer + bufferBytes;
\r
6397 // Lock free space in the buffer
\r
6398 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6399 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6400 if ( FAILED( result ) ) {
\r
6401 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6402 errorText_ = errorStream_.str();
\r
6403 error( RtAudioError::SYSTEM_ERROR );
\r
6407 // Copy our buffer into the DS buffer
\r
6408 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6409 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6411 // Update our buffer offset and unlock sound buffer
\r
6412 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6413 if ( FAILED( result ) ) {
\r
6414 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6415 errorText_ = errorStream_.str();
\r
6416 error( RtAudioError::SYSTEM_ERROR );
\r
6419 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6420 handle->bufferPointer[0] = nextWritePointer;
\r
6423 // Don't bother draining input
\r
6424 if ( handle->drainCounter ) {
\r
6425 handle->drainCounter++;
\r
6429 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6431 // Setup parameters.
\r
6432 if ( stream_.doConvertBuffer[1] ) {
\r
6433 buffer = stream_.deviceBuffer;
\r
6434 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6435 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6438 buffer = stream_.userBuffer[1];
\r
6439 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6440 bufferBytes *= formatBytes( stream_.userFormat );
\r
6443 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6444 long nextReadPointer = handle->bufferPointer[1];
\r
6445 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6447 // Find out where the write and "safe read" pointers are.
\r
6448 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6449 if ( FAILED( result ) ) {
\r
6450 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6451 errorText_ = errorStream_.str();
\r
6452 error( RtAudioError::SYSTEM_ERROR );
\r
6456 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6457 DWORD endRead = nextReadPointer + bufferBytes;
\r
6459 // Handling depends on whether we are INPUT or DUPLEX.
\r
6460 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6461 // then a wait here will drag the write pointers into the forbidden zone.
\r
6463 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6464 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6465 // practical way to sync up the read and write pointers reliably, given the
\r
6466 // the very complex relationship between phase and increment of the read and write
\r
6469 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6470 // provide a pre-roll period of 0.5 seconds in which we return
\r
6471 // zeros from the read buffer while the pointers sync up.
\r
6473 if ( stream_.mode == DUPLEX ) {
\r
6474 if ( safeReadPointer < endRead ) {
\r
6475 if ( duplexPrerollBytes <= 0 ) {
\r
6476           // Pre-roll time over. Be more aggressive.
\r
6477 int adjustment = endRead-safeReadPointer;
\r
6479 handle->xrun[1] = true;
\r
6481 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6482 // and perform fine adjustments later.
\r
6483 // - small adjustments: back off by twice as much.
\r
6484 if ( adjustment >= 2*bufferBytes )
\r
6485 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6487 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6489 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6493           // In pre-roll time. Just do it.
\r
6494 nextReadPointer = safeReadPointer - bufferBytes;
\r
6495 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6497 endRead = nextReadPointer + bufferBytes;
\r
6500 else { // mode == INPUT
\r
6501 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6502 // See comments for playback.
\r
6503 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6504 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6505 if ( millis < 1.0 ) millis = 1.0;
\r
6506 Sleep( (DWORD) millis );
\r
6508 // Wake up and find out where we are now.
\r
6509 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6510 if ( FAILED( result ) ) {
\r
6511 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6512 errorText_ = errorStream_.str();
\r
6513 error( RtAudioError::SYSTEM_ERROR );
\r
6517 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6521 // Lock free space in the buffer
\r
6522 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6523 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6524 if ( FAILED( result ) ) {
\r
6525 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6526 errorText_ = errorStream_.str();
\r
6527 error( RtAudioError::SYSTEM_ERROR );
\r
6531 if ( duplexPrerollBytes <= 0 ) {
\r
6532 // Copy our buffer into the DS buffer
\r
6533 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6534 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6537 memset( buffer, 0, bufferSize1 );
\r
6538 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6539 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6542 // Update our buffer offset and unlock sound buffer
\r
6543 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6544 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6545 if ( FAILED( result ) ) {
\r
6546 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6547 errorText_ = errorStream_.str();
\r
6548 error( RtAudioError::SYSTEM_ERROR );
\r
6551 handle->bufferPointer[1] = nextReadPointer;
\r
6553 // No byte swapping necessary in DirectSound implementation.
\r
6555 // If necessary, convert 8-bit data from unsigned to signed.
\r
6556 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6557 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6559 // Do buffer conversion if necessary.
\r
6560 if ( stream_.doConvertBuffer[1] )
\r
6561 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6565 MUTEX_UNLOCK( &stream_.mutex );
\r
6566 RtApi::tickStreamTime();
\r
6569 // Definitions for utility functions and callbacks
\r
6570 // specific to the DirectSound implementation.
\r
6572 static unsigned __stdcall callbackHandler( void *ptr )
\r
6574 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6575 RtApiDs *object = (RtApiDs *) info->object;
\r
6576 bool* isRunning = &info->isRunning;
\r
6578 while ( *isRunning == true ) {
\r
6579 object->callbackEvent();
\r
6582 _endthreadex( 0 );
\r
6586 #include "tchar.h"
\r
6588 static std::string convertTChar( LPCTSTR name )
\r
6590 #if defined( UNICODE ) || defined( _UNICODE )
\r
6591 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
6592 std::string s( length-1, '\0' );
\r
6593 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);
\r
6595 std::string s( name );
\r
6601 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6602 LPCTSTR description,
\r
6603 LPCTSTR /*module*/,
\r
6604 LPVOID lpContext )
\r
6606 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6607 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6610 bool validDevice = false;
\r
6611 if ( probeInfo.isInput == true ) {
\r
6613 LPDIRECTSOUNDCAPTURE object;
\r
6615 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6616 if ( hr != DS_OK ) return TRUE;
\r
6618 caps.dwSize = sizeof(caps);
\r
6619 hr = object->GetCaps( &caps );
\r
6620 if ( hr == DS_OK ) {
\r
6621 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6622 validDevice = true;
\r
6624 object->Release();
\r
6628 LPDIRECTSOUND object;
\r
6629 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6630 if ( hr != DS_OK ) return TRUE;
\r
6632 caps.dwSize = sizeof(caps);
\r
6633 hr = object->GetCaps( &caps );
\r
6634 if ( hr == DS_OK ) {
\r
6635 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6636 validDevice = true;
\r
6638 object->Release();
\r
6641 // If good device, then save its name and guid.
\r
6642 std::string name = convertTChar( description );
\r
6643 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6644 if ( lpguid == NULL )
\r
6645 name = "Default Device";
\r
6646 if ( validDevice ) {
\r
6647 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6648 if ( dsDevices[i].name == name ) {
\r
6649 dsDevices[i].found = true;
\r
6650 if ( probeInfo.isInput ) {
\r
6651 dsDevices[i].id[1] = lpguid;
\r
6652 dsDevices[i].validId[1] = true;
\r
6655 dsDevices[i].id[0] = lpguid;
\r
6656 dsDevices[i].validId[0] = true;
\r
6663 device.name = name;
\r
6664 device.found = true;
\r
6665 if ( probeInfo.isInput ) {
\r
6666 device.id[1] = lpguid;
\r
6667 device.validId[1] = true;
\r
6670 device.id[0] = lpguid;
\r
6671 device.validId[0] = true;
\r
6673 dsDevices.push_back( device );
\r
6679 static const char* getErrorString( int code )
\r
6683 case DSERR_ALLOCATED:
\r
6684 return "Already allocated";
\r
6686 case DSERR_CONTROLUNAVAIL:
\r
6687 return "Control unavailable";
\r
6689 case DSERR_INVALIDPARAM:
\r
6690 return "Invalid parameter";
\r
6692 case DSERR_INVALIDCALL:
\r
6693 return "Invalid call";
\r
6695 case DSERR_GENERIC:
\r
6696 return "Generic error";
\r
6698 case DSERR_PRIOLEVELNEEDED:
\r
6699 return "Priority level needed";
\r
6701 case DSERR_OUTOFMEMORY:
\r
6702 return "Out of memory";
\r
6704 case DSERR_BADFORMAT:
\r
6705 return "The sample rate or the channel format is not supported";
\r
6707 case DSERR_UNSUPPORTED:
\r
6708 return "Not supported";
\r
6710 case DSERR_NODRIVER:
\r
6711 return "No driver";
\r
6713 case DSERR_ALREADYINITIALIZED:
\r
6714 return "Already initialized";
\r
6716 case DSERR_NOAGGREGATION:
\r
6717 return "No aggregation";
\r
6719 case DSERR_BUFFERLOST:
\r
6720 return "Buffer lost";
\r
6722 case DSERR_OTHERAPPHASPRIO:
\r
6723 return "Another application already has priority";
\r
6725 case DSERR_UNINITIALIZED:
\r
6726 return "Uninitialized";
\r
6729 return "DirectSound unknown error";
\r
6732 //******************** End of __WINDOWS_DS__ *********************//
\r
6736 #if defined(__LINUX_ALSA__)
\r
6738 #include <alsa/asoundlib.h>
\r
6739 #include <unistd.h>
\r
6741 // A structure to hold various information related to the ALSA API
\r
6742 // implementation.
\r
6743 struct AlsaHandle {
\r
6744 snd_pcm_t *handles[2];
\r
6745 bool synchronized;
\r
6747 pthread_cond_t runnable_cv;
\r
6751 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6754 static void *alsaCallbackHandler( void * ptr );
\r
6756 RtApiAlsa :: RtApiAlsa()
\r
6758 // Nothing to do here.
\r
6761 RtApiAlsa :: ~RtApiAlsa()
\r
6763 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6766 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6768 unsigned nDevices = 0;
\r
6769 int result, subdevice, card;
\r
6771 snd_ctl_t *handle;
\r
6773 // Count cards and devices
\r
6775 snd_card_next( &card );
\r
6776 while ( card >= 0 ) {
\r
6777 sprintf( name, "hw:%d", card );
\r
6778 result = snd_ctl_open( &handle, name, 0 );
\r
6779 if ( result < 0 ) {
\r
6780 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6781 errorText_ = errorStream_.str();
\r
6782 error( RtAudioError::WARNING );
\r
6787 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6788 if ( result < 0 ) {
\r
6789 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6790 errorText_ = errorStream_.str();
\r
6791 error( RtAudioError::WARNING );
\r
6794 if ( subdevice < 0 )
\r
6799 snd_ctl_close( handle );
\r
6800 snd_card_next( &card );
\r
6803 result = snd_ctl_open( &handle, "default", 0 );
\r
6804 if (result == 0) {
\r
6806 snd_ctl_close( handle );
\r
6812 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6814 RtAudio::DeviceInfo info;
\r
6815 info.probed = false;
\r
6817 unsigned nDevices = 0;
\r
6818 int result, subdevice, card;
\r
6820 snd_ctl_t *chandle;
\r
6822 // Count cards and devices
\r
6824 snd_card_next( &card );
\r
6825 while ( card >= 0 ) {
\r
6826 sprintf( name, "hw:%d", card );
\r
6827 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6828 if ( result < 0 ) {
\r
6829 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6830 errorText_ = errorStream_.str();
\r
6831 error( RtAudioError::WARNING );
\r
6836 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6837 if ( result < 0 ) {
\r
6838 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6839 errorText_ = errorStream_.str();
\r
6840 error( RtAudioError::WARNING );
\r
6843 if ( subdevice < 0 ) break;
\r
6844 if ( nDevices == device ) {
\r
6845 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6851 snd_ctl_close( chandle );
\r
6852 snd_card_next( &card );
\r
6855 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6856 if ( result == 0 ) {
\r
6857 if ( nDevices == device ) {
\r
6858 strcpy( name, "default" );
\r
6864 if ( nDevices == 0 ) {
\r
6865 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6866 error( RtAudioError::INVALID_USE );
\r
6870 if ( device >= nDevices ) {
\r
6871 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6872 error( RtAudioError::INVALID_USE );
\r
6878 // If a stream is already open, we cannot probe the stream devices.
\r
6879 // Thus, use the saved results.
\r
6880 if ( stream_.state != STREAM_CLOSED &&
\r
6881 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6882 snd_ctl_close( chandle );
\r
6883 if ( device >= devices_.size() ) {
\r
6884 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6885 error( RtAudioError::WARNING );
\r
6888 return devices_[ device ];
\r
6891 int openMode = SND_PCM_ASYNC;
\r
6892 snd_pcm_stream_t stream;
\r
6893 snd_pcm_info_t *pcminfo;
\r
6894 snd_pcm_info_alloca( &pcminfo );
\r
6895 snd_pcm_t *phandle;
\r
6896 snd_pcm_hw_params_t *params;
\r
6897 snd_pcm_hw_params_alloca( ¶ms );
\r
6899 // First try for playback unless default device (which has subdev -1)
\r
6900 stream = SND_PCM_STREAM_PLAYBACK;
\r
6901 snd_pcm_info_set_stream( pcminfo, stream );
\r
6902 if ( subdevice != -1 ) {
\r
6903 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6904 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6906 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6907 if ( result < 0 ) {
\r
6908 // Device probably doesn't support playback.
\r
6909 goto captureProbe;
\r
6913 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6914 if ( result < 0 ) {
\r
6915 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6916 errorText_ = errorStream_.str();
\r
6917 error( RtAudioError::WARNING );
\r
6918 goto captureProbe;
\r
6921 // The device is open ... fill the parameter structure.
\r
6922 result = snd_pcm_hw_params_any( phandle, params );
\r
6923 if ( result < 0 ) {
\r
6924 snd_pcm_close( phandle );
\r
6925 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6926 errorText_ = errorStream_.str();
\r
6927 error( RtAudioError::WARNING );
\r
6928 goto captureProbe;
\r
6931 // Get output channel information.
\r
6932 unsigned int value;
\r
6933 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6934 if ( result < 0 ) {
\r
6935 snd_pcm_close( phandle );
\r
6936 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
6937 errorText_ = errorStream_.str();
\r
6938 error( RtAudioError::WARNING );
\r
6939 goto captureProbe;
\r
6941 info.outputChannels = value;
\r
6942 snd_pcm_close( phandle );
\r
6945 stream = SND_PCM_STREAM_CAPTURE;
\r
6946 snd_pcm_info_set_stream( pcminfo, stream );
\r
6948 // Now try for capture unless default device (with subdev = -1)
\r
6949 if ( subdevice != -1 ) {
\r
6950 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6951 snd_ctl_close( chandle );
\r
6952 if ( result < 0 ) {
\r
6953 // Device probably doesn't support capture.
\r
6954 if ( info.outputChannels == 0 ) return info;
\r
6955 goto probeParameters;
\r
6959 snd_ctl_close( chandle );
\r
6961 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
6962 if ( result < 0 ) {
\r
6963 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6964 errorText_ = errorStream_.str();
\r
6965 error( RtAudioError::WARNING );
\r
6966 if ( info.outputChannels == 0 ) return info;
\r
6967 goto probeParameters;
\r
6970 // The device is open ... fill the parameter structure.
\r
6971 result = snd_pcm_hw_params_any( phandle, params );
\r
6972 if ( result < 0 ) {
\r
6973 snd_pcm_close( phandle );
\r
6974 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6975 errorText_ = errorStream_.str();
\r
6976 error( RtAudioError::WARNING );
\r
6977 if ( info.outputChannels == 0 ) return info;
\r
6978 goto probeParameters;
\r
6981 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6982 if ( result < 0 ) {
\r
6983 snd_pcm_close( phandle );
\r
6984 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
6985 errorText_ = errorStream_.str();
\r
6986 error( RtAudioError::WARNING );
\r
6987 if ( info.outputChannels == 0 ) return info;
\r
6988 goto probeParameters;
\r
6990 info.inputChannels = value;
\r
6991 snd_pcm_close( phandle );
\r
6993 // If device opens for both playback and capture, we determine the channels.
\r
6994 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
6995 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6997 // ALSA doesn't provide default devices so we'll use the first available one.
\r
6998 if ( device == 0 && info.outputChannels > 0 )
\r
6999 info.isDefaultOutput = true;
\r
7000 if ( device == 0 && info.inputChannels > 0 )
\r
7001 info.isDefaultInput = true;
\r
7004 // At this point, we just need to figure out the supported data
\r
7005 // formats and sample rates. We'll proceed by opening the device in
\r
7006 // the direction with the maximum number of channels, or playback if
\r
7007 // they are equal. This might limit our sample rate options, but so
\r
7010 if ( info.outputChannels >= info.inputChannels )
\r
7011 stream = SND_PCM_STREAM_PLAYBACK;
\r
7013 stream = SND_PCM_STREAM_CAPTURE;
\r
7014 snd_pcm_info_set_stream( pcminfo, stream );
\r
7016 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7017 if ( result < 0 ) {
\r
7018 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7019 errorText_ = errorStream_.str();
\r
7020 error( RtAudioError::WARNING );
\r
7024 // The device is open ... fill the parameter structure.
\r
7025 result = snd_pcm_hw_params_any( phandle, params );
\r
7026 if ( result < 0 ) {
\r
7027 snd_pcm_close( phandle );
\r
7028 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7029 errorText_ = errorStream_.str();
\r
7030 error( RtAudioError::WARNING );
\r
7034 // Test our discrete set of sample rate values.
\r
7035 info.sampleRates.clear();
\r
7036 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7037 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
7038 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7040 if ( info.sampleRates.size() == 0 ) {
\r
7041 snd_pcm_close( phandle );
\r
7042 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7043 errorText_ = errorStream_.str();
\r
7044 error( RtAudioError::WARNING );
\r
7048 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7049 snd_pcm_format_t format;
\r
7050 info.nativeFormats = 0;
\r
7051 format = SND_PCM_FORMAT_S8;
\r
7052 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7053 info.nativeFormats |= RTAUDIO_SINT8;
\r
7054 format = SND_PCM_FORMAT_S16;
\r
7055 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7056 info.nativeFormats |= RTAUDIO_SINT16;
\r
7057 format = SND_PCM_FORMAT_S24;
\r
7058 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7059 info.nativeFormats |= RTAUDIO_SINT24;
\r
7060 format = SND_PCM_FORMAT_S32;
\r
7061 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7062 info.nativeFormats |= RTAUDIO_SINT32;
\r
7063 format = SND_PCM_FORMAT_FLOAT;
\r
7064 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7065 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7066 format = SND_PCM_FORMAT_FLOAT64;
\r
7067 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7068 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7070 // Check that we have at least one supported format
\r
7071 if ( info.nativeFormats == 0 ) {
\r
7072 snd_pcm_close( phandle );
\r
7073 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7074 errorText_ = errorStream_.str();
\r
7075 error( RtAudioError::WARNING );
\r
7079 // Get the device name
\r
7081 result = snd_card_get_name( card, &cardname );
\r
7082 if ( result >= 0 ) {
\r
7083 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7088 // That's all ... close the device and return
\r
7089 snd_pcm_close( phandle );
\r
7090 info.probed = true;
\r
7094 void RtApiAlsa :: saveDeviceInfo( void )
\r
7098 unsigned int nDevices = getDeviceCount();
\r
7099 devices_.resize( nDevices );
\r
7100 for ( unsigned int i=0; i<nDevices; i++ )
\r
7101 devices_[i] = getDeviceInfo( i );
\r
7104 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7105 unsigned int firstChannel, unsigned int sampleRate,
\r
7106 RtAudioFormat format, unsigned int *bufferSize,
\r
7107 RtAudio::StreamOptions *options )
\r
7110 #if defined(__RTAUDIO_DEBUG__)
\r
7111 snd_output_t *out;
\r
7112 snd_output_stdio_attach(&out, stderr, 0);
\r
7115 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7117 unsigned nDevices = 0;
\r
7118 int result, subdevice, card;
\r
7120 snd_ctl_t *chandle;
\r
7122 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7123 snprintf(name, sizeof(name), "%s", "default");
\r
7125 // Count cards and devices
\r
7127 snd_card_next( &card );
\r
7128 while ( card >= 0 ) {
\r
7129 sprintf( name, "hw:%d", card );
\r
7130 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7131 if ( result < 0 ) {
\r
7132 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7133 errorText_ = errorStream_.str();
\r
7138 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7139 if ( result < 0 ) break;
\r
7140 if ( subdevice < 0 ) break;
\r
7141 if ( nDevices == device ) {
\r
7142 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7143 snd_ctl_close( chandle );
\r
7148 snd_ctl_close( chandle );
\r
7149 snd_card_next( &card );
\r
7152 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7153 if ( result == 0 ) {
\r
7154 if ( nDevices == device ) {
\r
7155 strcpy( name, "default" );
\r
7161 if ( nDevices == 0 ) {
\r
7162 // This should not happen because a check is made before this function is called.
\r
7163 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7167 if ( device >= nDevices ) {
\r
7168 // This should not happen because a check is made before this function is called.
\r
7169 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7176 // The getDeviceInfo() function will not work for a device that is
\r
7177 // already open. Thus, we'll probe the system before opening a
\r
7178 // stream and save the results for use by getDeviceInfo().
\r
7179 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7180 this->saveDeviceInfo();
\r
7182 snd_pcm_stream_t stream;
\r
7183 if ( mode == OUTPUT )
\r
7184 stream = SND_PCM_STREAM_PLAYBACK;
\r
7186 stream = SND_PCM_STREAM_CAPTURE;
\r
7188 snd_pcm_t *phandle;
\r
7189 int openMode = SND_PCM_ASYNC;
\r
7190 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7191 if ( result < 0 ) {
\r
7192 if ( mode == OUTPUT )
\r
7193 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7195 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7196 errorText_ = errorStream_.str();
\r
7200 // Fill the parameter structure.
\r
7201 snd_pcm_hw_params_t *hw_params;
\r
7202 snd_pcm_hw_params_alloca( &hw_params );
\r
7203 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7204 if ( result < 0 ) {
\r
7205 snd_pcm_close( phandle );
\r
7206 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7207 errorText_ = errorStream_.str();
\r
7211 #if defined(__RTAUDIO_DEBUG__)
\r
7212 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7213 snd_pcm_hw_params_dump( hw_params, out );
\r
7216 // Set access ... check user preference.
\r
7217 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7218 stream_.userInterleaved = false;
\r
7219 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7220 if ( result < 0 ) {
\r
7221 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7222 stream_.deviceInterleaved[mode] = true;
\r
7225 stream_.deviceInterleaved[mode] = false;
\r
7228 stream_.userInterleaved = true;
\r
7229 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7230 if ( result < 0 ) {
\r
7231 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7232 stream_.deviceInterleaved[mode] = false;
\r
7235 stream_.deviceInterleaved[mode] = true;
\r
7238 if ( result < 0 ) {
\r
7239 snd_pcm_close( phandle );
\r
7240 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7241 errorText_ = errorStream_.str();
\r
7245 // Determine how to set the device format.
\r
7246 stream_.userFormat = format;
\r
7247 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7249 if ( format == RTAUDIO_SINT8 )
\r
7250 deviceFormat = SND_PCM_FORMAT_S8;
\r
7251 else if ( format == RTAUDIO_SINT16 )
\r
7252 deviceFormat = SND_PCM_FORMAT_S16;
\r
7253 else if ( format == RTAUDIO_SINT24 )
\r
7254 deviceFormat = SND_PCM_FORMAT_S24;
\r
7255 else if ( format == RTAUDIO_SINT32 )
\r
7256 deviceFormat = SND_PCM_FORMAT_S32;
\r
7257 else if ( format == RTAUDIO_FLOAT32 )
\r
7258 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7259 else if ( format == RTAUDIO_FLOAT64 )
\r
7260 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7262 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7263 stream_.deviceFormat[mode] = format;
\r
7267 // The user requested format is not natively supported by the device.
\r
7268 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7269 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7270 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7274 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7275 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7276 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7280 deviceFormat = SND_PCM_FORMAT_S32;
\r
7281 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7282 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7286 deviceFormat = SND_PCM_FORMAT_S24;
\r
7287 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7288 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7292 deviceFormat = SND_PCM_FORMAT_S16;
\r
7293 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7294 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7298 deviceFormat = SND_PCM_FORMAT_S8;
\r
7299 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7300 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7304 // If we get here, no supported format was found.
\r
7305 snd_pcm_close( phandle );
\r
7306 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7307 errorText_ = errorStream_.str();
\r
7311 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7312 if ( result < 0 ) {
\r
7313 snd_pcm_close( phandle );
\r
7314 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7315 errorText_ = errorStream_.str();
\r
7319 // Determine whether byte-swaping is necessary.
\r
7320 stream_.doByteSwap[mode] = false;
\r
7321 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7322 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7323 if ( result == 0 )
\r
7324 stream_.doByteSwap[mode] = true;
\r
7325 else if (result < 0) {
\r
7326 snd_pcm_close( phandle );
\r
7327 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7328 errorText_ = errorStream_.str();
\r
7333 // Set the sample rate.
\r
7334 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7335 if ( result < 0 ) {
\r
7336 snd_pcm_close( phandle );
\r
7337 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7338 errorText_ = errorStream_.str();
\r
7342 // Determine the number of channels for this device. We support a possible
\r
7343 // minimum device channel number > than the value requested by the user.
\r
7344 stream_.nUserChannels[mode] = channels;
\r
7345 unsigned int value;
\r
7346 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7347 unsigned int deviceChannels = value;
\r
7348 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7349 snd_pcm_close( phandle );
\r
7350 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7351 errorText_ = errorStream_.str();
\r
7355 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7356 if ( result < 0 ) {
\r
7357 snd_pcm_close( phandle );
\r
7358 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7359 errorText_ = errorStream_.str();
\r
7362 deviceChannels = value;
\r
7363 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7364 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7366 // Set the device channels.
\r
7367 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7368 if ( result < 0 ) {
\r
7369 snd_pcm_close( phandle );
\r
7370 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7371 errorText_ = errorStream_.str();
\r
7375 // Set the buffer (or period) size.
\r
7377 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7378 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7379 if ( result < 0 ) {
\r
7380 snd_pcm_close( phandle );
\r
7381 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7382 errorText_ = errorStream_.str();
\r
7385 *bufferSize = periodSize;
\r
7387 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7388 unsigned int periods = 0;
\r
7389 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7390 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7391 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7392 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7393 if ( result < 0 ) {
\r
7394 snd_pcm_close( phandle );
\r
7395 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7396 errorText_ = errorStream_.str();
\r
7400 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7401 // MUST be the same in both directions!
\r
7402 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7403 snd_pcm_close( phandle );
\r
7404 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7405 errorText_ = errorStream_.str();
\r
7409 stream_.bufferSize = *bufferSize;
\r
7411 // Install the hardware configuration
\r
7412 result = snd_pcm_hw_params( phandle, hw_params );
\r
7413 if ( result < 0 ) {
\r
7414 snd_pcm_close( phandle );
\r
7415 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7416 errorText_ = errorStream_.str();
\r
7420 #if defined(__RTAUDIO_DEBUG__)
\r
7421 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7422 snd_pcm_hw_params_dump( hw_params, out );
\r
7425 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7426 snd_pcm_sw_params_t *sw_params = NULL;
\r
7427 snd_pcm_sw_params_alloca( &sw_params );
\r
7428 snd_pcm_sw_params_current( phandle, sw_params );
\r
7429 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7430 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7431 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7433 // The following two settings were suggested by Theo Veenker
\r
7434 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7435 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7437 // here are two options for a fix
\r
7438 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7439 snd_pcm_uframes_t val;
\r
7440 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7441 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7443 result = snd_pcm_sw_params( phandle, sw_params );
\r
7444 if ( result < 0 ) {
\r
7445 snd_pcm_close( phandle );
\r
7446 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7447 errorText_ = errorStream_.str();
\r
7451 #if defined(__RTAUDIO_DEBUG__)
\r
7452 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7453 snd_pcm_sw_params_dump( sw_params, out );
\r
7456 // Set flags for buffer conversion
\r
7457 stream_.doConvertBuffer[mode] = false;
\r
7458 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7459 stream_.doConvertBuffer[mode] = true;
\r
7460 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7461 stream_.doConvertBuffer[mode] = true;
\r
7462 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7463 stream_.nUserChannels[mode] > 1 )
\r
7464 stream_.doConvertBuffer[mode] = true;
\r
7466 // Allocate the ApiHandle if necessary and then save.
\r
7467 AlsaHandle *apiInfo = 0;
\r
7468 if ( stream_.apiHandle == 0 ) {
\r
7470 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7472 catch ( std::bad_alloc& ) {
\r
7473 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7477 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7478 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7482 stream_.apiHandle = (void *) apiInfo;
\r
7483 apiInfo->handles[0] = 0;
\r
7484 apiInfo->handles[1] = 0;
\r
7487 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7489 apiInfo->handles[mode] = phandle;
\r
7492 // Allocate necessary internal buffers.
\r
7493 unsigned long bufferBytes;
\r
7494 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7495 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7496 if ( stream_.userBuffer[mode] == NULL ) {
\r
7497 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7501 if ( stream_.doConvertBuffer[mode] ) {
\r
7503 bool makeBuffer = true;
\r
7504 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7505 if ( mode == INPUT ) {
\r
7506 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7507 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7508 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7512 if ( makeBuffer ) {
\r
7513 bufferBytes *= *bufferSize;
\r
7514 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7515 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7516 if ( stream_.deviceBuffer == NULL ) {
\r
7517 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7523 stream_.sampleRate = sampleRate;
\r
7524 stream_.nBuffers = periods;
\r
7525 stream_.device[mode] = device;
\r
7526 stream_.state = STREAM_STOPPED;
\r
7528 // Setup the buffer conversion information structure.
\r
7529 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7531 // Setup thread if necessary.
\r
7532 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7533 // We had already set up an output stream.
\r
7534 stream_.mode = DUPLEX;
\r
7535 // Link the streams if possible.
\r
7536 apiInfo->synchronized = false;
\r
7537 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7538 apiInfo->synchronized = true;
\r
7540 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7541 error( RtAudioError::WARNING );
\r
7545 stream_.mode = mode;
\r
7547 // Setup callback thread.
\r
7548 stream_.callbackInfo.object = (void *) this;
\r
7550 // Set the thread attributes for joinable and realtime scheduling
\r
7551 // priority (optional). The higher priority will only take affect
\r
7552 // if the program is run as root or suid. Note, under Linux
\r
7553 // processes with CAP_SYS_NICE privilege, a user can change
\r
7554 // scheduling policy and priority (thus need not be root). See
\r
7555 // POSIX "capabilities".
\r
7556 pthread_attr_t attr;
\r
7557 pthread_attr_init( &attr );
\r
7558 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7560 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7561 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7562 // We previously attempted to increase the audio callback priority
\r
7563 // to SCHED_RR here via the attributes. However, while no errors
\r
7564 // were reported in doing so, it did not work. So, now this is
\r
7565 // done in the alsaCallbackHandler function.
\r
7566 stream_.callbackInfo.doRealtime = true;
\r
7567 int priority = options->priority;
\r
7568 int min = sched_get_priority_min( SCHED_RR );
\r
7569 int max = sched_get_priority_max( SCHED_RR );
\r
7570 if ( priority < min ) priority = min;
\r
7571 else if ( priority > max ) priority = max;
\r
7572 stream_.callbackInfo.priority = priority;
\r
7576 stream_.callbackInfo.isRunning = true;
\r
7577 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7578 pthread_attr_destroy( &attr );
\r
7580 stream_.callbackInfo.isRunning = false;
\r
7581 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7590 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7591 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7592 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7594 stream_.apiHandle = 0;
\r
7597 if ( phandle) snd_pcm_close( phandle );
\r
7599 for ( int i=0; i<2; i++ ) {
\r
7600 if ( stream_.userBuffer[i] ) {
\r
7601 free( stream_.userBuffer[i] );
\r
7602 stream_.userBuffer[i] = 0;
\r
7606 if ( stream_.deviceBuffer ) {
\r
7607 free( stream_.deviceBuffer );
\r
7608 stream_.deviceBuffer = 0;
\r
7611 stream_.state = STREAM_CLOSED;
\r
7615 void RtApiAlsa :: closeStream()
\r
7617 if ( stream_.state == STREAM_CLOSED ) {
\r
7618 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7619 error( RtAudioError::WARNING );
\r
7623 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7624 stream_.callbackInfo.isRunning = false;
\r
7625 MUTEX_LOCK( &stream_.mutex );
\r
7626 if ( stream_.state == STREAM_STOPPED ) {
\r
7627 apiInfo->runnable = true;
\r
7628 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7630 MUTEX_UNLOCK( &stream_.mutex );
\r
7631 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7633 if ( stream_.state == STREAM_RUNNING ) {
\r
7634 stream_.state = STREAM_STOPPED;
\r
7635 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7636 snd_pcm_drop( apiInfo->handles[0] );
\r
7637 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7638 snd_pcm_drop( apiInfo->handles[1] );
\r
7642 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7643 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7644 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7646 stream_.apiHandle = 0;
\r
7649 for ( int i=0; i<2; i++ ) {
\r
7650 if ( stream_.userBuffer[i] ) {
\r
7651 free( stream_.userBuffer[i] );
\r
7652 stream_.userBuffer[i] = 0;
\r
7656 if ( stream_.deviceBuffer ) {
\r
7657 free( stream_.deviceBuffer );
\r
7658 stream_.deviceBuffer = 0;
\r
7661 stream_.mode = UNINITIALIZED;
\r
7662 stream_.state = STREAM_CLOSED;
\r
7665 void RtApiAlsa :: startStream()
\r
7667 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7670 if ( stream_.state == STREAM_RUNNING ) {
\r
7671 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7672 error( RtAudioError::WARNING );
\r
7676 MUTEX_LOCK( &stream_.mutex );
\r
7679 snd_pcm_state_t state;
\r
7680 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7681 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7682 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7683 state = snd_pcm_state( handle[0] );
\r
7684 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7685 result = snd_pcm_prepare( handle[0] );
\r
7686 if ( result < 0 ) {
\r
7687 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7688 errorText_ = errorStream_.str();
\r
7694 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7695 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7696 state = snd_pcm_state( handle[1] );
\r
7697 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7698 result = snd_pcm_prepare( handle[1] );
\r
7699 if ( result < 0 ) {
\r
7700 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7701 errorText_ = errorStream_.str();
\r
7707 stream_.state = STREAM_RUNNING;
\r
7710 apiInfo->runnable = true;
\r
7711 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7712 MUTEX_UNLOCK( &stream_.mutex );
\r
7714 if ( result >= 0 ) return;
\r
7715 error( RtAudioError::SYSTEM_ERROR );
\r
7718 void RtApiAlsa :: stopStream()
\r
7721 if ( stream_.state == STREAM_STOPPED ) {
\r
7722 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7723 error( RtAudioError::WARNING );
\r
7727 stream_.state = STREAM_STOPPED;
\r
7728 MUTEX_LOCK( &stream_.mutex );
\r
7731 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7732 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7733 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7734 if ( apiInfo->synchronized )
\r
7735 result = snd_pcm_drop( handle[0] );
\r
7737 result = snd_pcm_drain( handle[0] );
\r
7738 if ( result < 0 ) {
\r
7739 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7740 errorText_ = errorStream_.str();
\r
7745 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7746 result = snd_pcm_drop( handle[1] );
\r
7747 if ( result < 0 ) {
\r
7748 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7749 errorText_ = errorStream_.str();
\r
7755 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7756 MUTEX_UNLOCK( &stream_.mutex );
\r
7758 if ( result >= 0 ) return;
\r
7759 error( RtAudioError::SYSTEM_ERROR );
\r
7762 void RtApiAlsa :: abortStream()
\r
7765 if ( stream_.state == STREAM_STOPPED ) {
\r
7766 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7767 error( RtAudioError::WARNING );
\r
7771 stream_.state = STREAM_STOPPED;
\r
7772 MUTEX_LOCK( &stream_.mutex );
\r
7775 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7776 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7777 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7778 result = snd_pcm_drop( handle[0] );
\r
7779 if ( result < 0 ) {
\r
7780 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7781 errorText_ = errorStream_.str();
\r
7786 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7787 result = snd_pcm_drop( handle[1] );
\r
7788 if ( result < 0 ) {
\r
7789 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7790 errorText_ = errorStream_.str();
\r
7796 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7797 MUTEX_UNLOCK( &stream_.mutex );
\r
7799 if ( result >= 0 ) return;
\r
7800 error( RtAudioError::SYSTEM_ERROR );
\r
7803 void RtApiAlsa :: callbackEvent()
\r
7805 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7806 if ( stream_.state == STREAM_STOPPED ) {
\r
7807 MUTEX_LOCK( &stream_.mutex );
\r
7808 while ( !apiInfo->runnable )
\r
7809 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7811 if ( stream_.state != STREAM_RUNNING ) {
\r
7812 MUTEX_UNLOCK( &stream_.mutex );
\r
7815 MUTEX_UNLOCK( &stream_.mutex );
\r
7818 if ( stream_.state == STREAM_CLOSED ) {
\r
7819 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7820 error( RtAudioError::WARNING );
\r
7824 int doStopStream = 0;
\r
7825 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7826 double streamTime = getStreamTime();
\r
7827 RtAudioStreamStatus status = 0;
\r
7828 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7829 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7830 apiInfo->xrun[0] = false;
\r
7832 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7833 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7834 apiInfo->xrun[1] = false;
\r
7836 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7837 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7839 if ( doStopStream == 2 ) {
\r
7844 MUTEX_LOCK( &stream_.mutex );
\r
7846 // The state might change while waiting on a mutex.
\r
7847 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7852 snd_pcm_t **handle;
\r
7853 snd_pcm_sframes_t frames;
\r
7854 RtAudioFormat format;
\r
7855 handle = (snd_pcm_t **) apiInfo->handles;
\r
7857 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7859 // Setup parameters.
\r
7860 if ( stream_.doConvertBuffer[1] ) {
\r
7861 buffer = stream_.deviceBuffer;
\r
7862 channels = stream_.nDeviceChannels[1];
\r
7863 format = stream_.deviceFormat[1];
\r
7866 buffer = stream_.userBuffer[1];
\r
7867 channels = stream_.nUserChannels[1];
\r
7868 format = stream_.userFormat;
\r
7871 // Read samples from device in interleaved/non-interleaved format.
\r
7872 if ( stream_.deviceInterleaved[1] )
\r
7873 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
7875 void *bufs[channels];
\r
7876 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7877 for ( int i=0; i<channels; i++ )
\r
7878 bufs[i] = (void *) (buffer + (i * offset));
\r
7879 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
7882 if ( result < (int) stream_.bufferSize ) {
\r
7883 // Either an error or overrun occured.
\r
7884 if ( result == -EPIPE ) {
\r
7885 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7886 if ( state == SND_PCM_STATE_XRUN ) {
\r
7887 apiInfo->xrun[1] = true;
\r
7888 result = snd_pcm_prepare( handle[1] );
\r
7889 if ( result < 0 ) {
\r
7890 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7891 errorText_ = errorStream_.str();
\r
7895 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7896 errorText_ = errorStream_.str();
\r
7900 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7901 errorText_ = errorStream_.str();
\r
7903 error( RtAudioError::WARNING );
\r
7907 // Do byte swapping if necessary.
\r
7908 if ( stream_.doByteSwap[1] )
\r
7909 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
7911 // Do buffer conversion if necessary.
\r
7912 if ( stream_.doConvertBuffer[1] )
\r
7913 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7915 // Check stream latency
\r
7916 result = snd_pcm_delay( handle[1], &frames );
\r
7917 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
7922 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7924 // Setup parameters and do buffer conversion if necessary.
\r
7925 if ( stream_.doConvertBuffer[0] ) {
\r
7926 buffer = stream_.deviceBuffer;
\r
7927 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7928 channels = stream_.nDeviceChannels[0];
\r
7929 format = stream_.deviceFormat[0];
\r
7932 buffer = stream_.userBuffer[0];
\r
7933 channels = stream_.nUserChannels[0];
\r
7934 format = stream_.userFormat;
\r
7937 // Do byte swapping if necessary.
\r
7938 if ( stream_.doByteSwap[0] )
\r
7939 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
7941 // Write samples to device in interleaved/non-interleaved format.
\r
7942 if ( stream_.deviceInterleaved[0] )
\r
7943 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
7945 void *bufs[channels];
\r
7946 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7947 for ( int i=0; i<channels; i++ )
\r
7948 bufs[i] = (void *) (buffer + (i * offset));
\r
7949 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
7952 if ( result < (int) stream_.bufferSize ) {
\r
7953 // Either an error or underrun occured.
\r
7954 if ( result == -EPIPE ) {
\r
7955 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
7956 if ( state == SND_PCM_STATE_XRUN ) {
\r
7957 apiInfo->xrun[0] = true;
\r
7958 result = snd_pcm_prepare( handle[0] );
\r
7959 if ( result < 0 ) {
\r
7960 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
7961 errorText_ = errorStream_.str();
\r
7965 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7966 errorText_ = errorStream_.str();
\r
7970 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
7971 errorText_ = errorStream_.str();
\r
7973 error( RtAudioError::WARNING );
\r
7977 // Check stream latency
\r
7978 result = snd_pcm_delay( handle[0], &frames );
\r
7979 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
7983 MUTEX_UNLOCK( &stream_.mutex );
\r
7985 RtApi::tickStreamTime();
\r
7986 if ( doStopStream == 1 ) this->stopStream();
\r
7989 static void *alsaCallbackHandler( void *ptr )
\r
7991 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7992 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
7993 bool *isRunning = &info->isRunning;
\r
7995 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7996 if ( &info->doRealtime ) {
\r
7997 pthread_t tID = pthread_self(); // ID of this thread
\r
7998 sched_param prio = { info->priority }; // scheduling priority of thread
\r
7999 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
8003 while ( *isRunning == true ) {
\r
8004 pthread_testcancel();
\r
8005 object->callbackEvent();
\r
8008 pthread_exit( NULL );
\r
8011 //******************** End of __LINUX_ALSA__ *********************//
\r
8014 #if defined(__LINUX_PULSE__)
\r
8016 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8017 // and Tristan Matthews.
\r
8019 #include <pulse/error.h>
\r
8020 #include <pulse/simple.h>
\r
// Sample rates the PulseAudio backend advertises; the list is zero-terminated
// so callers can iterate without knowing its length.
static const unsigned int SUPPORTED_SAMPLERATES[] = {
  8000, 16000, 22050, 32000, 44100, 48000, 96000, 0
};
\r
8026 struct rtaudio_pa_format_mapping_t {
\r
8027 RtAudioFormat rtaudio_format;
\r
8028 pa_sample_format_t pa_format;
\r
8031 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8032 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8033 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8034 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8035 {0, PA_SAMPLE_INVALID}};
\r
8037 struct PulseAudioHandle {
\r
8038 pa_simple *s_play;
\r
8041 pthread_cond_t runnable_cv;
\r
8043 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8046 RtApiPulse::~RtApiPulse()
\r
8048 if ( stream_.state != STREAM_CLOSED )
\r
8052 unsigned int RtApiPulse::getDeviceCount( void )
\r
8057 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8059 RtAudio::DeviceInfo info;
\r
8060 info.probed = true;
\r
8061 info.name = "PulseAudio";
\r
8062 info.outputChannels = 2;
\r
8063 info.inputChannels = 2;
\r
8064 info.duplexChannels = 2;
\r
8065 info.isDefaultOutput = true;
\r
8066 info.isDefaultInput = true;
\r
8068 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8069 info.sampleRates.push_back( *sr );
\r
8071 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8076 static void *pulseaudio_callback( void * user )
\r
8078 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8079 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8080 volatile bool *isRunning = &cbi->isRunning;
\r
8082 while ( *isRunning ) {
\r
8083 pthread_testcancel();
\r
8084 context->callbackEvent();
\r
8087 pthread_exit( NULL );
\r
8090 void RtApiPulse::closeStream( void )
\r
8092 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8094 stream_.callbackInfo.isRunning = false;
\r
8096 MUTEX_LOCK( &stream_.mutex );
\r
8097 if ( stream_.state == STREAM_STOPPED ) {
\r
8098 pah->runnable = true;
\r
8099 pthread_cond_signal( &pah->runnable_cv );
\r
8101 MUTEX_UNLOCK( &stream_.mutex );
\r
8103 pthread_join( pah->thread, 0 );
\r
8104 if ( pah->s_play ) {
\r
8105 pa_simple_flush( pah->s_play, NULL );
\r
8106 pa_simple_free( pah->s_play );
\r
8109 pa_simple_free( pah->s_rec );
\r
8111 pthread_cond_destroy( &pah->runnable_cv );
\r
8113 stream_.apiHandle = 0;
\r
8116 if ( stream_.userBuffer[0] ) {
\r
8117 free( stream_.userBuffer[0] );
\r
8118 stream_.userBuffer[0] = 0;
\r
8120 if ( stream_.userBuffer[1] ) {
\r
8121 free( stream_.userBuffer[1] );
\r
8122 stream_.userBuffer[1] = 0;
\r
8125 stream_.state = STREAM_CLOSED;
\r
8126 stream_.mode = UNINITIALIZED;
\r
8129 void RtApiPulse::callbackEvent( void )
\r
8131 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8133 if ( stream_.state == STREAM_STOPPED ) {
\r
8134 MUTEX_LOCK( &stream_.mutex );
\r
8135 while ( !pah->runnable )
\r
8136 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8138 if ( stream_.state != STREAM_RUNNING ) {
\r
8139 MUTEX_UNLOCK( &stream_.mutex );
\r
8142 MUTEX_UNLOCK( &stream_.mutex );
\r
8145 if ( stream_.state == STREAM_CLOSED ) {
\r
8146 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8147 "this shouldn't happen!";
\r
8148 error( RtAudioError::WARNING );
\r
8152 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8153 double streamTime = getStreamTime();
\r
8154 RtAudioStreamStatus status = 0;
\r
8155 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8156 stream_.bufferSize, streamTime, status,
\r
8157 stream_.callbackInfo.userData );
\r
8159 if ( doStopStream == 2 ) {
\r
8164 MUTEX_LOCK( &stream_.mutex );
\r
8165 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8166 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8168 if ( stream_.state != STREAM_RUNNING )
\r
8173 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8174 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8175 convertBuffer( stream_.deviceBuffer,
\r
8176 stream_.userBuffer[OUTPUT],
\r
8177 stream_.convertInfo[OUTPUT] );
\r
8178 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8179 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8181 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8182 formatBytes( stream_.userFormat );
\r
8184 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8185 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8186 pa_strerror( pa_error ) << ".";
\r
8187 errorText_ = errorStream_.str();
\r
8188 error( RtAudioError::WARNING );
\r
8192 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8193 if ( stream_.doConvertBuffer[INPUT] )
\r
8194 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8195 formatBytes( stream_.deviceFormat[INPUT] );
\r
8197 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8198 formatBytes( stream_.userFormat );
\r
8200 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8201 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8202 pa_strerror( pa_error ) << ".";
\r
8203 errorText_ = errorStream_.str();
\r
8204 error( RtAudioError::WARNING );
\r
8206 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8207 convertBuffer( stream_.userBuffer[INPUT],
\r
8208 stream_.deviceBuffer,
\r
8209 stream_.convertInfo[INPUT] );
\r
8214 MUTEX_UNLOCK( &stream_.mutex );
\r
8215 RtApi::tickStreamTime();
\r
8217 if ( doStopStream == 1 )
\r
8221 void RtApiPulse::startStream( void )
\r
8223 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8225 if ( stream_.state == STREAM_CLOSED ) {
\r
8226 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8227 error( RtAudioError::INVALID_USE );
\r
8230 if ( stream_.state == STREAM_RUNNING ) {
\r
8231 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8232 error( RtAudioError::WARNING );
\r
8236 MUTEX_LOCK( &stream_.mutex );
\r
8238 stream_.state = STREAM_RUNNING;
\r
8240 pah->runnable = true;
\r
8241 pthread_cond_signal( &pah->runnable_cv );
\r
8242 MUTEX_UNLOCK( &stream_.mutex );
\r
8245 void RtApiPulse::stopStream( void )
\r
8247 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8249 if ( stream_.state == STREAM_CLOSED ) {
\r
8250 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8251 error( RtAudioError::INVALID_USE );
\r
8254 if ( stream_.state == STREAM_STOPPED ) {
\r
8255 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8256 error( RtAudioError::WARNING );
\r
8260 stream_.state = STREAM_STOPPED;
\r
8261 MUTEX_LOCK( &stream_.mutex );
\r
8263 if ( pah && pah->s_play ) {
\r
8265 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8266 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8267 pa_strerror( pa_error ) << ".";
\r
8268 errorText_ = errorStream_.str();
\r
8269 MUTEX_UNLOCK( &stream_.mutex );
\r
8270 error( RtAudioError::SYSTEM_ERROR );
\r
8275 stream_.state = STREAM_STOPPED;
\r
8276 MUTEX_UNLOCK( &stream_.mutex );
\r
8279 void RtApiPulse::abortStream( void )
\r
8281 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8283 if ( stream_.state == STREAM_CLOSED ) {
\r
8284 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8285 error( RtAudioError::INVALID_USE );
\r
8288 if ( stream_.state == STREAM_STOPPED ) {
\r
8289 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8290 error( RtAudioError::WARNING );
\r
8294 stream_.state = STREAM_STOPPED;
\r
8295 MUTEX_LOCK( &stream_.mutex );
\r
8297 if ( pah && pah->s_play ) {
\r
8299 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8300 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8301 pa_strerror( pa_error ) << ".";
\r
8302 errorText_ = errorStream_.str();
\r
8303 MUTEX_UNLOCK( &stream_.mutex );
\r
8304 error( RtAudioError::SYSTEM_ERROR );
\r
8309 stream_.state = STREAM_STOPPED;
\r
8310 MUTEX_UNLOCK( &stream_.mutex );
\r
8313 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8314 unsigned int channels, unsigned int firstChannel,
\r
8315 unsigned int sampleRate, RtAudioFormat format,
\r
8316 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8318 PulseAudioHandle *pah = 0;
\r
8319 unsigned long bufferBytes = 0;
\r
8320 pa_sample_spec ss;
\r
8322 if ( device != 0 ) return false;
\r
8323 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8324 if ( channels != 1 && channels != 2 ) {
\r
8325 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8328 ss.channels = channels;
\r
8330 if ( firstChannel != 0 ) return false;
\r
8332 bool sr_found = false;
\r
8333 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8334 if ( sampleRate == *sr ) {
\r
8336 stream_.sampleRate = sampleRate;
\r
8337 ss.rate = sampleRate;
\r
8341 if ( !sr_found ) {
\r
8342 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8346 bool sf_found = 0;
\r
8347 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8348 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8349 if ( format == sf->rtaudio_format ) {
\r
8351 stream_.userFormat = sf->rtaudio_format;
\r
8352 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8353 ss.format = sf->pa_format;
\r
8357 if ( !sf_found ) { // Use internal data format conversion.
\r
8358 stream_.userFormat = format;
\r
8359 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8360 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8363 // Set other stream parameters.
\r
8364 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8365 else stream_.userInterleaved = true;
\r
8366 stream_.deviceInterleaved[mode] = true;
\r
8367 stream_.nBuffers = 1;
\r
8368 stream_.doByteSwap[mode] = false;
\r
8369 stream_.nUserChannels[mode] = channels;
\r
8370 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8371 stream_.channelOffset[mode] = 0;
\r
8372 std::string streamName = "RtAudio";
\r
8374 // Set flags for buffer conversion.
\r
8375 stream_.doConvertBuffer[mode] = false;
\r
8376 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8377 stream_.doConvertBuffer[mode] = true;
\r
8378 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8379 stream_.doConvertBuffer[mode] = true;
\r
8381 // Allocate necessary internal buffers.
\r
8382 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8383 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8384 if ( stream_.userBuffer[mode] == NULL ) {
\r
8385 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8388 stream_.bufferSize = *bufferSize;
\r
8390 if ( stream_.doConvertBuffer[mode] ) {
\r
8392 bool makeBuffer = true;
\r
8393 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8394 if ( mode == INPUT ) {
\r
8395 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8396 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8397 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8401 if ( makeBuffer ) {
\r
8402 bufferBytes *= *bufferSize;
\r
8403 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8404 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8405 if ( stream_.deviceBuffer == NULL ) {
\r
8406 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8412 stream_.device[mode] = device;
\r
8414 // Setup the buffer conversion information structure.
\r
8415 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8417 if ( !stream_.apiHandle ) {
\r
8418 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8420 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8424 stream_.apiHandle = pah;
\r
8425 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8426 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8430 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8433 if ( !options->streamName.empty() ) streamName = options->streamName;
\r
8436 pa_buffer_attr buffer_attr;
\r
8437 buffer_attr.fragsize = bufferBytes;
\r
8438 buffer_attr.maxlength = -1;
\r
8440 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8441 if ( !pah->s_rec ) {
\r
8442 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8447 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8448 if ( !pah->s_play ) {
\r
8449 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8457 if ( stream_.mode == UNINITIALIZED )
\r
8458 stream_.mode = mode;
\r
8459 else if ( stream_.mode == mode )
\r
8462 stream_.mode = DUPLEX;
\r
8464 if ( !stream_.callbackInfo.isRunning ) {
\r
8465 stream_.callbackInfo.object = this;
\r
8466 stream_.callbackInfo.isRunning = true;
\r
8467 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8468 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8473 stream_.state = STREAM_STOPPED;
\r
8477 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8478 pthread_cond_destroy( &pah->runnable_cv );
\r
8480 stream_.apiHandle = 0;
\r
8483 for ( int i=0; i<2; i++ ) {
\r
8484 if ( stream_.userBuffer[i] ) {
\r
8485 free( stream_.userBuffer[i] );
\r
8486 stream_.userBuffer[i] = 0;
\r
8490 if ( stream_.deviceBuffer ) {
\r
8491 free( stream_.deviceBuffer );
\r
8492 stream_.deviceBuffer = 0;
\r
8498 //******************** End of __LINUX_PULSE__ *********************//
\r
8501 #if defined(__LINUX_OSS__)
\r
8503 #include <unistd.h>
\r
8504 #include <sys/ioctl.h>
\r
8505 #include <unistd.h>
\r
8506 #include <fcntl.h>
\r
8507 #include <sys/soundcard.h>
\r
8508 #include <errno.h>
\r
8511 static void *ossCallbackHandler(void * ptr);
\r
8513 // A structure to hold various information related to the OSS API
\r
8514 // implementation.
\r
8515 struct OssHandle {
\r
8516 int id[2]; // device ids
\r
8519 pthread_cond_t runnable;
\r
8522 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
8525 RtApiOss :: RtApiOss()
\r
8527 // Nothing to do here.
\r
8530 RtApiOss :: ~RtApiOss()
\r
8532 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8535 unsigned int RtApiOss :: getDeviceCount( void )
\r
8537 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8538 if ( mixerfd == -1 ) {
\r
8539 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8540 error( RtAudioError::WARNING );
\r
8544 oss_sysinfo sysinfo;
\r
8545 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8547 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8548 error( RtAudioError::WARNING );
\r
8553 return sysinfo.numaudios;
\r
8556 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8558 RtAudio::DeviceInfo info;
\r
8559 info.probed = false;
\r
8561 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8562 if ( mixerfd == -1 ) {
\r
8563 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8564 error( RtAudioError::WARNING );
\r
8568 oss_sysinfo sysinfo;
\r
8569 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8570 if ( result == -1 ) {
\r
8572 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8573 error( RtAudioError::WARNING );
\r
8577 unsigned nDevices = sysinfo.numaudios;
\r
8578 if ( nDevices == 0 ) {
\r
8580 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8581 error( RtAudioError::INVALID_USE );
\r
8585 if ( device >= nDevices ) {
\r
8587 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8588 error( RtAudioError::INVALID_USE );
\r
8592 oss_audioinfo ainfo;
\r
8593 ainfo.dev = device;
\r
8594 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8596 if ( result == -1 ) {
\r
8597 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8598 errorText_ = errorStream_.str();
\r
8599 error( RtAudioError::WARNING );
\r
8604 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8605 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8606 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8607 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8608 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8611 // Probe data formats ... do for input
\r
8612 unsigned long mask = ainfo.iformats;
\r
8613 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8614 info.nativeFormats |= RTAUDIO_SINT16;
\r
8615 if ( mask & AFMT_S8 )
\r
8616 info.nativeFormats |= RTAUDIO_SINT8;
\r
8617 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8618 info.nativeFormats |= RTAUDIO_SINT32;
\r
8619 if ( mask & AFMT_FLOAT )
\r
8620 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8621 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8622 info.nativeFormats |= RTAUDIO_SINT24;
\r
8624 // Check that we have at least one supported format
\r
8625 if ( info.nativeFormats == 0 ) {
\r
8626 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8627 errorText_ = errorStream_.str();
\r
8628 error( RtAudioError::WARNING );
\r
8632 // Probe the supported sample rates.
\r
8633 info.sampleRates.clear();
\r
8634 if ( ainfo.nrates ) {
\r
8635 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8636 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8637 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8638 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8645 // Check min and max rate values;
\r
8646 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8647 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
8648 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8652 if ( info.sampleRates.size() == 0 ) {
\r
8653 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8654 errorText_ = errorStream_.str();
\r
8655 error( RtAudioError::WARNING );
\r
8658 info.probed = true;
\r
8659 info.name = ainfo.name;
\r
8666 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8667 unsigned int firstChannel, unsigned int sampleRate,
\r
8668 RtAudioFormat format, unsigned int *bufferSize,
\r
8669 RtAudio::StreamOptions *options )
\r
8671 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8672 if ( mixerfd == -1 ) {
\r
8673 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8677 oss_sysinfo sysinfo;
\r
8678 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8679 if ( result == -1 ) {
\r
8681 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8685 unsigned nDevices = sysinfo.numaudios;
\r
8686 if ( nDevices == 0 ) {
\r
8687 // This should not happen because a check is made before this function is called.
\r
8689 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8693 if ( device >= nDevices ) {
\r
8694 // This should not happen because a check is made before this function is called.
\r
8696 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8700 oss_audioinfo ainfo;
\r
8701 ainfo.dev = device;
\r
8702 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8704 if ( result == -1 ) {
\r
8705 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8706 errorText_ = errorStream_.str();
\r
8710 // Check if device supports input or output
\r
8711 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8712 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8713 if ( mode == OUTPUT )
\r
8714 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8716 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8717 errorText_ = errorStream_.str();
\r
8722 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8723 if ( mode == OUTPUT )
\r
8724 flags |= O_WRONLY;
\r
8725 else { // mode == INPUT
\r
8726 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8727 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8728 close( handle->id[0] );
\r
8729 handle->id[0] = 0;
\r
8730 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8731 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8732 errorText_ = errorStream_.str();
\r
8735 // Check that the number previously set channels is the same.
\r
8736 if ( stream_.nUserChannels[0] != channels ) {
\r
8737 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8738 errorText_ = errorStream_.str();
\r
8744 flags |= O_RDONLY;
\r
8747 // Set exclusive access if specified.
\r
8748 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8750 // Try to open the device.
\r
8752 fd = open( ainfo.devnode, flags, 0 );
\r
8754 if ( errno == EBUSY )
\r
8755 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8757 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8758 errorText_ = errorStream_.str();
\r
8762 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8764 if ( flags | O_RDWR ) {
\r
8765 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8766 if ( result == -1) {
\r
8767 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8768 errorText_ = errorStream_.str();
\r
8774 // Check the device channel support.
\r
8775 stream_.nUserChannels[mode] = channels;
\r
8776 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8778 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8779 errorText_ = errorStream_.str();
\r
8783 // Set the number of channels.
\r
8784 int deviceChannels = channels + firstChannel;
\r
8785 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8786 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8788 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8789 errorText_ = errorStream_.str();
\r
8792 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8794 // Get the data format mask
\r
8796 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8797 if ( result == -1 ) {
\r
8799 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8800 errorText_ = errorStream_.str();
\r
8804 // Determine how to set the device format.
\r
8805 stream_.userFormat = format;
\r
8806 int deviceFormat = -1;
\r
8807 stream_.doByteSwap[mode] = false;
\r
8808 if ( format == RTAUDIO_SINT8 ) {
\r
8809 if ( mask & AFMT_S8 ) {
\r
8810 deviceFormat = AFMT_S8;
\r
8811 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8814 else if ( format == RTAUDIO_SINT16 ) {
\r
8815 if ( mask & AFMT_S16_NE ) {
\r
8816 deviceFormat = AFMT_S16_NE;
\r
8817 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8819 else if ( mask & AFMT_S16_OE ) {
\r
8820 deviceFormat = AFMT_S16_OE;
\r
8821 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8822 stream_.doByteSwap[mode] = true;
\r
8825 else if ( format == RTAUDIO_SINT24 ) {
\r
8826 if ( mask & AFMT_S24_NE ) {
\r
8827 deviceFormat = AFMT_S24_NE;
\r
8828 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8830 else if ( mask & AFMT_S24_OE ) {
\r
8831 deviceFormat = AFMT_S24_OE;
\r
8832 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8833 stream_.doByteSwap[mode] = true;
\r
8836 else if ( format == RTAUDIO_SINT32 ) {
\r
8837 if ( mask & AFMT_S32_NE ) {
\r
8838 deviceFormat = AFMT_S32_NE;
\r
8839 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8841 else if ( mask & AFMT_S32_OE ) {
\r
8842 deviceFormat = AFMT_S32_OE;
\r
8843 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8844 stream_.doByteSwap[mode] = true;
\r
8848 if ( deviceFormat == -1 ) {
\r
8849 // The user requested format is not natively supported by the device.
\r
8850 if ( mask & AFMT_S16_NE ) {
\r
8851 deviceFormat = AFMT_S16_NE;
\r
8852 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8854 else if ( mask & AFMT_S32_NE ) {
\r
8855 deviceFormat = AFMT_S32_NE;
\r
8856 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8858 else if ( mask & AFMT_S24_NE ) {
\r
8859 deviceFormat = AFMT_S24_NE;
\r
8860 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8862 else if ( mask & AFMT_S16_OE ) {
\r
8863 deviceFormat = AFMT_S16_OE;
\r
8864 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8865 stream_.doByteSwap[mode] = true;
\r
8867 else if ( mask & AFMT_S32_OE ) {
\r
8868 deviceFormat = AFMT_S32_OE;
\r
8869 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8870 stream_.doByteSwap[mode] = true;
\r
8872 else if ( mask & AFMT_S24_OE ) {
\r
8873 deviceFormat = AFMT_S24_OE;
\r
8874 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8875 stream_.doByteSwap[mode] = true;
\r
8877 else if ( mask & AFMT_S8) {
\r
8878 deviceFormat = AFMT_S8;
\r
8879 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8883 if ( stream_.deviceFormat[mode] == 0 ) {
\r
8884 // This really shouldn't happen ...
\r
8886 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8887 errorText_ = errorStream_.str();
\r
8891 // Set the data format.
\r
8892 int temp = deviceFormat;
\r
8893 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
8894 if ( result == -1 || deviceFormat != temp ) {
\r
8896 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
8897 errorText_ = errorStream_.str();
\r
8901 // Attempt to set the buffer size. According to OSS, the minimum
\r
8902 // number of buffers is two. The supposed minimum buffer size is 16
\r
8903 // bytes, so that will be our lower bound. The argument to this
\r
8904 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
8905 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
8906 // We'll check the actual value used near the end of the setup
\r
8908 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
8909 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
8911 if ( options ) buffers = options->numberOfBuffers;
\r
8912 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
8913 if ( buffers < 2 ) buffers = 3;
\r
8914 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
8915 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
8916 if ( result == -1 ) {
\r
8918 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
8919 errorText_ = errorStream_.str();
\r
8922 stream_.nBuffers = buffers;
\r
8924 // Save buffer size (in sample frames).
\r
8925 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
8926 stream_.bufferSize = *bufferSize;
\r
8928 // Set the sample rate.
\r
8929 int srate = sampleRate;
\r
8930 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
8931 if ( result == -1 ) {
\r
8933 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
8934 errorText_ = errorStream_.str();
\r
8938 // Verify the sample rate setup worked.
\r
8939 if ( abs( srate - sampleRate ) > 100 ) {
\r
8941 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
8942 errorText_ = errorStream_.str();
\r
8945 stream_.sampleRate = sampleRate;
\r
8947 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8948 // We're doing duplex setup here.
\r
8949 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
8950 stream_.nDeviceChannels[0] = deviceChannels;
\r
8953 // Set interleaving parameters.
\r
8954 stream_.userInterleaved = true;
\r
8955 stream_.deviceInterleaved[mode] = true;
\r
8956 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
8957 stream_.userInterleaved = false;
\r
8959 // Set flags for buffer conversion
\r
8960 stream_.doConvertBuffer[mode] = false;
\r
8961 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8962 stream_.doConvertBuffer[mode] = true;
\r
8963 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8964 stream_.doConvertBuffer[mode] = true;
\r
8965 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
8966 stream_.nUserChannels[mode] > 1 )
\r
8967 stream_.doConvertBuffer[mode] = true;
\r
8969 // Allocate the stream handles if necessary and then save.
\r
8970 if ( stream_.apiHandle == 0 ) {
\r
8972 handle = new OssHandle;
\r
8974 catch ( std::bad_alloc& ) {
\r
8975 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
8979 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
8980 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
8984 stream_.apiHandle = (void *) handle;
\r
8987 handle = (OssHandle *) stream_.apiHandle;
\r
8989 handle->id[mode] = fd;
\r
8991 // Allocate necessary internal buffers.
\r
8992 unsigned long bufferBytes;
\r
8993 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8994 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8995 if ( stream_.userBuffer[mode] == NULL ) {
\r
8996 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
9000 if ( stream_.doConvertBuffer[mode] ) {
\r
9002 bool makeBuffer = true;
\r
9003 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
9004 if ( mode == INPUT ) {
\r
9005 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
9006 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
9007 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9011 if ( makeBuffer ) {
\r
9012 bufferBytes *= *bufferSize;
\r
9013 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9014 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9015 if ( stream_.deviceBuffer == NULL ) {
\r
9016 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9022 stream_.device[mode] = device;
\r
9023 stream_.state = STREAM_STOPPED;
\r
9025 // Setup the buffer conversion information structure.
\r
9026 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9028 // Setup thread if necessary.
\r
9029 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9030 // We had already set up an output stream.
\r
9031 stream_.mode = DUPLEX;
\r
9032 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9035 stream_.mode = mode;
\r
9037 // Setup callback thread.
\r
9038 stream_.callbackInfo.object = (void *) this;
\r
9040 // Set the thread attributes for joinable and realtime scheduling
\r
9041 // priority. The higher priority will only take affect if the
\r
9042 // program is run as root or suid.
\r
9043 pthread_attr_t attr;
\r
9044 pthread_attr_init( &attr );
\r
9045 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9046 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9047 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9048 struct sched_param param;
\r
9049 int priority = options->priority;
\r
9050 int min = sched_get_priority_min( SCHED_RR );
\r
9051 int max = sched_get_priority_max( SCHED_RR );
\r
9052 if ( priority < min ) priority = min;
\r
9053 else if ( priority > max ) priority = max;
\r
9054 param.sched_priority = priority;
\r
9055 pthread_attr_setschedparam( &attr, ¶m );
\r
9056 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9059 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9061 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9064 stream_.callbackInfo.isRunning = true;
\r
9065 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9066 pthread_attr_destroy( &attr );
\r
9068 stream_.callbackInfo.isRunning = false;
\r
9069 errorText_ = "RtApiOss::error creating callback thread!";
\r
9078 pthread_cond_destroy( &handle->runnable );
\r
9079 if ( handle->id[0] ) close( handle->id[0] );
\r
9080 if ( handle->id[1] ) close( handle->id[1] );
\r
9082 stream_.apiHandle = 0;
\r
9085 for ( int i=0; i<2; i++ ) {
\r
9086 if ( stream_.userBuffer[i] ) {
\r
9087 free( stream_.userBuffer[i] );
\r
9088 stream_.userBuffer[i] = 0;
\r
9092 if ( stream_.deviceBuffer ) {
\r
9093 free( stream_.deviceBuffer );
\r
9094 stream_.deviceBuffer = 0;
\r
9100 void RtApiOss :: closeStream()
\r
9102 if ( stream_.state == STREAM_CLOSED ) {
\r
9103 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9104 error( RtAudioError::WARNING );
\r
9108 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9109 stream_.callbackInfo.isRunning = false;
\r
9110 MUTEX_LOCK( &stream_.mutex );
\r
9111 if ( stream_.state == STREAM_STOPPED )
\r
9112 pthread_cond_signal( &handle->runnable );
\r
9113 MUTEX_UNLOCK( &stream_.mutex );
\r
9114 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9116 if ( stream_.state == STREAM_RUNNING ) {
\r
9117 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9118 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9120 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9121 stream_.state = STREAM_STOPPED;
\r
9125 pthread_cond_destroy( &handle->runnable );
\r
9126 if ( handle->id[0] ) close( handle->id[0] );
\r
9127 if ( handle->id[1] ) close( handle->id[1] );
\r
9129 stream_.apiHandle = 0;
\r
9132 for ( int i=0; i<2; i++ ) {
\r
9133 if ( stream_.userBuffer[i] ) {
\r
9134 free( stream_.userBuffer[i] );
\r
9135 stream_.userBuffer[i] = 0;
\r
9139 if ( stream_.deviceBuffer ) {
\r
9140 free( stream_.deviceBuffer );
\r
9141 stream_.deviceBuffer = 0;
\r
9144 stream_.mode = UNINITIALIZED;
\r
9145 stream_.state = STREAM_CLOSED;
\r
9148 void RtApiOss :: startStream()
\r
9151 if ( stream_.state == STREAM_RUNNING ) {
\r
9152 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9153 error( RtAudioError::WARNING );
\r
9157 MUTEX_LOCK( &stream_.mutex );
\r
9159 stream_.state = STREAM_RUNNING;
\r
9161 // No need to do anything else here ... OSS automatically starts
\r
9162 // when fed samples.
\r
9164 MUTEX_UNLOCK( &stream_.mutex );
\r
9166 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9167 pthread_cond_signal( &handle->runnable );
\r
9170 void RtApiOss :: stopStream()
\r
9173 if ( stream_.state == STREAM_STOPPED ) {
\r
9174 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9175 error( RtAudioError::WARNING );
\r
9179 MUTEX_LOCK( &stream_.mutex );
\r
9181 // The state might change while waiting on a mutex.
\r
9182 if ( stream_.state == STREAM_STOPPED ) {
\r
9183 MUTEX_UNLOCK( &stream_.mutex );
\r
9188 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9189 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9191 // Flush the output with zeros a few times.
\r
9194 RtAudioFormat format;
\r
9196 if ( stream_.doConvertBuffer[0] ) {
\r
9197 buffer = stream_.deviceBuffer;
\r
9198 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9199 format = stream_.deviceFormat[0];
\r
9202 buffer = stream_.userBuffer[0];
\r
9203 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9204 format = stream_.userFormat;
\r
9207 memset( buffer, 0, samples * formatBytes(format) );
\r
9208 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9209 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9210 if ( result == -1 ) {
\r
9211 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9212 error( RtAudioError::WARNING );
\r
9216 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9217 if ( result == -1 ) {
\r
9218 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9219 errorText_ = errorStream_.str();
\r
9222 handle->triggered = false;
\r
9225 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9226 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9227 if ( result == -1 ) {
\r
9228 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9229 errorText_ = errorStream_.str();
\r
9235 stream_.state = STREAM_STOPPED;
\r
9236 MUTEX_UNLOCK( &stream_.mutex );
\r
9238 if ( result != -1 ) return;
\r
9239 error( RtAudioError::SYSTEM_ERROR );
\r
9242 void RtApiOss :: abortStream()
\r
9245 if ( stream_.state == STREAM_STOPPED ) {
\r
9246 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9247 error( RtAudioError::WARNING );
\r
9251 MUTEX_LOCK( &stream_.mutex );
\r
9253 // The state might change while waiting on a mutex.
\r
9254 if ( stream_.state == STREAM_STOPPED ) {
\r
9255 MUTEX_UNLOCK( &stream_.mutex );
\r
9260 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9261 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9262 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9263 if ( result == -1 ) {
\r
9264 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9265 errorText_ = errorStream_.str();
\r
9268 handle->triggered = false;
\r
9271 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9272 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9273 if ( result == -1 ) {
\r
9274 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9275 errorText_ = errorStream_.str();
\r
9281 stream_.state = STREAM_STOPPED;
\r
9282 MUTEX_UNLOCK( &stream_.mutex );
\r
9284 if ( result != -1 ) return;
\r
9285 error( RtAudioError::SYSTEM_ERROR );
\r
9288 void RtApiOss :: callbackEvent()
\r
9290 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9291 if ( stream_.state == STREAM_STOPPED ) {
\r
9292 MUTEX_LOCK( &stream_.mutex );
\r
9293 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9294 if ( stream_.state != STREAM_RUNNING ) {
\r
9295 MUTEX_UNLOCK( &stream_.mutex );
\r
9298 MUTEX_UNLOCK( &stream_.mutex );
\r
9301 if ( stream_.state == STREAM_CLOSED ) {
\r
9302 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9303 error( RtAudioError::WARNING );
\r
9307 // Invoke user callback to get fresh output data.
\r
9308 int doStopStream = 0;
\r
9309 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9310 double streamTime = getStreamTime();
\r
9311 RtAudioStreamStatus status = 0;
\r
9312 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9313 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9314 handle->xrun[0] = false;
\r
9316 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9317 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9318 handle->xrun[1] = false;
\r
9320 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9321 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9322 if ( doStopStream == 2 ) {
\r
9323 this->abortStream();
\r
9327 MUTEX_LOCK( &stream_.mutex );
\r
9329 // The state might change while waiting on a mutex.
\r
9330 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9335 RtAudioFormat format;
\r
9337 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9339 // Setup parameters and do buffer conversion if necessary.
\r
9340 if ( stream_.doConvertBuffer[0] ) {
\r
9341 buffer = stream_.deviceBuffer;
\r
9342 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9343 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9344 format = stream_.deviceFormat[0];
\r
9347 buffer = stream_.userBuffer[0];
\r
9348 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9349 format = stream_.userFormat;
\r
9352 // Do byte swapping if necessary.
\r
9353 if ( stream_.doByteSwap[0] )
\r
9354 byteSwapBuffer( buffer, samples, format );
\r
9356 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9358 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9359 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9360 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9361 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9362 handle->triggered = true;
\r
9365 // Write samples to device.
\r
9366 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9368 if ( result == -1 ) {
\r
9369 // We'll assume this is an underrun, though there isn't a
\r
9370 // specific means for determining that.
\r
9371 handle->xrun[0] = true;
\r
9372 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9373 error( RtAudioError::WARNING );
\r
9374 // Continue on to input section.
\r
9378 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9380 // Setup parameters.
\r
9381 if ( stream_.doConvertBuffer[1] ) {
\r
9382 buffer = stream_.deviceBuffer;
\r
9383 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9384 format = stream_.deviceFormat[1];
\r
9387 buffer = stream_.userBuffer[1];
\r
9388 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9389 format = stream_.userFormat;
\r
9392 // Read samples from device.
\r
9393 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9395 if ( result == -1 ) {
\r
9396 // We'll assume this is an overrun, though there isn't a
\r
9397 // specific means for determining that.
\r
9398 handle->xrun[1] = true;
\r
9399 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9400 error( RtAudioError::WARNING );
\r
9404 // Do byte swapping if necessary.
\r
9405 if ( stream_.doByteSwap[1] )
\r
9406 byteSwapBuffer( buffer, samples, format );
\r
9408 // Do buffer conversion if necessary.
\r
9409 if ( stream_.doConvertBuffer[1] )
\r
9410 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9414 MUTEX_UNLOCK( &stream_.mutex );
\r
9416 RtApi::tickStreamTime();
\r
9417 if ( doStopStream == 1 ) this->stopStream();
\r
9420 static void *ossCallbackHandler( void *ptr )
\r
9422 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9423 RtApiOss *object = (RtApiOss *) info->object;
\r
9424 bool *isRunning = &info->isRunning;
\r
9426 while ( *isRunning == true ) {
\r
9427 pthread_testcancel();
\r
9428 object->callbackEvent();
\r
9431 pthread_exit( NULL );
\r
9434 //******************** End of __LINUX_OSS__ *********************//
\r
9438 // *************************************************** //
\r
9440 // Protected common (OS-independent) RtAudio methods.
\r
9442 // *************************************************** //
\r
9444 // This method can be modified to control the behavior of error
\r
9445 // message printing.
\r
9446 void RtApi :: error( RtAudioError::Type type )
\r
9448 errorStream_.str(""); // clear the ostringstream
\r
9450 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9451 if ( errorCallback ) {
\r
9452 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9454 if ( firstErrorOccurred_ )
\r
9457 firstErrorOccurred_ = true;
\r
9458 const std::string errorMessage = errorText_;
\r
9460 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9461 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9465 errorCallback( type, errorMessage );
\r
9466 firstErrorOccurred_ = false;
\r
9470 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9471 std::cerr << '\n' << errorText_ << "\n\n";
\r
9472 else if ( type != RtAudioError::WARNING )
\r
9473 throw( RtAudioError( errorText_, type ) );
\r
9476 void RtApi :: verifyStream()
\r
9478 if ( stream_.state == STREAM_CLOSED ) {
\r
9479 errorText_ = "RtApi:: a stream is not open!";
\r
9480 error( RtAudioError::INVALID_USE );
\r
9484 void RtApi :: clearStreamInfo()
\r
9486 stream_.mode = UNINITIALIZED;
\r
9487 stream_.state = STREAM_CLOSED;
\r
9488 stream_.sampleRate = 0;
\r
9489 stream_.bufferSize = 0;
\r
9490 stream_.nBuffers = 0;
\r
9491 stream_.userFormat = 0;
\r
9492 stream_.userInterleaved = true;
\r
9493 stream_.streamTime = 0.0;
\r
9494 stream_.apiHandle = 0;
\r
9495 stream_.deviceBuffer = 0;
\r
9496 stream_.callbackInfo.callback = 0;
\r
9497 stream_.callbackInfo.userData = 0;
\r
9498 stream_.callbackInfo.isRunning = false;
\r
9499 stream_.callbackInfo.errorCallback = 0;
\r
9500 for ( int i=0; i<2; i++ ) {
\r
9501 stream_.device[i] = 11111;
\r
9502 stream_.doConvertBuffer[i] = false;
\r
9503 stream_.deviceInterleaved[i] = true;
\r
9504 stream_.doByteSwap[i] = false;
\r
9505 stream_.nUserChannels[i] = 0;
\r
9506 stream_.nDeviceChannels[i] = 0;
\r
9507 stream_.channelOffset[i] = 0;
\r
9508 stream_.deviceFormat[i] = 0;
\r
9509 stream_.latency[i] = 0;
\r
9510 stream_.userBuffer[i] = 0;
\r
9511 stream_.convertInfo[i].channels = 0;
\r
9512 stream_.convertInfo[i].inJump = 0;
\r
9513 stream_.convertInfo[i].outJump = 0;
\r
9514 stream_.convertInfo[i].inFormat = 0;
\r
9515 stream_.convertInfo[i].outFormat = 0;
\r
9516 stream_.convertInfo[i].inOffset.clear();
\r
9517 stream_.convertInfo[i].outOffset.clear();
\r
9521 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9523 if ( format == RTAUDIO_SINT16 )
\r
9525 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9527 else if ( format == RTAUDIO_FLOAT64 )
\r
9529 else if ( format == RTAUDIO_SINT24 )
\r
9531 else if ( format == RTAUDIO_SINT8 )
\r
9534 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9535 error( RtAudioError::WARNING );
\r
9540 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9542 if ( mode == INPUT ) { // convert device to user buffer
\r
9543 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9544 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9545 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9546 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9548 else { // convert user to device buffer
\r
9549 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9550 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9551 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9552 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9555 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9556 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9558 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9560 // Set up the interleave/deinterleave offsets.
\r
9561 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9562 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9563 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9564 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9565 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9566 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9567 stream_.convertInfo[mode].inJump = 1;
\r
9571 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9572 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9573 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9574 stream_.convertInfo[mode].outJump = 1;
\r
9578 else { // no (de)interleaving
\r
9579 if ( stream_.userInterleaved ) {
\r
9580 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9581 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9582 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9586 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9587 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9588 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9589 stream_.convertInfo[mode].inJump = 1;
\r
9590 stream_.convertInfo[mode].outJump = 1;
\r
9595 // Add channel offset.
\r
9596 if ( firstChannel > 0 ) {
\r
9597 if ( stream_.deviceInterleaved[mode] ) {
\r
9598 if ( mode == OUTPUT ) {
\r
9599 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9600 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9603 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9604 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9608 if ( mode == OUTPUT ) {
\r
9609 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9610 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9613 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9614 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9620 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9622 // This function does format conversion, input/output channel compensation, and
\r
9623 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9624 // the lower three bytes of a 32-bit integer.
\r
9626 // Clear our device buffer when in/out duplex device channels are different
\r
9627 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9628 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9629 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9632 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9634 Float64 *out = (Float64 *)outBuffer;
\r
9636 if (info.inFormat == RTAUDIO_SINT8) {
\r
9637 signed char *in = (signed char *)inBuffer;
\r
9638 scale = 1.0 / 127.5;
\r
9639 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9640 for (j=0; j<info.channels; j++) {
\r
9641 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9642 out[info.outOffset[j]] += 0.5;
\r
9643 out[info.outOffset[j]] *= scale;
\r
9645 in += info.inJump;
\r
9646 out += info.outJump;
\r
9649 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9650 Int16 *in = (Int16 *)inBuffer;
\r
9651 scale = 1.0 / 32767.5;
\r
9652 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9653 for (j=0; j<info.channels; j++) {
\r
9654 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9655 out[info.outOffset[j]] += 0.5;
\r
9656 out[info.outOffset[j]] *= scale;
\r
9658 in += info.inJump;
\r
9659 out += info.outJump;
\r
9662 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9663 Int24 *in = (Int24 *)inBuffer;
\r
9664 scale = 1.0 / 8388607.5;
\r
9665 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9666 for (j=0; j<info.channels; j++) {
\r
9667 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9668 out[info.outOffset[j]] += 0.5;
\r
9669 out[info.outOffset[j]] *= scale;
\r
9671 in += info.inJump;
\r
9672 out += info.outJump;
\r
9675 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9676 Int32 *in = (Int32 *)inBuffer;
\r
9677 scale = 1.0 / 2147483647.5;
\r
9678 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9679 for (j=0; j<info.channels; j++) {
\r
9680 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9681 out[info.outOffset[j]] += 0.5;
\r
9682 out[info.outOffset[j]] *= scale;
\r
9684 in += info.inJump;
\r
9685 out += info.outJump;
\r
9688 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9689 Float32 *in = (Float32 *)inBuffer;
\r
9690 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9691 for (j=0; j<info.channels; j++) {
\r
9692 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9694 in += info.inJump;
\r
9695 out += info.outJump;
\r
9698 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9699 // Channel compensation and/or (de)interleaving only.
\r
9700 Float64 *in = (Float64 *)inBuffer;
\r
9701 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9702 for (j=0; j<info.channels; j++) {
\r
9703 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9705 in += info.inJump;
\r
9706 out += info.outJump;
\r
9710 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9712 Float32 *out = (Float32 *)outBuffer;
\r
9714 if (info.inFormat == RTAUDIO_SINT8) {
\r
9715 signed char *in = (signed char *)inBuffer;
\r
9716 scale = (Float32) ( 1.0 / 127.5 );
\r
9717 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9718 for (j=0; j<info.channels; j++) {
\r
9719 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9720 out[info.outOffset[j]] += 0.5;
\r
9721 out[info.outOffset[j]] *= scale;
\r
9723 in += info.inJump;
\r
9724 out += info.outJump;
\r
9727 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9728 Int16 *in = (Int16 *)inBuffer;
\r
9729 scale = (Float32) ( 1.0 / 32767.5 );
\r
9730 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9731 for (j=0; j<info.channels; j++) {
\r
9732 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9733 out[info.outOffset[j]] += 0.5;
\r
9734 out[info.outOffset[j]] *= scale;
\r
9736 in += info.inJump;
\r
9737 out += info.outJump;
\r
9740 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9741 Int24 *in = (Int24 *)inBuffer;
\r
9742 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9743 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9744 for (j=0; j<info.channels; j++) {
\r
9745 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9746 out[info.outOffset[j]] += 0.5;
\r
9747 out[info.outOffset[j]] *= scale;
\r
9749 in += info.inJump;
\r
9750 out += info.outJump;
\r
9753 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9754 Int32 *in = (Int32 *)inBuffer;
\r
9755 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9756 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9757 for (j=0; j<info.channels; j++) {
\r
9758 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9759 out[info.outOffset[j]] += 0.5;
\r
9760 out[info.outOffset[j]] *= scale;
\r
9762 in += info.inJump;
\r
9763 out += info.outJump;
\r
9766 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9767 // Channel compensation and/or (de)interleaving only.
\r
9768 Float32 *in = (Float32 *)inBuffer;
\r
9769 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9770 for (j=0; j<info.channels; j++) {
\r
9771 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9773 in += info.inJump;
\r
9774 out += info.outJump;
\r
9777 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9778 Float64 *in = (Float64 *)inBuffer;
\r
9779 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9780 for (j=0; j<info.channels; j++) {
\r
9781 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9783 in += info.inJump;
\r
9784 out += info.outJump;
\r
9788 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9789 Int32 *out = (Int32 *)outBuffer;
\r
9790 if (info.inFormat == RTAUDIO_SINT8) {
\r
9791 signed char *in = (signed char *)inBuffer;
\r
9792 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9793 for (j=0; j<info.channels; j++) {
\r
9794 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9795 out[info.outOffset[j]] <<= 24;
\r
9797 in += info.inJump;
\r
9798 out += info.outJump;
\r
9801 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9802 Int16 *in = (Int16 *)inBuffer;
\r
9803 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9804 for (j=0; j<info.channels; j++) {
\r
9805 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9806 out[info.outOffset[j]] <<= 16;
\r
9808 in += info.inJump;
\r
9809 out += info.outJump;
\r
9812 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9813 Int24 *in = (Int24 *)inBuffer;
\r
9814 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9815 for (j=0; j<info.channels; j++) {
\r
9816 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9817 out[info.outOffset[j]] <<= 8;
\r
9819 in += info.inJump;
\r
9820 out += info.outJump;
\r
9823 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9824 // Channel compensation and/or (de)interleaving only.
\r
9825 Int32 *in = (Int32 *)inBuffer;
\r
9826 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9827 for (j=0; j<info.channels; j++) {
\r
9828 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9830 in += info.inJump;
\r
9831 out += info.outJump;
\r
9834 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9835 Float32 *in = (Float32 *)inBuffer;
\r
9836 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9837 for (j=0; j<info.channels; j++) {
\r
9838 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9840 in += info.inJump;
\r
9841 out += info.outJump;
\r
9844 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9845 Float64 *in = (Float64 *)inBuffer;
\r
9846 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9847 for (j=0; j<info.channels; j++) {
\r
9848 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9850 in += info.inJump;
\r
9851 out += info.outJump;
\r
9855 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9856 Int24 *out = (Int24 *)outBuffer;
\r
9857 if (info.inFormat == RTAUDIO_SINT8) {
\r
9858 signed char *in = (signed char *)inBuffer;
\r
9859 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9860 for (j=0; j<info.channels; j++) {
\r
9861 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9862 //out[info.outOffset[j]] <<= 16;
\r
9864 in += info.inJump;
\r
9865 out += info.outJump;
\r
9868 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9869 Int16 *in = (Int16 *)inBuffer;
\r
9870 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9871 for (j=0; j<info.channels; j++) {
\r
9872 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9873 //out[info.outOffset[j]] <<= 8;
\r
9875 in += info.inJump;
\r
9876 out += info.outJump;
\r
9879 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9880 // Channel compensation and/or (de)interleaving only.
\r
9881 Int24 *in = (Int24 *)inBuffer;
\r
9882 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9883 for (j=0; j<info.channels; j++) {
\r
9884 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9886 in += info.inJump;
\r
9887 out += info.outJump;
\r
9890 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9891 Int32 *in = (Int32 *)inBuffer;
\r
9892 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9893 for (j=0; j<info.channels; j++) {
\r
9894 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9895 //out[info.outOffset[j]] >>= 8;
\r
9897 in += info.inJump;
\r
9898 out += info.outJump;
\r
9901 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9902 Float32 *in = (Float32 *)inBuffer;
\r
9903 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9904 for (j=0; j<info.channels; j++) {
\r
9905 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9907 in += info.inJump;
\r
9908 out += info.outJump;
\r
9911 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9912 Float64 *in = (Float64 *)inBuffer;
\r
9913 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9914 for (j=0; j<info.channels; j++) {
\r
9915 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9917 in += info.inJump;
\r
9918 out += info.outJump;
\r
9922 else if (info.outFormat == RTAUDIO_SINT16) {
\r
9923 Int16 *out = (Int16 *)outBuffer;
\r
9924 if (info.inFormat == RTAUDIO_SINT8) {
\r
9925 signed char *in = (signed char *)inBuffer;
\r
9926 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9927 for (j=0; j<info.channels; j++) {
\r
9928 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
9929 out[info.outOffset[j]] <<= 8;
\r
9931 in += info.inJump;
\r
9932 out += info.outJump;
\r
9935 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9936 // Channel compensation and/or (de)interleaving only.
\r
9937 Int16 *in = (Int16 *)inBuffer;
\r
9938 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9939 for (j=0; j<info.channels; j++) {
\r
9940 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9942 in += info.inJump;
\r
9943 out += info.outJump;
\r
9946 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9947 Int24 *in = (Int24 *)inBuffer;
\r
9948 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9949 for (j=0; j<info.channels; j++) {
\r
9950 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
9952 in += info.inJump;
\r
9953 out += info.outJump;
\r
9956 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9957 Int32 *in = (Int32 *)inBuffer;
\r
9958 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9959 for (j=0; j<info.channels; j++) {
\r
9960 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
9962 in += info.inJump;
\r
9963 out += info.outJump;
\r
9966 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9967 Float32 *in = (Float32 *)inBuffer;
\r
9968 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9969 for (j=0; j<info.channels; j++) {
\r
9970 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9972 in += info.inJump;
\r
9973 out += info.outJump;
\r
9976 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9977 Float64 *in = (Float64 *)inBuffer;
\r
9978 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9979 for (j=0; j<info.channels; j++) {
\r
9980 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9982 in += info.inJump;
\r
9983 out += info.outJump;
\r
9987 else if (info.outFormat == RTAUDIO_SINT8) {
\r
9988 signed char *out = (signed char *)outBuffer;
\r
9989 if (info.inFormat == RTAUDIO_SINT8) {
\r
9990 // Channel compensation and/or (de)interleaving only.
\r
9991 signed char *in = (signed char *)inBuffer;
\r
9992 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9993 for (j=0; j<info.channels; j++) {
\r
9994 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9996 in += info.inJump;
\r
9997 out += info.outJump;
\r
10000 if (info.inFormat == RTAUDIO_SINT16) {
\r
10001 Int16 *in = (Int16 *)inBuffer;
\r
10002 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10003 for (j=0; j<info.channels; j++) {
\r
10004 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10006 in += info.inJump;
\r
10007 out += info.outJump;
\r
10010 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10011 Int24 *in = (Int24 *)inBuffer;
\r
10012 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10013 for (j=0; j<info.channels; j++) {
\r
10014 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10016 in += info.inJump;
\r
10017 out += info.outJump;
\r
10020 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10021 Int32 *in = (Int32 *)inBuffer;
\r
10022 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10023 for (j=0; j<info.channels; j++) {
\r
10024 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10026 in += info.inJump;
\r
10027 out += info.outJump;
\r
10030 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10031 Float32 *in = (Float32 *)inBuffer;
\r
10032 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10033 for (j=0; j<info.channels; j++) {
\r
10034 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10036 in += info.inJump;
\r
10037 out += info.outJump;
\r
10040 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10041 Float64 *in = (Float64 *)inBuffer;
\r
10042 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10043 for (j=0; j<info.channels; j++) {
\r
10044 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10046 in += info.inJump;
\r
10047 out += info.outJump;
\r
10053 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10054 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10055 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10057 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10059 register char val;
\r
10060 register char *ptr;
\r
10063 if ( format == RTAUDIO_SINT16 ) {
\r
10064 for ( unsigned int i=0; i<samples; i++ ) {
\r
10065 // Swap 1st and 2nd bytes.
\r
10067 *(ptr) = *(ptr+1);
\r
10070 // Increment 2 bytes.
\r
10074 else if ( format == RTAUDIO_SINT32 ||
\r
10075 format == RTAUDIO_FLOAT32 ) {
\r
10076 for ( unsigned int i=0; i<samples; i++ ) {
\r
10077 // Swap 1st and 4th bytes.
\r
10079 *(ptr) = *(ptr+3);
\r
10082 // Swap 2nd and 3rd bytes.
\r
10085 *(ptr) = *(ptr+1);
\r
10088 // Increment 3 more bytes.
\r
10092 else if ( format == RTAUDIO_SINT24 ) {
\r
10093 for ( unsigned int i=0; i<samples; i++ ) {
\r
10094 // Swap 1st and 3rd bytes.
\r
10096 *(ptr) = *(ptr+2);
\r
10099 // Increment 2 more bytes.
\r
10103 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10104 for ( unsigned int i=0; i<samples; i++ ) {
\r
10105 // Swap 1st and 8th bytes
\r
10107 *(ptr) = *(ptr+7);
\r
10110 // Swap 2nd and 7th bytes
\r
10113 *(ptr) = *(ptr+5);
\r
10116 // Swap 3rd and 6th bytes
\r
10119 *(ptr) = *(ptr+3);
\r
10122 // Swap 4th and 5th bytes
\r
10125 *(ptr) = *(ptr+1);
\r
10128 // Increment 5 more bytes.
\r
10134 // Indentation settings for Vim and Emacs
\r
10136 // Local Variables:
\r
10137 // c-basic-offset: 2
\r
10138 // indent-tabs-mode: nil
\r
10141 // vim: et sts=2 sw=2
\r