1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound and ASIO) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2013 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.0.12
\r
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-dependent mutex wrappers: Windows critical sections for the
// DirectSound/ASIO backends, pthread mutexes for the POSIX backends, and
// no-op dummies when no threaded backend is compiled in.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else // no MUTEX support
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
\r
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
82 // The order here will control the order of RtAudio's API search in
\r
84 #if defined(__UNIX_JACK__)
\r
85 apis.push_back( UNIX_JACK );
\r
87 #if defined(__LINUX_ALSA__)
\r
88 apis.push_back( LINUX_ALSA );
\r
90 #if defined(__LINUX_PULSE__)
\r
91 apis.push_back( LINUX_PULSE );
\r
93 #if defined(__LINUX_OSS__)
\r
94 apis.push_back( LINUX_OSS );
\r
96 #if defined(__WINDOWS_ASIO__)
\r
97 apis.push_back( WINDOWS_ASIO );
\r
99 #if defined(__WINDOWS_DS__)
\r
100 apis.push_back( WINDOWS_DS );
\r
102 #if defined(__MACOSX_CORE__)
\r
103 apis.push_back( MACOSX_CORE );
\r
105 #if defined(__RTAUDIO_DUMMY__)
\r
106 apis.push_back( RTAUDIO_DUMMY );
\r
110 void RtAudio :: openRtApi( RtAudio::Api api )
\r
116 #if defined(__UNIX_JACK__)
\r
117 if ( api == UNIX_JACK )
\r
118 rtapi_ = new RtApiJack();
\r
120 #if defined(__LINUX_ALSA__)
\r
121 if ( api == LINUX_ALSA )
\r
122 rtapi_ = new RtApiAlsa();
\r
124 #if defined(__LINUX_PULSE__)
\r
125 if ( api == LINUX_PULSE )
\r
126 rtapi_ = new RtApiPulse();
\r
128 #if defined(__LINUX_OSS__)
\r
129 if ( api == LINUX_OSS )
\r
130 rtapi_ = new RtApiOss();
\r
132 #if defined(__WINDOWS_ASIO__)
\r
133 if ( api == WINDOWS_ASIO )
\r
134 rtapi_ = new RtApiAsio();
\r
136 #if defined(__WINDOWS_DS__)
\r
137 if ( api == WINDOWS_DS )
\r
138 rtapi_ = new RtApiDs();
\r
140 #if defined(__MACOSX_CORE__)
\r
141 if ( api == MACOSX_CORE )
\r
142 rtapi_ = new RtApiCore();
\r
144 #if defined(__RTAUDIO_DUMMY__)
\r
145 if ( api == RTAUDIO_DUMMY )
\r
146 rtapi_ = new RtApiDummy();
\r
150 RtAudio :: RtAudio( RtAudio::Api api ) throw()
\r
154 if ( api != UNSPECIFIED ) {
\r
155 // Attempt to open the specified API.
\r
157 if ( rtapi_ ) return;
\r
159 // No compiled support for specified API value. Issue a debug
\r
160 // warning and continue as if no API was specified.
\r
161 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
164 // Iterate through the compiled APIs and return as soon as we find
\r
165 // one with at least one device or we reach the end of the list.
\r
166 std::vector< RtAudio::Api > apis;
\r
167 getCompiledApi( apis );
\r
168 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
169 openRtApi( apis[i] );
\r
170 if ( rtapi_->getDeviceCount() ) break;
\r
173 if ( rtapi_ ) return;
\r
175 // It should not be possible to get here because the preprocessor
\r
176 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
177 // API-specific definitions are passed to the compiler. But just in
\r
178 // case something weird happens, we'll print out an error message.
\r
179 std::cerr << "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
182 RtAudio :: ~RtAudio() throw()
\r
187 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
188 RtAudio::StreamParameters *inputParameters,
\r
189 RtAudioFormat format, unsigned int sampleRate,
\r
190 unsigned int *bufferFrames,
\r
191 RtAudioCallback callback, void *userData,
\r
192 RtAudio::StreamOptions *options,
\r
193 RtAudioErrorCallback errorCallback )
\r
195 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
196 sampleRate, bufferFrames, callback,
\r
197 userData, options, errorCallback );
\r
200 // *************************************************** //
\r
202 // Public RtApi definitions (see end of file for
\r
203 // private or protected utility functions).
\r
205 // *************************************************** //
\r
209 stream_.state = STREAM_CLOSED;
\r
210 stream_.mode = UNINITIALIZED;
\r
211 stream_.apiHandle = 0;
\r
212 stream_.userBuffer[0] = 0;
\r
213 stream_.userBuffer[1] = 0;
\r
214 MUTEX_INITIALIZE( &stream_.mutex );
\r
215 showWarnings_ = true;
\r
216 firstErrorOccurred = false;
\r
221 MUTEX_DESTROY( &stream_.mutex );
\r
224 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
225 RtAudio::StreamParameters *iParams,
\r
226 RtAudioFormat format, unsigned int sampleRate,
\r
227 unsigned int *bufferFrames,
\r
228 RtAudioCallback callback, void *userData,
\r
229 RtAudio::StreamOptions *options,
\r
230 RtAudioErrorCallback errorCallback )
\r
232 if ( stream_.state != STREAM_CLOSED ) {
\r
233 errorText_ = "RtApi::openStream: a stream is already open!";
\r
234 error( RtAudioError::INVALID_USE );
\r
238 if ( oParams && oParams->nChannels < 1 ) {
\r
239 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
240 error( RtAudioError::INVALID_USE );
\r
244 if ( iParams && iParams->nChannels < 1 ) {
\r
245 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
246 error( RtAudioError::INVALID_USE );
\r
250 if ( oParams == NULL && iParams == NULL ) {
\r
251 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
252 error( RtAudioError::INVALID_USE );
\r
256 if ( formatBytes(format) == 0 ) {
\r
257 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
258 error( RtAudioError::INVALID_USE );
\r
262 unsigned int nDevices = getDeviceCount();
\r
263 unsigned int oChannels = 0;
\r
265 oChannels = oParams->nChannels;
\r
266 if ( oParams->deviceId >= nDevices ) {
\r
267 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
268 error( RtAudioError::INVALID_USE );
\r
273 unsigned int iChannels = 0;
\r
275 iChannels = iParams->nChannels;
\r
276 if ( iParams->deviceId >= nDevices ) {
\r
277 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
278 error( RtAudioError::INVALID_USE );
\r
286 if ( oChannels > 0 ) {
\r
288 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
289 sampleRate, format, bufferFrames, options );
\r
290 if ( result == false ) {
\r
291 error( RtAudioError::SYSTEM_ERROR );
\r
296 if ( iChannels > 0 ) {
\r
298 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
299 sampleRate, format, bufferFrames, options );
\r
300 if ( result == false ) {
\r
301 if ( oChannels > 0 ) closeStream();
\r
302 error( RtAudioError::SYSTEM_ERROR );
\r
307 stream_.callbackInfo.callback = (void *) callback;
\r
308 stream_.callbackInfo.userData = userData;
\r
309 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
311 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
312 stream_.state = STREAM_STOPPED;
\r
315 unsigned int RtApi :: getDefaultInputDevice( void )
\r
317 // Should be implemented in subclasses if possible.
\r
321 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
323 // Should be implemented in subclasses if possible.
\r
327 void RtApi :: closeStream( void )
\r
329 // MUST be implemented in subclasses!
\r
333 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
334 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
335 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
336 RtAudio::StreamOptions * /*options*/ )
\r
338 // MUST be implemented in subclasses!
\r
342 void RtApi :: tickStreamTime( void )
\r
344 // Subclasses that do not provide their own implementation of
\r
345 // getStreamTime should call this function once per buffer I/O to
\r
346 // provide basic stream time support.
\r
348 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
350 #if defined( HAVE_GETTIMEOFDAY )
\r
351 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
355 long RtApi :: getStreamLatency( void )
\r
359 long totalLatency = 0;
\r
360 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
361 totalLatency = stream_.latency[0];
\r
362 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
363 totalLatency += stream_.latency[1];
\r
365 return totalLatency;
\r
368 double RtApi :: getStreamTime( void )
\r
372 #if defined( HAVE_GETTIMEOFDAY )
\r
373 // Return a very accurate estimate of the stream time by
\r
374 // adding in the elapsed time since the last tick.
\r
375 struct timeval then;
\r
376 struct timeval now;
\r
378 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
379 return stream_.streamTime;
\r
381 gettimeofday( &now, NULL );
\r
382 then = stream_.lastTickTimestamp;
\r
383 return stream_.streamTime +
\r
384 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
385 (then.tv_sec + 0.000001 * then.tv_usec));
\r
387 return stream_.streamTime;
\r
391 unsigned int RtApi :: getStreamSampleRate( void )
\r
395 return stream_.sampleRate;
\r
399 // *************************************************** //
\r
401 // OS/API-specific methods.
\r
403 // *************************************************** //
\r
405 #if defined(__MACOSX_CORE__)
\r
407 // The OS X CoreAudio API is designed to use a separate callback
\r
408 // procedure for each of its audio devices. A single RtAudio duplex
\r
409 // stream using two different devices is supported here, though it
\r
410 // cannot be guaranteed to always behave correctly because we cannot
\r
411 // synchronize these two callbacks.
\r
413 // A property listener is installed for over/underrun information.
\r
414 // However, no functionality is currently provided to allow property
\r
415 // listeners to trigger user handlers because it is unclear what could
\r
416 // be done if a critical stream parameter (buffer size, sample rate,
\r
417 // device disconnect) notification arrived. The listeners entail
\r
418 // quite a bit of extra code and most likely, a user program wouldn't
\r
419 // be prepared for the result anyway. However, we do provide a flag
\r
420 // to the client callback function to inform of an over/underrun.
\r
422 // A structure to hold various information related to the CoreAudio API
\r
424 struct CoreHandle {
\r
425 AudioDeviceID id[2]; // device ids
\r
426 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
427 AudioDeviceIOProcID procId[2];
\r
429 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
430 UInt32 nStreams[2]; // number of streams to use
\r
432 char *deviceBuffer;
\r
433 pthread_cond_t condition;
\r
434 int drainCounter; // Tracks callback counts when draining
\r
435 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
438 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
441 RtApiCore:: RtApiCore()
\r
443 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
444 // This is a largely undocumented but absolutely necessary
\r
445 // requirement starting with OS-X 10.6. If not called, queries and
\r
446 // updates to various audio device properties are not handled
\r
448 CFRunLoopRef theRunLoop = NULL;
\r
449 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
450 kAudioObjectPropertyScopeGlobal,
\r
451 kAudioObjectPropertyElementMaster };
\r
452 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
453 if ( result != noErr ) {
\r
454 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
455 error( RtAudioError::WARNING );
\r
460 RtApiCore :: ~RtApiCore()
\r
462 // The subclass destructor gets called before the base class
\r
463 // destructor, so close an existing stream before deallocating
\r
464 // apiDeviceId memory.
\r
465 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
468 unsigned int RtApiCore :: getDeviceCount( void )
\r
470 // Find out how many audio devices there are, if any.
\r
472 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
473 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
474 if ( result != noErr ) {
\r
475 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
476 error( RtAudioError::WARNING );
\r
480 return dataSize / sizeof( AudioDeviceID );
\r
483 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
485 unsigned int nDevices = getDeviceCount();
\r
486 if ( nDevices <= 1 ) return 0;
\r
489 UInt32 dataSize = sizeof( AudioDeviceID );
\r
490 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
491 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
492 if ( result != noErr ) {
\r
493 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
494 error( RtAudioError::WARNING );
\r
498 dataSize *= nDevices;
\r
499 AudioDeviceID deviceList[ nDevices ];
\r
500 property.mSelector = kAudioHardwarePropertyDevices;
\r
501 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
502 if ( result != noErr ) {
\r
503 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
504 error( RtAudioError::WARNING );
\r
508 for ( unsigned int i=0; i<nDevices; i++ )
\r
509 if ( id == deviceList[i] ) return i;
\r
511 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
512 error( RtAudioError::WARNING );
\r
516 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
518 unsigned int nDevices = getDeviceCount();
\r
519 if ( nDevices <= 1 ) return 0;
\r
522 UInt32 dataSize = sizeof( AudioDeviceID );
\r
523 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
524 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
525 if ( result != noErr ) {
\r
526 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
527 error( RtAudioError::WARNING );
\r
531 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
532 AudioDeviceID deviceList[ nDevices ];
\r
533 property.mSelector = kAudioHardwarePropertyDevices;
\r
534 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
535 if ( result != noErr ) {
\r
536 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
537 error( RtAudioError::WARNING );
\r
541 for ( unsigned int i=0; i<nDevices; i++ )
\r
542 if ( id == deviceList[i] ) return i;
\r
544 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
545 error( RtAudioError::WARNING );
\r
549 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
551 RtAudio::DeviceInfo info;
\r
552 info.probed = false;
\r
555 unsigned int nDevices = getDeviceCount();
\r
556 if ( nDevices == 0 ) {
\r
557 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
558 error( RtAudioError::INVALID_USE );
\r
562 if ( device >= nDevices ) {
\r
563 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
564 error( RtAudioError::INVALID_USE );
\r
568 AudioDeviceID deviceList[ nDevices ];
\r
569 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
570 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
571 kAudioObjectPropertyScopeGlobal,
\r
572 kAudioObjectPropertyElementMaster };
\r
573 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
574 0, NULL, &dataSize, (void *) &deviceList );
\r
575 if ( result != noErr ) {
\r
576 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
577 error( RtAudioError::WARNING );
\r
581 AudioDeviceID id = deviceList[ device ];
\r
583 // Get the device name.
\r
585 CFStringRef cfname;
\r
586 dataSize = sizeof( CFStringRef );
\r
587 property.mSelector = kAudioObjectPropertyManufacturer;
\r
588 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
589 if ( result != noErr ) {
\r
590 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
591 errorText_ = errorStream_.str();
\r
592 error( RtAudioError::WARNING );
\r
596 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
597 int length = CFStringGetLength(cfname);
\r
598 char *mname = (char *)malloc(length * 3 + 1);
\r
599 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
600 info.name.append( (const char *)mname, strlen(mname) );
\r
601 info.name.append( ": " );
\r
602 CFRelease( cfname );
\r
605 property.mSelector = kAudioObjectPropertyName;
\r
606 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
607 if ( result != noErr ) {
\r
608 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
609 errorText_ = errorStream_.str();
\r
610 error( RtAudioError::WARNING );
\r
614 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
615 length = CFStringGetLength(cfname);
\r
616 char *name = (char *)malloc(length * 3 + 1);
\r
617 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
618 info.name.append( (const char *)name, strlen(name) );
\r
619 CFRelease( cfname );
\r
622 // Get the output stream "configuration".
\r
623 AudioBufferList *bufferList = nil;
\r
624 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
625 property.mScope = kAudioDevicePropertyScopeOutput;
\r
626 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
628 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
629 if ( result != noErr || dataSize == 0 ) {
\r
630 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
631 errorText_ = errorStream_.str();
\r
632 error( RtAudioError::WARNING );
\r
636 // Allocate the AudioBufferList.
\r
637 bufferList = (AudioBufferList *) malloc( dataSize );
\r
638 if ( bufferList == NULL ) {
\r
639 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
640 error( RtAudioError::WARNING );
\r
644 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
645 if ( result != noErr || dataSize == 0 ) {
\r
646 free( bufferList );
\r
647 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
648 errorText_ = errorStream_.str();
\r
649 error( RtAudioError::WARNING );
\r
653 // Get output channel information.
\r
654 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
655 for ( i=0; i<nStreams; i++ )
\r
656 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
657 free( bufferList );
\r
659 // Get the input stream "configuration".
\r
660 property.mScope = kAudioDevicePropertyScopeInput;
\r
661 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
662 if ( result != noErr || dataSize == 0 ) {
\r
663 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
664 errorText_ = errorStream_.str();
\r
665 error( RtAudioError::WARNING );
\r
669 // Allocate the AudioBufferList.
\r
670 bufferList = (AudioBufferList *) malloc( dataSize );
\r
671 if ( bufferList == NULL ) {
\r
672 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
673 error( RtAudioError::WARNING );
\r
677 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
678 if (result != noErr || dataSize == 0) {
\r
679 free( bufferList );
\r
680 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
681 errorText_ = errorStream_.str();
\r
682 error( RtAudioError::WARNING );
\r
686 // Get input channel information.
\r
687 nStreams = bufferList->mNumberBuffers;
\r
688 for ( i=0; i<nStreams; i++ )
\r
689 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
690 free( bufferList );
\r
692 // If device opens for both playback and capture, we determine the channels.
\r
693 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
694 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
696 // Probe the device sample rates.
\r
697 bool isInput = false;
\r
698 if ( info.outputChannels == 0 ) isInput = true;
\r
700 // Determine the supported sample rates.
\r
701 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
702 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
703 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
704 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
705 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
706 errorText_ = errorStream_.str();
\r
707 error( RtAudioError::WARNING );
\r
711 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
712 AudioValueRange rangeList[ nRanges ];
\r
713 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
714 if ( result != kAudioHardwareNoError ) {
\r
715 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
716 errorText_ = errorStream_.str();
\r
717 error( RtAudioError::WARNING );
\r
721 Float64 minimumRate = 100000000.0, maximumRate = 0.0;
\r
722 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
723 if ( rangeList[i].mMinimum < minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
724 if ( rangeList[i].mMaximum > maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
727 info.sampleRates.clear();
\r
728 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
729 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
730 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
733 if ( info.sampleRates.size() == 0 ) {
\r
734 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
735 errorText_ = errorStream_.str();
\r
736 error( RtAudioError::WARNING );
\r
740 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
741 // Thus, any other "physical" formats supported by the device are of
\r
742 // no interest to the client.
\r
743 info.nativeFormats = RTAUDIO_FLOAT32;
\r
745 if ( info.outputChannels > 0 )
\r
746 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
747 if ( info.inputChannels > 0 )
\r
748 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
750 info.probed = true;
\r
754 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
755 const AudioTimeStamp* /*inNow*/,
\r
756 const AudioBufferList* inInputData,
\r
757 const AudioTimeStamp* /*inInputTime*/,
\r
758 AudioBufferList* outOutputData,
\r
759 const AudioTimeStamp* /*inOutputTime*/,
\r
760 void* infoPointer )
\r
762 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
764 RtApiCore *object = (RtApiCore *) info->object;
\r
765 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
766 return kAudioHardwareUnspecifiedError;
\r
768 return kAudioHardwareNoError;
\r
771 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
773 const AudioObjectPropertyAddress properties[],
\r
774 void* handlePointer )
\r
776 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
777 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
778 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
779 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
780 handle->xrun[1] = true;
\r
782 handle->xrun[0] = true;
\r
786 return kAudioHardwareNoError;
\r
789 static OSStatus rateListener( AudioObjectID inDevice,
\r
790 UInt32 /*nAddresses*/,
\r
791 const AudioObjectPropertyAddress /*properties*/[],
\r
792 void* ratePointer )
\r
795 Float64 *rate = (Float64 *) ratePointer;
\r
796 UInt32 dataSize = sizeof( Float64 );
\r
797 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
798 kAudioObjectPropertyScopeGlobal,
\r
799 kAudioObjectPropertyElementMaster };
\r
800 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
801 return kAudioHardwareNoError;
\r
804 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
805 unsigned int firstChannel, unsigned int sampleRate,
\r
806 RtAudioFormat format, unsigned int *bufferSize,
\r
807 RtAudio::StreamOptions *options )
\r
810 unsigned int nDevices = getDeviceCount();
\r
811 if ( nDevices == 0 ) {
\r
812 // This should not happen because a check is made before this function is called.
\r
813 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
817 if ( device >= nDevices ) {
\r
818 // This should not happen because a check is made before this function is called.
\r
819 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
823 AudioDeviceID deviceList[ nDevices ];
\r
824 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
825 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
826 kAudioObjectPropertyScopeGlobal,
\r
827 kAudioObjectPropertyElementMaster };
\r
828 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
829 0, NULL, &dataSize, (void *) &deviceList );
\r
830 if ( result != noErr ) {
\r
831 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
835 AudioDeviceID id = deviceList[ device ];
\r
837 // Setup for stream mode.
\r
838 bool isInput = false;
\r
839 if ( mode == INPUT ) {
\r
841 property.mScope = kAudioDevicePropertyScopeInput;
\r
844 property.mScope = kAudioDevicePropertyScopeOutput;
\r
846 // Get the stream "configuration".
\r
847 AudioBufferList *bufferList = nil;
\r
849 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
850 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
851 if ( result != noErr || dataSize == 0 ) {
\r
852 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
853 errorText_ = errorStream_.str();
\r
857 // Allocate the AudioBufferList.
\r
858 bufferList = (AudioBufferList *) malloc( dataSize );
\r
859 if ( bufferList == NULL ) {
\r
860 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
864 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
865 if (result != noErr || dataSize == 0) {
\r
866 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
867 errorText_ = errorStream_.str();
\r
871 // Search for one or more streams that contain the desired number of
\r
872 // channels. CoreAudio devices can have an arbitrary number of
\r
873 // streams and each stream can have an arbitrary number of channels.
\r
874 // For each stream, a single buffer of interleaved samples is
\r
875 // provided. RtAudio prefers the use of one stream of interleaved
\r
876 // data or multiple consecutive single-channel streams. However, we
\r
877 // now support multiple consecutive multi-channel streams of
\r
878 // interleaved data as well.
\r
879 UInt32 iStream, offsetCounter = firstChannel;
\r
880 UInt32 nStreams = bufferList->mNumberBuffers;
\r
881 bool monoMode = false;
\r
882 bool foundStream = false;
\r
884 // First check that the device supports the requested number of
\r
886 UInt32 deviceChannels = 0;
\r
887 for ( iStream=0; iStream<nStreams; iStream++ )
\r
888 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
890 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
891 free( bufferList );
\r
892 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
893 errorText_ = errorStream_.str();
\r
897 // Look for a single stream meeting our needs.
\r
898 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
899 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
900 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
901 if ( streamChannels >= channels + offsetCounter ) {
\r
902 firstStream = iStream;
\r
903 channelOffset = offsetCounter;
\r
904 foundStream = true;
\r
907 if ( streamChannels > offsetCounter ) break;
\r
908 offsetCounter -= streamChannels;
\r
911 // If we didn't find a single stream above, then we should be able
\r
912 // to meet the channel specification with multiple streams.
\r
913 if ( foundStream == false ) {
\r
915 offsetCounter = firstChannel;
\r
916 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
917 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
918 if ( streamChannels > offsetCounter ) break;
\r
919 offsetCounter -= streamChannels;
\r
922 firstStream = iStream;
\r
923 channelOffset = offsetCounter;
\r
924 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
926 if ( streamChannels > 1 ) monoMode = false;
\r
927 while ( channelCounter > 0 ) {
\r
928 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
929 if ( streamChannels > 1 ) monoMode = false;
\r
930 channelCounter -= streamChannels;
\r
935 free( bufferList );
\r
937 // Determine the buffer size.
\r
938 AudioValueRange bufferRange;
\r
939 dataSize = sizeof( AudioValueRange );
\r
940 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
941 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
943 if ( result != noErr ) {
\r
944 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
945 errorText_ = errorStream_.str();
\r
949 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
950 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
951 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
953 // Set the buffer size. For multiple streams, I'm assuming we only
\r
954 // need to make this setting for the master channel.
\r
955 UInt32 theSize = (UInt32) *bufferSize;
\r
956 dataSize = sizeof( UInt32 );
\r
957 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
958 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
960 if ( result != noErr ) {
\r
961 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
962 errorText_ = errorStream_.str();
\r
966 // If attempting to setup a duplex stream, the bufferSize parameter
\r
967 // MUST be the same in both directions!
\r
968 *bufferSize = theSize;
\r
969 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
970 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
971 errorText_ = errorStream_.str();
\r
975 stream_.bufferSize = *bufferSize;
\r
976 stream_.nBuffers = 1;
\r
978 // Try to set "hog" mode ... it's not clear to me this is working.
\r
979 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
981 dataSize = sizeof( hog_pid );
\r
982 property.mSelector = kAudioDevicePropertyHogMode;
\r
983 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
984 if ( result != noErr ) {
\r
985 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
986 errorText_ = errorStream_.str();
\r
990 if ( hog_pid != getpid() ) {
\r
991 hog_pid = getpid();
\r
992 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
993 if ( result != noErr ) {
\r
994 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
995 errorText_ = errorStream_.str();
\r
1001 // Check and if necessary, change the sample rate for the device.
\r
1002 Float64 nominalRate;
\r
1003 dataSize = sizeof( Float64 );
\r
1004 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1005 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1007 if ( result != noErr ) {
\r
1008 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1009 errorText_ = errorStream_.str();
\r
1013 // Only change the sample rate if off by more than 1 Hz.
\r
1014 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1016 // Set a property listener for the sample rate change
\r
1017 Float64 reportedRate = 0.0;
\r
1018 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1019 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1020 if ( result != noErr ) {
\r
1021 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1022 errorText_ = errorStream_.str();
\r
1026 nominalRate = (Float64) sampleRate;
\r
1027 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1029 if ( result != noErr ) {
\r
1030 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1031 errorText_ = errorStream_.str();
\r
1035 // Now wait until the reported nominal rate is what we just set.
\r
1036 UInt32 microCounter = 0;
\r
1037 while ( reportedRate != nominalRate ) {
\r
1038 microCounter += 5000;
\r
1039 if ( microCounter > 5000000 ) break;
\r
1043 // Remove the property listener.
\r
1044 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1046 if ( microCounter > 5000000 ) {
\r
1047 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1048 errorText_ = errorStream_.str();
\r
1053 // Now set the stream format for all streams. Also, check the
\r
1054 // physical format of the device and change that if necessary.
\r
1055 AudioStreamBasicDescription description;
\r
1056 dataSize = sizeof( AudioStreamBasicDescription );
\r
1057 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1058 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1059 if ( result != noErr ) {
\r
1060 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1061 errorText_ = errorStream_.str();
\r
1065 // Set the sample rate and data format id. However, only make the
\r
1066 // change if the sample rate is not within 1.0 of the desired
\r
1067 // rate and the format is not linear pcm.
\r
1068 bool updateFormat = false;
\r
1069 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1070 description.mSampleRate = (Float64) sampleRate;
\r
1071 updateFormat = true;
\r
1074 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1075 description.mFormatID = kAudioFormatLinearPCM;
\r
1076 updateFormat = true;
\r
1079 if ( updateFormat ) {
\r
1080 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1081 if ( result != noErr ) {
\r
1082 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1083 errorText_ = errorStream_.str();
\r
1088 // Now check the physical format.
\r
1089 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1090 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1091 if ( result != noErr ) {
\r
1092 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1093 errorText_ = errorStream_.str();
\r
1097 //std::cout << "Current physical stream format:" << std::endl;
\r
1098 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1099 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1100 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1101 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1103 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1104 description.mFormatID = kAudioFormatLinearPCM;
\r
1105 //description.mSampleRate = (Float64) sampleRate;
\r
1106 AudioStreamBasicDescription testDescription = description;
\r
1107 UInt32 formatFlags;
\r
1109 // We'll try higher bit rates first and then work our way down.
\r
1110 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1111 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1112 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1113 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1114 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1115 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1116 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1117 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1118 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1119 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1120 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1121 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1122 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1124 bool setPhysicalFormat = false;
\r
1125 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1126 testDescription = description;
\r
1127 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1128 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1129 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1130 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1132 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1133 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1134 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1135 if ( result == noErr ) {
\r
1136 setPhysicalFormat = true;
\r
1137 //std::cout << "Updated physical stream format:" << std::endl;
\r
1138 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1139 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1140 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1141 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1146 if ( !setPhysicalFormat ) {
\r
1147 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1148 errorText_ = errorStream_.str();
\r
1151 } // done setting virtual/physical formats.
\r
1153 // Get the stream / device latency.
\r
1155 dataSize = sizeof( UInt32 );
\r
1156 property.mSelector = kAudioDevicePropertyLatency;
\r
1157 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1158 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1159 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1161 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1162 errorText_ = errorStream_.str();
\r
1163 error( RtAudioError::WARNING );
\r
1167 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1168 // always be presented in native-endian format, so we should never
\r
1169 // need to byte swap.
\r
1170 stream_.doByteSwap[mode] = false;
\r
1172 // From the CoreAudio documentation, PCM data must be supplied as
\r
1174 stream_.userFormat = format;
\r
1175 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1177 if ( streamCount == 1 )
\r
1178 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1179 else // multiple streams
\r
1180 stream_.nDeviceChannels[mode] = channels;
\r
1181 stream_.nUserChannels[mode] = channels;
\r
1182 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1183 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1184 else stream_.userInterleaved = true;
\r
1185 stream_.deviceInterleaved[mode] = true;
\r
1186 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1188 // Set flags for buffer conversion.
\r
1189 stream_.doConvertBuffer[mode] = false;
\r
1190 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1191 stream_.doConvertBuffer[mode] = true;
\r
1192 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1193 stream_.doConvertBuffer[mode] = true;
\r
1194 if ( streamCount == 1 ) {
\r
1195 if ( stream_.nUserChannels[mode] > 1 &&
\r
1196 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1197 stream_.doConvertBuffer[mode] = true;
\r
1199 else if ( monoMode && stream_.userInterleaved )
\r
1200 stream_.doConvertBuffer[mode] = true;
\r
1202 // Allocate our CoreHandle structure for the stream.
\r
1203 CoreHandle *handle = 0;
\r
1204 if ( stream_.apiHandle == 0 ) {
\r
1206 handle = new CoreHandle;
\r
1208 catch ( std::bad_alloc& ) {
\r
1209 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1213 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1214 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1217 stream_.apiHandle = (void *) handle;
\r
1220 handle = (CoreHandle *) stream_.apiHandle;
\r
1221 handle->iStream[mode] = firstStream;
\r
1222 handle->nStreams[mode] = streamCount;
\r
1223 handle->id[mode] = id;
\r
1225 // Allocate necessary internal buffers.
\r
1226 unsigned long bufferBytes;
\r
1227 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1228 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1229 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1230 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1231 if ( stream_.userBuffer[mode] == NULL ) {
\r
1232 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1236 // If possible, we will make use of the CoreAudio stream buffers as
\r
1237 // "device buffers". However, we can't do this if using multiple
\r
1239 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1241 bool makeBuffer = true;
\r
1242 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1243 if ( mode == INPUT ) {
\r
1244 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1245 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1246 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1250 if ( makeBuffer ) {
\r
1251 bufferBytes *= *bufferSize;
\r
1252 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1253 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1254 if ( stream_.deviceBuffer == NULL ) {
\r
1255 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1261 stream_.sampleRate = sampleRate;
\r
1262 stream_.device[mode] = device;
\r
1263 stream_.state = STREAM_STOPPED;
\r
1264 stream_.callbackInfo.object = (void *) this;
\r
1266 // Setup the buffer conversion information structure.
\r
1267 if ( stream_.doConvertBuffer[mode] ) {
\r
1268 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1269 else setConvertInfo( mode, channelOffset );
\r
1272 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1273 // Only one callback procedure per device.
\r
1274 stream_.mode = DUPLEX;
\r
1276 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1277 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1279 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1280 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1282 if ( result != noErr ) {
\r
1283 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1284 errorText_ = errorStream_.str();
\r
1287 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1288 stream_.mode = DUPLEX;
\r
1290 stream_.mode = mode;
\r
1293 // Setup the device property listener for over/underload.
\r
1294 property.mSelector = kAudioDeviceProcessorOverload;
\r
1295 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1301 pthread_cond_destroy( &handle->condition );
\r
1303 stream_.apiHandle = 0;
\r
1306 for ( int i=0; i<2; i++ ) {
\r
1307 if ( stream_.userBuffer[i] ) {
\r
1308 free( stream_.userBuffer[i] );
\r
1309 stream_.userBuffer[i] = 0;
\r
1313 if ( stream_.deviceBuffer ) {
\r
1314 free( stream_.deviceBuffer );
\r
1315 stream_.deviceBuffer = 0;
\r
1318 stream_.state = STREAM_CLOSED;
\r
// Close an open stream: stop any running device I/O procs, remove the
// IOProc(s) from the output and/or input devices, release the user and
// device buffers, destroy the pthread condition variable, and reset the
// stream bookkeeping to UNINITIALIZED / STREAM_CLOSED.
1322 void RtApiCore :: closeStream( void )
\r
// Closing a stream that was never opened is only a warning.
1324 if ( stream_.state == STREAM_CLOSED ) {
\r
1325 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1326 error( RtAudioError::WARNING );
\r
1330 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
// Output side teardown (handle->id[0]): stop the device first if the
// stream is still running, then remove our callback from it.
1331 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1332 if ( stream_.state == STREAM_RUNNING )
\r
1333 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1334 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1335 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1337 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1338 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
// Input side teardown (handle->id[1]); skipped for a DUPLEX stream that
// shares a single device, since that device was handled above.
1342 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1343 if ( stream_.state == STREAM_RUNNING )
\r
1344 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1345 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1346 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1348 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1349 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
// Free the per-direction user buffers (index 0 = output, 1 = input).
1353 for ( int i=0; i<2; i++ ) {
\r
1354 if ( stream_.userBuffer[i] ) {
\r
1355 free( stream_.userBuffer[i] );
\r
1356 stream_.userBuffer[i] = 0;
\r
// Free the shared internal "device" conversion buffer, if allocated.
1360 if ( stream_.deviceBuffer ) {
\r
1361 free( stream_.deviceBuffer );
\r
1362 stream_.deviceBuffer = 0;
\r
1365 // Destroy pthread condition variable.
\r
1366 pthread_cond_destroy( &handle->condition );
\r
1368 stream_.apiHandle = 0;
\r
1370 stream_.mode = UNINITIALIZED;
\r
1371 stream_.state = STREAM_CLOSED;
\r
// Start the stream: kick off the CoreAudio device callback procedure on
// the output device (handle->id[0]) for OUTPUT/DUPLEX modes, and on the
// input device (handle->id[1]) for INPUT mode or a DUPLEX stream using
// two distinct devices. On success, drain state is reset and the stream
// is marked STREAM_RUNNING; a failed AudioDeviceStart is reported via
// error( RtAudioError::SYSTEM_ERROR ) at the end.
1374 void RtApiCore :: startStream( void )
\r
// Starting an already-running stream is only a warning.
1377 if ( stream_.state == STREAM_RUNNING ) {
\r
1378 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1379 error( RtAudioError::WARNING );
\r
1383 OSStatus result = noErr;
\r
1384 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1385 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1387 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1388 if ( result != noErr ) {
\r
1389 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1390 errorText_ = errorStream_.str();
\r
// Only start the second device when input uses a device of its own.
1395 if ( stream_.mode == INPUT ||
\r
1396 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1398 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1399 if ( result != noErr ) {
\r
1400 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1401 errorText_ = errorStream_.str();
\r
// Fresh run: no pending drain, and it is not an internal (callback-
// initiated) drain.
1406 handle->drainCounter = 0;
\r
1407 handle->internalDrain = false;
\r
1408 stream_.state = STREAM_RUNNING;
\r
1411 if ( result == noErr ) return;
\r
1412 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream, letting queued output drain first. For an OUTPUT or
// DUPLEX stream, setting drainCounter = 2 asks the audio callback to
// play out/zero remaining data; this thread then blocks on the pthread
// condition variable until the callback signals the drain is complete,
// after which the device callback procedure(s) are stopped.
1415 void RtApiCore :: stopStream( void )
\r
// Stopping an already-stopped stream is only a warning.
1418 if ( stream_.state == STREAM_STOPPED ) {
\r
1419 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1420 error( RtAudioError::WARNING );
\r
1424 OSStatus result = noErr;
\r
1425 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1426 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// drainCounter == 0 means no drain is in progress yet; request one and
// wait for the callback to signal completion. (If the callback already
// initiated an internal drain, we skip straight to stopping the device.)
1428 if ( handle->drainCounter == 0 ) {
\r
1429 handle->drainCounter = 2;
\r
1430 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1433 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1434 if ( result != noErr ) {
\r
1435 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1436 errorText_ = errorStream_.str();
\r
// Stop the separate input device, if one is in use.
1441 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1443 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1444 if ( result != noErr ) {
\r
1445 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1446 errorText_ = errorStream_.str();
\r
1451 stream_.state = STREAM_STOPPED;
\r
1454 if ( result == noErr ) return;
\r
1455 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream as quickly as possible, without waiting for queued
// output to play out: drainCounter = 2 tells the audio callback to
// write zeros to the output and begin shutdown immediately.
1458 void RtApiCore :: abortStream( void )
\r
// Aborting an already-stopped stream is only a warning.
1461 if ( stream_.state == STREAM_STOPPED ) {
\r
1462 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1463 error( RtAudioError::WARNING );
\r
1467 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1468 handle->drainCounter = 2;
\r
1473 // This function will be called by a spawned thread when the user
\r
1474 // callback function signals that the stream should be stopped or
\r
1475 // aborted. It is better to handle it this way because the
\r
1476 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1477 // function is called.
\r
// Thread entry point: ptr is the stream's CallbackInfo, whose `object`
// member is the owning RtApiCore instance.
1478 static void *coreStopStream( void *ptr )
\r
1480 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1481 RtApiCore *object = (RtApiCore *) info->object;
\r
// Perform the actual (possibly blocking) stop outside the audio callback.
1483 object->stopStream();
\r
1484 pthread_exit( NULL );
\r
// Core per-cycle I/O handler, invoked (via callbackHandler) each time a
// CoreAudio device wants a buffer. It (1) manages drain/stop signaling,
// (2) runs the user callback to produce fresh output / consume input,
// and (3) moves and converts data between the user buffers and the
// device's AudioBufferList stream buffers, handling single-stream,
// mono-mode (non-interleaved), and multi-stream interleaved layouts.
// For a DUPLEX stream on two devices, this is called once per device;
// deviceId selects which half of the work is done.
1487 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1488 const AudioBufferList *inBufferList,
\r
1489 const AudioBufferList *outBufferList )
\r
1491 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1492 if ( stream_.state == STREAM_CLOSED ) {
\r
1493 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1494 error( RtAudioError::WARNING );
\r
1498 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1499 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1501 // Check if we were draining the stream and signal is finished.
\r
// drainCounter > 3 means the drain cycles are done: either spawn a
// thread to call stopStream() (internal drain) or wake the thread
// already blocked in stopStream() on the condition variable.
1502 if ( handle->drainCounter > 3 ) {
\r
1503 ThreadHandle threadId;
\r
1505 stream_.state = STREAM_STOPPING;
\r
1506 if ( handle->internalDrain == true )
\r
1507 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1508 else // external call to stopStream()
\r
1509 pthread_cond_signal( &handle->condition );
\r
1513 AudioDeviceID outputDevice = handle->id[0];
\r
1515 // Invoke user callback to get fresh output data UNLESS we are
\r
1516 // draining stream or duplex mode AND the input/output devices are
\r
1517 // different AND this function is called for the input device.
\r
1518 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1519 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1520 double streamTime = getStreamTime();
\r
// Report (and clear) any over/underflow flags set by the xrun listener.
1521 RtAudioStreamStatus status = 0;
\r
1522 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1523 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1524 handle->xrun[0] = false;
\r
1526 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1527 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1528 handle->xrun[1] = false;
\r
// User callback return value: 1 = drain then stop, 2 = abort now.
1531 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1532 stream_.bufferSize, streamTime, status, info->userData );
\r
1533 if ( cbReturnValue == 2 ) {
\r
1534 stream_.state = STREAM_STOPPING;
\r
1535 handle->drainCounter = 2;
\r
1539 else if ( cbReturnValue == 1 ) {
\r
1540 handle->drainCounter = 1;
\r
1541 handle->internalDrain = true;
\r
// ---------- Output side: fill the device's output stream buffers ----------
1545 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1547 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1549 if ( handle->nStreams[0] == 1 ) {
\r
1550 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1552 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1554 else { // fill multiple streams with zeros
\r
1555 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1556 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1558 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
// Single output stream: convert or copy user data straight into it.
1562 else if ( handle->nStreams[0] == 1 ) {
\r
1563 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1564 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1565 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1567 else { // copy from user buffer
\r
1568 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1569 stream_.userBuffer[0],
\r
1570 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1573 else { // fill multiple streams
\r
1574 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1575 if ( stream_.doConvertBuffer[0] ) {
\r
1576 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1577 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
// Mono mode: one single-channel stream per user channel; copy each
// channel's contiguous block into its own stream buffer.
1580 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1581 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1582 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1583 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1584 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1587 else { // fill multiple multi-channel streams with interleaved data
\r
1588 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1589 Float32 *out, *in;
\r
1591 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1592 UInt32 inChannels = stream_.nUserChannels[0];
\r
1593 if ( stream_.doConvertBuffer[0] ) {
\r
1594 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1595 inChannels = stream_.nDeviceChannels[0];
\r
// inOffset: distance between consecutive channels of one frame in the
// source buffer (1 when interleaved, bufferSize when planar).
1598 if ( inInterleaved ) inOffset = 1;
\r
1599 else inOffset = stream_.bufferSize;
\r
1601 channelsLeft = inChannels;
\r
1602 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1604 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1605 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1608 // Account for possible channel offset in first stream
\r
1609 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1610 streamChannels -= stream_.channelOffset[0];
\r
1611 outJump = stream_.channelOffset[0];
\r
1615 // Account for possible unfilled channels at end of the last stream
\r
1616 if ( streamChannels > channelsLeft ) {
\r
1617 outJump = streamChannels - channelsLeft;
\r
1618 streamChannels = channelsLeft;
\r
1621 // Determine input buffer offsets and skips
\r
1622 if ( inInterleaved ) {
\r
1623 inJump = inChannels;
\r
1624 in += inChannels - channelsLeft;
\r
1628 in += (inChannels - channelsLeft) * inOffset;
\r
// Copy this stream's channels frame-by-frame into the device buffer.
1631 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1632 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1633 *out++ = in[j*inOffset];
\r
1638 channelsLeft -= streamChannels;
\r
// While draining, count callback cycles so drainCounter > 3 eventually
// triggers the stop/signal logic at the top of this function.
1643 if ( handle->drainCounter ) {
\r
1644 handle->drainCounter++;
\r
// ---------- Input side: read the device's input stream buffers ----------
1649 AudioDeviceID inputDevice;
\r
1650 inputDevice = handle->id[1];
\r
1651 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1653 if ( handle->nStreams[1] == 1 ) {
\r
1654 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1655 convertBuffer( stream_.userBuffer[1],
\r
1656 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1657 stream_.convertInfo[1] );
\r
1659 else { // copy to user buffer
\r
1660 memcpy( stream_.userBuffer[1],
\r
1661 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1662 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1665 else { // read from multiple streams
\r
1666 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1667 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
// Mono mode: gather each single-channel stream into a planar block of
// the destination buffer.
1669 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1670 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1671 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1672 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1673 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1676 else { // read from multiple multi-channel streams
\r
1677 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1678 Float32 *out, *in;
\r
1680 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1681 UInt32 outChannels = stream_.nUserChannels[1];
\r
1682 if ( stream_.doConvertBuffer[1] ) {
\r
1683 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1684 outChannels = stream_.nDeviceChannels[1];
\r
// outOffset: distance between consecutive channels of one frame in the
// destination buffer (1 when interleaved, bufferSize when planar).
1687 if ( outInterleaved ) outOffset = 1;
\r
1688 else outOffset = stream_.bufferSize;
\r
1690 channelsLeft = outChannels;
\r
1691 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1693 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1694 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1697 // Account for possible channel offset in first stream
\r
1698 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1699 streamChannels -= stream_.channelOffset[1];
\r
1700 inJump = stream_.channelOffset[1];
\r
1704 // Account for possible unread channels at end of the last stream
\r
1705 if ( streamChannels > channelsLeft ) {
\r
1706 inJump = streamChannels - channelsLeft;
\r
1707 streamChannels = channelsLeft;
\r
1710 // Determine output buffer offsets and skips
\r
1711 if ( outInterleaved ) {
\r
1712 outJump = outChannels;
\r
1713 out += outChannels - channelsLeft;
\r
1717 out += (outChannels - channelsLeft) * outOffset;
\r
// Copy this stream's channels frame-by-frame out of the device buffer.
1720 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1721 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1722 out[j*outOffset] = *in++;
\r
1727 channelsLeft -= streamChannels;
\r
// Final format conversion from the internal device buffer into the
// user's input buffer, if one was needed.
1731 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1732 convertBuffer( stream_.userBuffer[1],
\r
1733 stream_.deviceBuffer,
\r
1734 stream_.convertInfo[1] );
\r
1740 //MUTEX_UNLOCK( &stream_.mutex );
\r
1742 RtApi::tickStreamTime();
\r
1746 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1750 case kAudioHardwareNotRunningError:
\r
1751 return "kAudioHardwareNotRunningError";
\r
1753 case kAudioHardwareUnspecifiedError:
\r
1754 return "kAudioHardwareUnspecifiedError";
\r
1756 case kAudioHardwareUnknownPropertyError:
\r
1757 return "kAudioHardwareUnknownPropertyError";
\r
1759 case kAudioHardwareBadPropertySizeError:
\r
1760 return "kAudioHardwareBadPropertySizeError";
\r
1762 case kAudioHardwareIllegalOperationError:
\r
1763 return "kAudioHardwareIllegalOperationError";
\r
1765 case kAudioHardwareBadObjectError:
\r
1766 return "kAudioHardwareBadObjectError";
\r
1768 case kAudioHardwareBadDeviceError:
\r
1769 return "kAudioHardwareBadDeviceError";
\r
1771 case kAudioHardwareBadStreamError:
\r
1772 return "kAudioHardwareBadStreamError";
\r
1774 case kAudioHardwareUnsupportedOperationError:
\r
1775 return "kAudioHardwareUnsupportedOperationError";
\r
1777 case kAudioDeviceUnsupportedFormatError:
\r
1778 return "kAudioDeviceUnsupportedFormatError";
\r
1780 case kAudioDevicePermissionsError:
\r
1781 return "kAudioDevicePermissionsError";
\r
1784 return "CoreAudio unknown error";
\r
1788 //******************** End of __MACOSX_CORE__ *********************//
\r
1791 #if defined(__UNIX_JACK__)
\r
1793 // JACK is a low-latency audio server, originally written for the
\r
1794 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1795 // connect a number of different applications to an audio device, as
\r
1796 // well as allowing them to share audio between themselves.
\r
1798 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1799 // have ports connected to the server. The JACK server is typically
\r
1800 // started in a terminal as follows:
\r
1802 // .jackd -d alsa -d hw:0
\r
1804 // or through an interface program such as qjackctl. Many of the
\r
1805 // parameters normally set for a stream are fixed by the JACK server
\r
1806 // and can be specified when the JACK server is started. In
\r
1809 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1811 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1812 // frames, and number of buffers = 4. Once the server is running, it
\r
1813 // is not possible to override these values. If the values are not
\r
1814 // specified in the command-line, the JACK server uses default values.
\r
1816 // The JACK server does not have to be running when an instance of
\r
1817 // RtApiJack is created, though the function getDeviceCount() will
\r
1818 // report 0 devices found until JACK has been started. When no
\r
1819 // devices are available (i.e., the JACK server is not running), a
\r
1820 // stream cannot be opened.
\r
1822 #include <jack/jack.h>
\r
1823 #include <unistd.h>
\r
1826 // A structure to hold various information related to the Jack API
\r
1827 // implementation.
\r
1828 struct JackHandle {
\r
1829 jack_client_t *client;
\r
1830 jack_port_t **ports[2];
\r
1831 std::string deviceName[2];
\r
1833 pthread_cond_t condition;
\r
1834 int drainCounter; // Tracks callback counts when draining
\r
1835 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1838 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// Error callback installed into Jack in non-debug builds: deliberately a
// no-op so Jack's internal diagnostics do not spam the console.
static void jackSilentError( const char * /*message*/ ) {}
\r
1843 RtApiJack :: RtApiJack()
\r
1845 // Nothing to do here.
\r
1846 #if !defined(__RTAUDIO_DEBUG__)
\r
1847 // Turn off Jack's internal error reporting.
\r
1848 jack_set_error_function( &jackSilentError );
\r
1852 RtApiJack :: ~RtApiJack()
\r
1854 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1857 unsigned int RtApiJack :: getDeviceCount( void )
\r
1859 // See if we can become a jack client.
\r
1860 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1861 jack_status_t *status = NULL;
\r
1862 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1863 if ( client == 0 ) return 0;
\r
1865 const char **ports;
\r
1866 std::string port, previousPort;
\r
1867 unsigned int nChannels = 0, nDevices = 0;
\r
1868 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1870 // Parse the port names up to the first colon (:).
\r
1871 size_t iColon = 0;
\r
1873 port = (char *) ports[ nChannels ];
\r
1874 iColon = port.find(":");
\r
1875 if ( iColon != std::string::npos ) {
\r
1876 port = port.substr( 0, iColon + 1 );
\r
1877 if ( port != previousPort ) {
\r
1879 previousPort = port;
\r
1882 } while ( ports[++nChannels] );
\r
1886 jack_client_close( client );
\r
1890 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1892 RtAudio::DeviceInfo info;
\r
1893 info.probed = false;
\r
1895 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1896 jack_status_t *status = NULL;
\r
1897 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1898 if ( client == 0 ) {
\r
1899 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1900 error( RtAudioError::WARNING );
\r
1904 const char **ports;
\r
1905 std::string port, previousPort;
\r
1906 unsigned int nPorts = 0, nDevices = 0;
\r
1907 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1909 // Parse the port names up to the first colon (:).
\r
1910 size_t iColon = 0;
\r
1912 port = (char *) ports[ nPorts ];
\r
1913 iColon = port.find(":");
\r
1914 if ( iColon != std::string::npos ) {
\r
1915 port = port.substr( 0, iColon );
\r
1916 if ( port != previousPort ) {
\r
1917 if ( nDevices == device ) info.name = port;
\r
1919 previousPort = port;
\r
1922 } while ( ports[++nPorts] );
\r
1926 if ( device >= nDevices ) {
\r
1927 jack_client_close( client );
\r
1928 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1929 error( RtAudioError::INVALID_USE );
\r
1933 // Get the current jack server sample rate.
\r
1934 info.sampleRates.clear();
\r
1935 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1937 // Count the available ports containing the client name as device
\r
1938 // channels. Jack "input ports" equal RtAudio output channels.
\r
1939 unsigned int nChannels = 0;
\r
1940 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1942 while ( ports[ nChannels ] ) nChannels++;
\r
1944 info.outputChannels = nChannels;
\r
1947 // Jack "output ports" equal RtAudio input channels.
\r
1949 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1951 while ( ports[ nChannels ] ) nChannels++;
\r
1953 info.inputChannels = nChannels;
\r
1956 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
1957 jack_client_close(client);
\r
1958 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
1959 error( RtAudioError::WARNING );
\r
1963 // If device opens for both playback and capture, we determine the channels.
\r
1964 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
1965 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
1967 // Jack always uses 32-bit floats.
\r
1968 info.nativeFormats = RTAUDIO_FLOAT32;
\r
1970 // Jack doesn't provide default devices so we'll use the first available one.
\r
1971 if ( device == 0 && info.outputChannels > 0 )
\r
1972 info.isDefaultOutput = true;
\r
1973 if ( device == 0 && info.inputChannels > 0 )
\r
1974 info.isDefaultInput = true;
\r
1976 jack_client_close(client);
\r
1977 info.probed = true;
\r
1981 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
1983 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
1985 RtApiJack *object = (RtApiJack *) info->object;
\r
1986 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
1991 // This function will be called by a spawned thread when the Jack
\r
1992 // server signals that it is shutting down. It is necessary to handle
\r
1993 // it this way because the jackShutdown() function must return before
\r
1994 // the jack_deactivate() function (in closeStream()) will return.
\r
1995 static void *jackCloseStream( void *ptr )
\r
1997 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1998 RtApiJack *object = (RtApiJack *) info->object;
\r
2000 object->closeStream();
\r
2002 pthread_exit( NULL );
\r
2004 static void jackShutdown( void *infoPointer )
\r
2006 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2007 RtApiJack *object = (RtApiJack *) info->object;
\r
2009 // Check current stream state. If stopped, then we'll assume this
\r
2010 // was called as a result of a call to RtApiJack::stopStream (the
\r
2011 // deactivation of a client handle causes this function to be called).
\r
2012 // If not, we'll assume the Jack server is shutting down or some
\r
2013 // other problem occurred and we should close the stream.
\r
2014 if ( object->isStreamRunning() == false ) return;
\r
2016 ThreadHandle threadId;
\r
2017 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2018 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2021 static int jackXrun( void *infoPointer )
\r
2023 JackHandle *handle = (JackHandle *) infoPointer;
\r
2025 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2026 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
// Open (or extend to duplex) a stream on the given Jack device.  Returns
// SUCCESS/FAILURE per the RtApi convention; the cleanup section at the
// bottom releases any partially allocated state on failure.
// NOTE(review): this region of the file has lost a number of structural
// lines (closing braces, "goto error;" statements, the "error:" label,
// "else" lines, "char label[64];") in extraction; the surviving code is
// kept verbatim below.
bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
  JackHandle *handle = (JackHandle *) stream_.apiHandle;

  // Look for jack server and try to become a client (only do once per stream).
  jack_client_t *client = 0;
  if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
    jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
    jack_status_t *status = NULL;
    // Use the caller-supplied stream name for the Jack client when given.
    if ( options && !options->streamName.empty() )
      client = jack_client_open( options->streamName.c_str(), jackoptions, status );
      client = jack_client_open( "RtApiJack", jackoptions, status );
    if ( client == 0 ) {
      errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
      error( RtAudioError::WARNING );
    // The handle must have been created on an earlier pass.
    client = handle->client;

  const char **ports;
  std::string port, previousPort, deviceName;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, NULL, 0 );
  // Parse the port names up to the first colon (:) — each distinct
  // client-name prefix is one device; remember the requested one.
  size_t iColon = 0;
    port = (char *) ports[ nPorts ];
    iColon = port.find(":");
    if ( iColon != std::string::npos ) {
      port = port.substr( 0, iColon );
      if ( port != previousPort ) {
        if ( nDevices == device ) deviceName = port;
        previousPort = port;
  } while ( ports[++nPorts] );

  if ( device >= nDevices ) {
    errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";

  // Count the available ports containing the client name as device
  // channels.  Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  unsigned long flag = JackPortIsInput;
  if ( mode == INPUT ) flag = JackPortIsOutput;
  ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
    while ( ports[ nChannels ] ) nChannels++;

  // Compare the jack ports for specified client to the requested number of channels.
  if ( nChannels < (channels + firstChannel) ) {
    errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
    errorText_ = errorStream_.str();

  // Check the jack server sample rate; the server fixes the rate at
  // startup and we cannot resample here.
  unsigned int jackRate = jack_get_sample_rate( client );
  if ( sampleRate != jackRate ) {
    jack_client_close( client );
    errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
    errorText_ = errorStream_.str();
  stream_.sampleRate = jackRate;

  // Get the latency of the JACK port.
  ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
  if ( ports[ firstChannel ] ) {
    // Added by Ge Wang
    jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
    // the range (usually the min and max are equal)
    jack_latency_range_t latrange; latrange.min = latrange.max = 0;
    // get the latency range
    jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
    // be optimistic, use the min!
    stream_.latency[mode] = latrange.min;
    //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );

  // The jack server always uses 32-bit floating-point data.
  stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
  stream_.userFormat = format;

  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;

  // Jack always uses non-interleaved buffers.
  stream_.deviceInterleaved[mode] = false;

  // Jack always provides host byte-ordered data.
  stream_.doByteSwap[mode] = false;

  // Get the buffer size.  The buffer size and number of buffers
  // (periods) is set when the jack server is started.
  stream_.bufferSize = (int) jack_get_buffer_size( client );
  *bufferSize = stream_.bufferSize;

  stream_.nDeviceChannels[mode] = channels;
  stream_.nUserChannels[mode] = channels;

  // Set flags for buffer conversion: needed when the user format or
  // interleaving differs from Jack's float32 non-interleaved layout.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  // Allocate our JackHandle structure for the stream (first pass only).
  if ( handle == 0 ) {
      handle = new JackHandle;
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";

    if ( pthread_cond_init(&handle->condition, NULL) ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
    stream_.apiHandle = (void *) handle;
    handle->client = client;
  handle->deviceName[mode] = deviceName;

  // Allocate necessary internal buffers.
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";

  if ( stream_.doConvertBuffer[mode] ) {

    bool makeBuffer = true;
    if ( mode == OUTPUT )
      bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
    else { // mode == INPUT
      bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
      // In duplex mode, reuse the existing (larger) device buffer if possible.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
        if ( bufferBytes < bytesOut ) makeBuffer = false;

    if ( makeBuffer ) {
      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";

  // Allocate memory for the Jack ports (channels) identifiers.
  handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
  if ( handle->ports[mode] == NULL ) {
    errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";

  stream_.device[mode] = device;
  stream_.channelOffset[mode] = firstChannel;
  stream_.state = STREAM_STOPPED;
  stream_.callbackInfo.object = (void *) this;

  if ( stream_.mode == OUTPUT && mode == INPUT )
    // We had already set up the stream for output.
    stream_.mode = DUPLEX;
    stream_.mode = mode;
    jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
    // NOTE(review): "&handle" takes the address of the LOCAL pointer
    // variable (a JackHandle **), but jackXrun() casts its argument
    // directly to JackHandle *.  This should almost certainly be
    // "(void *) handle"; as written the xrun callback dereferences a
    // dangling stack address after this function returns.
    jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
    jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );

  // Register our ports (one Jack port per user channel).
  if ( mode == OUTPUT ) {
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      snprintf( label, 64, "outport %d", i );
      handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
                                                JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      snprintf( label, 64, "inport %d", i );
      handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
                                                JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );

  // Setup the buffer conversion information structure.  We don't use
  // buffers to do channel offsets, so we override that parameter
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );

  // Error-path cleanup (original "error:" label lost in extraction):
  // release everything allocated above before reporting FAILURE.
  pthread_cond_destroy( &handle->condition );
  jack_client_close( handle->client );

  if ( handle->ports[0] ) free( handle->ports[0] );
  if ( handle->ports[1] ) free( handle->ports[1] );

  stream_.apiHandle = 0;

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
\r
2282 void RtApiJack :: closeStream( void )
\r
2284 if ( stream_.state == STREAM_CLOSED ) {
\r
2285 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2286 error( RtAudioError::WARNING );
\r
2290 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2293 if ( stream_.state == STREAM_RUNNING )
\r
2294 jack_deactivate( handle->client );
\r
2296 jack_client_close( handle->client );
\r
2300 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2301 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2302 pthread_cond_destroy( &handle->condition );
\r
2304 stream_.apiHandle = 0;
\r
2307 for ( int i=0; i<2; i++ ) {
\r
2308 if ( stream_.userBuffer[i] ) {
\r
2309 free( stream_.userBuffer[i] );
\r
2310 stream_.userBuffer[i] = 0;
\r
2314 if ( stream_.deviceBuffer ) {
\r
2315 free( stream_.deviceBuffer );
\r
2316 stream_.deviceBuffer = 0;
\r
2319 stream_.mode = UNINITIALIZED;
\r
2320 stream_.state = STREAM_CLOSED;
\r
// Activate the Jack client and connect our registered ports to the
// device's ports, honoring any channel offset.  On any failure, control
// reaches the tail where a non-zero "result" raises SYSTEM_ERROR.
// NOTE(review): the opening brace, several "result = 1;" resets, the
// free(ports) calls and the "unlock:" label appear to have been lost in
// extraction; surviving code kept verbatim.
void RtApiJack :: startStream( void )
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  int result = jack_activate( handle->client );
    errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";

  const char **ports;

  // Get the list of available ports.
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";

    // Now make the port connections.  Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
        errorText_ = "RtApiJack::startStream(): error connecting output ports!";

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";

    // Now make the port connections.  See note above.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
        errorText_ = "RtApiJack::startStream(): error connecting input ports!";

  // Reset drain state and mark the stream as running.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

  // Tail (after the lost "unlock:" label): result == 0 means success.
  if ( result == 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
\r
2397 void RtApiJack :: stopStream( void )
\r
2400 if ( stream_.state == STREAM_STOPPED ) {
\r
2401 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2402 error( RtAudioError::WARNING );
\r
2406 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2407 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2409 if ( handle->drainCounter == 0 ) {
\r
2410 handle->drainCounter = 2;
\r
2411 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2415 jack_deactivate( handle->client );
\r
2416 stream_.state = STREAM_STOPPED;
\r
2419 void RtApiJack :: abortStream( void )
\r
2422 if ( stream_.state == STREAM_STOPPED ) {
\r
2423 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2424 error( RtAudioError::WARNING );
\r
2428 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2429 handle->drainCounter = 2;
\r
2434 // This function will be called by a spawned thread when the user
\r
2435 // callback function signals that the stream should be stopped or
\r
2436 // aborted. It is necessary to handle it this way because the
\r
2437 // callbackEvent() function must return before the jack_deactivate()
\r
2438 // function will return.
\r
2439 static void *jackStopStream( void *ptr )
\r
2441 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2442 RtApiJack *object = (RtApiJack *) info->object;
\r
2444 object->stopStream();
\r
2445 pthread_exit( NULL );
\r
// Per-period processing for the Jack stream: run the user callback,
// move output data into the Jack port buffers and input data out of
// them, converting format/interleaving as configured.
// NOTE(review): several structural lines (braces, "return FAILURE;",
// "ThreadHandle id;") appear to have been lost in extraction; surviving
// code kept verbatim.
bool RtApiJack :: callbackEvent( unsigned long nframes )
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    // NOTE(review): message says "RtApiCore" — copy/paste from the
    // CoreAudio section; should read "RtApiJack".
    errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
  if ( stream_.bufferSize != nframes ) {
    // NOTE(review): same wrong "RtApiCore" prefix here.
    errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
    error( RtAudioError::WARNING );

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  JackHandle *handle = (JackHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal is finished.
  if ( handle->drainCounter > 3 ) {
    ThreadHandle threadId;
    stream_.state = STREAM_STOPPING;
    // Internal drain: we initiated the stop from the callback, so spawn
    // the stop thread ourselves; otherwise wake the thread blocked in
    // stopStream() on the condition variable.
    if ( handle->internalDrain == true )
      pthread_create( &threadId, NULL, jackStopStream, info );
      pthread_cond_signal( &handle->condition );

  // Invoke user callback first, to get fresh output data.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report and clear any xrun flags latched by jackXrun().
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      handle->xrun[0] = false;
    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      handle->xrun[1] = false;
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    // Return value 2 = abort immediately; 1 = stop after draining output.
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      pthread_create( &id, NULL, jackStopStream, info );
    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;

  jack_default_audio_sample_t *jackbuffer;
  unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
        memset( jackbuffer, 0, bufferBytes );

    else if ( stream_.doConvertBuffer[0] ) {

      // Convert user data into the non-interleaved float device buffer,
      // then copy one channel's worth per Jack port.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
        memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );

    else { // no buffer conversion
      for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
        memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );

    // Advance the drain handshake; drainCounter > 3 triggers shutdown
    // on a subsequent callback (see top of function).
    if ( handle->drainCounter ) {
      handle->drainCounter++;

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    if ( stream_.doConvertBuffer[1] ) {
      for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
        memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
    else { // no buffer conversion
      for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
        memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );

  RtApi::tickStreamTime();
\r
2560 //******************** End of __UNIX_JACK__ *********************//
\r
2563 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2565 // The ASIO API is designed around a callback scheme, so this
\r
2566 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2567 // Jack. The primary constraint with ASIO is that it only allows
\r
2568 // access to a single driver at a time. Thus, it is not possible to
\r
2569 // have more than one simultaneous RtAudio stream.
\r
2571 // This implementation also requires a number of external ASIO files
\r
2572 // and a few global variables. The ASIO callback scheme does not
\r
2573 // allow for the passing of user data, so we must create a global
\r
2574 // pointer to our callbackInfo structure.
\r
2576 // On unix systems, we make use of a pthread condition variable.
\r
2577 // Since there is no equivalent in Windows, I hacked something based
\r
2578 // on information found in
\r
2579 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2581 #include "asiosys.h"
\r
2583 #include "iasiothiscallresolver.h"
\r
2584 #include "asiodrivers.h"
\r
2587 static AsioDrivers drivers;
\r
2588 static ASIOCallbacks asioCallbacks;
\r
2589 static ASIODriverInfo driverInfo;
\r
2590 static CallbackInfo *asioCallbackInfo;
\r
2591 static bool asioXRun;
\r
// A structure to hold various information related to the ASIO API
// implementation.
struct AsioHandle {
  int drainCounter;       // Tracks callback counts when draining
  bool internalDrain;     // Indicates if stop is initiated from callback or not.
  ASIOBufferInfo *bufferInfos;  // Per-channel ASIO buffer descriptors.
  // NOTE(review): the default-constructor declaration line (and possibly
  // additional members) between the declaration above and the
  // initializer list below appears to have been lost in extraction.
  :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2603 // Function declarations (definitions at end of section)
\r
2604 static const char* getAsioErrorString( ASIOError result );
\r
2605 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2606 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2608 RtApiAsio :: RtApiAsio()
\r
2610 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2611 // CoInitialize beforehand, but it must be for appartment threading
\r
2612 // (in which case, CoInitilialize will return S_FALSE here).
\r
2613 coInitialized_ = false;
\r
2614 HRESULT hr = CoInitialize( NULL );
\r
2615 if ( FAILED(hr) ) {
\r
2616 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2617 error( RtAudioError::WARNING );
\r
2619 coInitialized_ = true;
\r
2621 drivers.removeCurrentDriver();
\r
2622 driverInfo.asioVersion = 2;
\r
2624 // See note in DirectSound implementation about GetDesktopWindow().
\r
2625 driverInfo.sysRef = GetForegroundWindow();
\r
2628 RtApiAsio :: ~RtApiAsio()
\r
2630 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2631 if ( coInitialized_ ) CoUninitialize();
\r
2634 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2636 return (unsigned int) drivers.asioGetNumDev();
\r
2639 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2641 RtAudio::DeviceInfo info;
\r
2642 info.probed = false;
\r
2645 unsigned int nDevices = getDeviceCount();
\r
2646 if ( nDevices == 0 ) {
\r
2647 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2648 error( RtAudioError::INVALID_USE );
\r
2652 if ( device >= nDevices ) {
\r
2653 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2654 error( RtAudioError::INVALID_USE );
\r
2658 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2659 if ( stream_.state != STREAM_CLOSED ) {
\r
2660 if ( device >= devices_.size() ) {
\r
2661 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2662 error( RtAudioError::WARNING );
\r
2665 return devices_[ device ];
\r
2668 char driverName[32];
\r
2669 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2670 if ( result != ASE_OK ) {
\r
2671 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2672 errorText_ = errorStream_.str();
\r
2673 error( RtAudioError::WARNING );
\r
2677 info.name = driverName;
\r
2679 if ( !drivers.loadDriver( driverName ) ) {
\r
2680 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2681 errorText_ = errorStream_.str();
\r
2682 error( RtAudioError::WARNING );
\r
2686 result = ASIOInit( &driverInfo );
\r
2687 if ( result != ASE_OK ) {
\r
2688 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2689 errorText_ = errorStream_.str();
\r
2690 error( RtAudioError::WARNING );
\r
2694 // Determine the device channel information.
\r
2695 long inputChannels, outputChannels;
\r
2696 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2697 if ( result != ASE_OK ) {
\r
2698 drivers.removeCurrentDriver();
\r
2699 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2700 errorText_ = errorStream_.str();
\r
2701 error( RtAudioError::WARNING );
\r
2705 info.outputChannels = outputChannels;
\r
2706 info.inputChannels = inputChannels;
\r
2707 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2708 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2710 // Determine the supported sample rates.
\r
2711 info.sampleRates.clear();
\r
2712 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2713 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2714 if ( result == ASE_OK )
\r
2715 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2718 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2719 ASIOChannelInfo channelInfo;
\r
2720 channelInfo.channel = 0;
\r
2721 channelInfo.isInput = true;
\r
2722 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2723 result = ASIOGetChannelInfo( &channelInfo );
\r
2724 if ( result != ASE_OK ) {
\r
2725 drivers.removeCurrentDriver();
\r
2726 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2727 errorText_ = errorStream_.str();
\r
2728 error( RtAudioError::WARNING );
\r
2732 info.nativeFormats = 0;
\r
2733 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2734 info.nativeFormats |= RTAUDIO_SINT16;
\r
2735 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2736 info.nativeFormats |= RTAUDIO_SINT32;
\r
2737 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2738 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2739 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2740 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2741 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2742 info.nativeFormats |= RTAUDIO_SINT24;
\r
2744 if ( info.outputChannels > 0 )
\r
2745 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2746 if ( info.inputChannels > 0 )
\r
2747 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2749 info.probed = true;
\r
2750 drivers.removeCurrentDriver();
\r
2754 static void bufferSwitch( long index, ASIOBool processNow )
\r
2756 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2757 object->callbackEvent( index );
\r
2760 void RtApiAsio :: saveDeviceInfo( void )
\r
2764 unsigned int nDevices = getDeviceCount();
\r
2765 devices_.resize( nDevices );
\r
2766 for ( unsigned int i=0; i<nDevices; i++ )
\r
2767 devices_[i] = getDeviceInfo( i );
\r
2770 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2771 unsigned int firstChannel, unsigned int sampleRate,
\r
2772 RtAudioFormat format, unsigned int *bufferSize,
\r
2773 RtAudio::StreamOptions *options )
\r
2775 // For ASIO, a duplex stream MUST use the same driver.
\r
2776 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2777 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2781 char driverName[32];
\r
2782 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2783 if ( result != ASE_OK ) {
\r
2784 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2785 errorText_ = errorStream_.str();
\r
2789 // Only load the driver once for duplex stream.
\r
2790 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2791 // The getDeviceInfo() function will not work when a stream is open
\r
2792 // because ASIO does not allow multiple devices to run at the same
\r
2793 // time. Thus, we'll probe the system before opening a stream and
\r
2794 // save the results for use by getDeviceInfo().
\r
2795 this->saveDeviceInfo();
\r
2797 if ( !drivers.loadDriver( driverName ) ) {
\r
2798 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2799 errorText_ = errorStream_.str();
\r
2803 result = ASIOInit( &driverInfo );
\r
2804 if ( result != ASE_OK ) {
\r
2805 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2806 errorText_ = errorStream_.str();
\r
2811 // Check the device channel count.
\r
2812 long inputChannels, outputChannels;
\r
2813 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2814 if ( result != ASE_OK ) {
\r
2815 drivers.removeCurrentDriver();
\r
2816 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2817 errorText_ = errorStream_.str();
\r
2821 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2822 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2823 drivers.removeCurrentDriver();
\r
2824 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2825 errorText_ = errorStream_.str();
\r
2828 stream_.nDeviceChannels[mode] = channels;
\r
2829 stream_.nUserChannels[mode] = channels;
\r
2830 stream_.channelOffset[mode] = firstChannel;
\r
2832 // Verify the sample rate is supported.
\r
2833 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2834 if ( result != ASE_OK ) {
\r
2835 drivers.removeCurrentDriver();
\r
2836 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2837 errorText_ = errorStream_.str();
\r
2841 // Get the current sample rate
\r
2842 ASIOSampleRate currentRate;
\r
2843 result = ASIOGetSampleRate( ¤tRate );
\r
2844 if ( result != ASE_OK ) {
\r
2845 drivers.removeCurrentDriver();
\r
2846 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2847 errorText_ = errorStream_.str();
\r
2851 // Set the sample rate only if necessary
\r
2852 if ( currentRate != sampleRate ) {
\r
2853 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2854 if ( result != ASE_OK ) {
\r
2855 drivers.removeCurrentDriver();
\r
2856 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2857 errorText_ = errorStream_.str();
\r
2862 // Determine the driver data type.
\r
2863 ASIOChannelInfo channelInfo;
\r
2864 channelInfo.channel = 0;
\r
2865 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2866 else channelInfo.isInput = true;
\r
2867 result = ASIOGetChannelInfo( &channelInfo );
\r
2868 if ( result != ASE_OK ) {
\r
2869 drivers.removeCurrentDriver();
\r
2870 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2871 errorText_ = errorStream_.str();
\r
2875 // Assuming WINDOWS host is always little-endian.
\r
2876 stream_.doByteSwap[mode] = false;
\r
2877 stream_.userFormat = format;
\r
2878 stream_.deviceFormat[mode] = 0;
\r
2879 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2880 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2881 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2883 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2884 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2885 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2887 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2888 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2889 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2891 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2892 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2893 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2895 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2896 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2897 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2900 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2901 drivers.removeCurrentDriver();
\r
2902 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2903 errorText_ = errorStream_.str();
\r
2907 // Set the buffer size. For a duplex stream, this will end up
\r
2908 // setting the buffer size based on the input constraints, which
\r
2910 long minSize, maxSize, preferSize, granularity;
\r
2911 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2912 if ( result != ASE_OK ) {
\r
2913 drivers.removeCurrentDriver();
\r
2914 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2915 errorText_ = errorStream_.str();
\r
2919 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2920 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2921 else if ( granularity == -1 ) {
\r
2922 // Make sure bufferSize is a power of two.
\r
2923 int log2_of_min_size = 0;
\r
2924 int log2_of_max_size = 0;
\r
2926 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2927 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2928 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2931 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2932 int min_delta_num = log2_of_min_size;
\r
2934 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2935 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2936 if (current_delta < min_delta) {
\r
2937 min_delta = current_delta;
\r
2938 min_delta_num = i;
\r
2942 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2943 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2944 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2946 else if ( granularity != 0 ) {
\r
2947 // Set to an even multiple of granularity, rounding up.
\r
2948 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
2951 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2952 drivers.removeCurrentDriver();
\r
2953 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
2957 stream_.bufferSize = *bufferSize;
\r
2958 stream_.nBuffers = 2;
\r
2960 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2961 else stream_.userInterleaved = true;
\r
2963 // ASIO always uses non-interleaved buffers.
\r
2964 stream_.deviceInterleaved[mode] = false;
\r
2966 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
2967 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2968 if ( handle == 0 ) {
\r
2970 handle = new AsioHandle;
\r
2972 catch ( std::bad_alloc& ) {
\r
2973 //if ( handle == NULL ) {
\r
2974 drivers.removeCurrentDriver();
\r
2975 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
2978 handle->bufferInfos = 0;
\r
2980 // Create a manual-reset event.
\r
2981 handle->condition = CreateEvent( NULL, // no security
\r
2982 TRUE, // manual-reset
\r
2983 FALSE, // non-signaled initially
\r
2984 NULL ); // unnamed
\r
2985 stream_.apiHandle = (void *) handle;
\r
2988 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
2989 // and output separately, we'll have to dispose of previously
\r
2990 // created output buffers for a duplex stream.
\r
2991 long inputLatency, outputLatency;
\r
2992 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
2993 ASIODisposeBuffers();
\r
2994 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
2997 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
2998 bool buffersAllocated = false;
\r
2999 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3000 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3001 if ( handle->bufferInfos == NULL ) {
\r
3002 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3003 errorText_ = errorStream_.str();
\r
3007 ASIOBufferInfo *infos;
\r
3008 infos = handle->bufferInfos;
\r
3009 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3010 infos->isInput = ASIOFalse;
\r
3011 infos->channelNum = i + stream_.channelOffset[0];
\r
3012 infos->buffers[0] = infos->buffers[1] = 0;
\r
3014 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3015 infos->isInput = ASIOTrue;
\r
3016 infos->channelNum = i + stream_.channelOffset[1];
\r
3017 infos->buffers[0] = infos->buffers[1] = 0;
\r
3020 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3021 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3022 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3023 asioCallbacks.asioMessage = &asioMessages;
\r
3024 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3025 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3026 if ( result != ASE_OK ) {
\r
3027 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3028 errorText_ = errorStream_.str();
\r
3031 buffersAllocated = true;
\r
3033 // Set flags for buffer conversion.
\r
3034 stream_.doConvertBuffer[mode] = false;
\r
3035 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3036 stream_.doConvertBuffer[mode] = true;
\r
3037 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3038 stream_.nUserChannels[mode] > 1 )
\r
3039 stream_.doConvertBuffer[mode] = true;
\r
3041 // Allocate necessary internal buffers
\r
3042 unsigned long bufferBytes;
\r
3043 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3044 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3045 if ( stream_.userBuffer[mode] == NULL ) {
\r
3046 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3050 if ( stream_.doConvertBuffer[mode] ) {
\r
3052 bool makeBuffer = true;
\r
3053 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3054 if ( mode == INPUT ) {
\r
3055 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3056 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3057 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3061 if ( makeBuffer ) {
\r
3062 bufferBytes *= *bufferSize;
\r
3063 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3064 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3065 if ( stream_.deviceBuffer == NULL ) {
\r
3066 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3072 stream_.sampleRate = sampleRate;
\r
3073 stream_.device[mode] = device;
\r
3074 stream_.state = STREAM_STOPPED;
\r
3075 asioCallbackInfo = &stream_.callbackInfo;
\r
3076 stream_.callbackInfo.object = (void *) this;
\r
3077 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3078 // We had already set up an output stream.
\r
3079 stream_.mode = DUPLEX;
\r
3081 stream_.mode = mode;
\r
3083 // Determine device latencies
\r
3084 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3085 if ( result != ASE_OK ) {
\r
3086 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3087 errorText_ = errorStream_.str();
\r
3088 error( RtAudioError::WARNING); // warn but don't fail
\r
3091 stream_.latency[0] = outputLatency;
\r
3092 stream_.latency[1] = inputLatency;
\r
3095 // Setup the buffer conversion information structure. We don't use
\r
3096 // buffers to do channel offsets, so we override that parameter
\r
3098 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3103 if ( buffersAllocated )
\r
3104 ASIODisposeBuffers();
\r
3105 drivers.removeCurrentDriver();
\r
3108 CloseHandle( handle->condition );
\r
3109 if ( handle->bufferInfos )
\r
3110 free( handle->bufferInfos );
\r
3112 stream_.apiHandle = 0;
\r
3115 for ( int i=0; i<2; i++ ) {
\r
3116 if ( stream_.userBuffer[i] ) {
\r
3117 free( stream_.userBuffer[i] );
\r
3118 stream_.userBuffer[i] = 0;
\r
3122 if ( stream_.deviceBuffer ) {
\r
3123 free( stream_.deviceBuffer );
\r
3124 stream_.deviceBuffer = 0;
\r
3130 void RtApiAsio :: closeStream()
\r
3132 if ( stream_.state == STREAM_CLOSED ) {
\r
3133 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3134 error( RtAudioError::WARNING );
\r
3138 if ( stream_.state == STREAM_RUNNING ) {
\r
3139 stream_.state = STREAM_STOPPED;
\r
3142 ASIODisposeBuffers();
\r
3143 drivers.removeCurrentDriver();
\r
3145 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3147 CloseHandle( handle->condition );
\r
3148 if ( handle->bufferInfos )
\r
3149 free( handle->bufferInfos );
\r
3151 stream_.apiHandle = 0;
\r
3154 for ( int i=0; i<2; i++ ) {
\r
3155 if ( stream_.userBuffer[i] ) {
\r
3156 free( stream_.userBuffer[i] );
\r
3157 stream_.userBuffer[i] = 0;
\r
3161 if ( stream_.deviceBuffer ) {
\r
3162 free( stream_.deviceBuffer );
\r
3163 stream_.deviceBuffer = 0;
\r
3166 stream_.mode = UNINITIALIZED;
\r
3167 stream_.state = STREAM_CLOSED;
\r
3170 bool stopThreadCalled = false;
\r
3172 void RtApiAsio :: startStream()
\r
3175 if ( stream_.state == STREAM_RUNNING ) {
\r
3176 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3177 error( RtAudioError::WARNING );
\r
3181 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3182 ASIOError result = ASIOStart();
\r
3183 if ( result != ASE_OK ) {
\r
3184 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3185 errorText_ = errorStream_.str();
\r
3189 handle->drainCounter = 0;
\r
3190 handle->internalDrain = false;
\r
3191 ResetEvent( handle->condition );
\r
3192 stream_.state = STREAM_RUNNING;
\r
3196 stopThreadCalled = false;
\r
3198 if ( result == ASE_OK ) return;
\r
3199 error( RtAudioError::SYSTEM_ERROR );
\r
3202 void RtApiAsio :: stopStream()
\r
3205 if ( stream_.state == STREAM_STOPPED ) {
\r
3206 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3207 error( RtAudioError::WARNING );
\r
3211 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3212 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3213 if ( handle->drainCounter == 0 ) {
\r
3214 handle->drainCounter = 2;
\r
3215 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3219 stream_.state = STREAM_STOPPED;
\r
3221 ASIOError result = ASIOStop();
\r
3222 if ( result != ASE_OK ) {
\r
3223 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3224 errorText_ = errorStream_.str();
\r
3227 if ( result == ASE_OK ) return;
\r
3228 error( RtAudioError::SYSTEM_ERROR );
\r
3231 void RtApiAsio :: abortStream()
\r
3234 if ( stream_.state == STREAM_STOPPED ) {
\r
3235 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3236 error( RtAudioError::WARNING );
\r
3240 // The following lines were commented-out because some behavior was
\r
3241 // noted where the device buffers need to be zeroed to avoid
\r
3242 // continuing sound, even when the device buffers are completely
\r
3243 // disposed. So now, calling abort is the same as calling stop.
\r
3244 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3245 // handle->drainCounter = 2;
\r
3249 // This function will be called by a spawned thread when the user
\r
3250 // callback function signals that the stream should be stopped or
\r
3251 // aborted. It is necessary to handle it this way because the
\r
3252 // callbackEvent() function must return before the ASIOStop()
\r
3253 // function will return.
\r
3254 static unsigned __stdcall asioStopStream( void *ptr )
\r
3256 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3257 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3259 object->stopStream();
\r
3260 _endthreadex( 0 );
\r
3264 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3266 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3267 if ( stream_.state == STREAM_CLOSED ) {
\r
3268 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3269 error( RtAudioError::WARNING );
\r
3273 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3274 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3276 // Check if we were draining the stream and signal if finished.
\r
3277 if ( handle->drainCounter > 3 ) {
\r
3279 stream_.state = STREAM_STOPPING;
\r
3280 if ( handle->internalDrain == false )
\r
3281 SetEvent( handle->condition );
\r
3282 else { // spawn a thread to stop the stream
\r
3283 unsigned threadId;
\r
3284 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3285 &stream_.callbackInfo, 0, &threadId );
\r
3290 // Invoke user callback to get fresh output data UNLESS we are
\r
3291 // draining stream.
\r
3292 if ( handle->drainCounter == 0 ) {
\r
3293 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3294 double streamTime = getStreamTime();
\r
3295 RtAudioStreamStatus status = 0;
\r
3296 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3297 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3300 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3301 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3304 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3305 stream_.bufferSize, streamTime, status, info->userData );
\r
3306 if ( cbReturnValue == 2 ) {
\r
3307 stream_.state = STREAM_STOPPING;
\r
3308 handle->drainCounter = 2;
\r
3309 unsigned threadId;
\r
3310 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3311 &stream_.callbackInfo, 0, &threadId );
\r
3314 else if ( cbReturnValue == 1 ) {
\r
3315 handle->drainCounter = 1;
\r
3316 handle->internalDrain = true;
\r
3320 unsigned int nChannels, bufferBytes, i, j;
\r
3321 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3322 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3324 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3326 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3328 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3329 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3330 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3334 else if ( stream_.doConvertBuffer[0] ) {
\r
3336 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3337 if ( stream_.doByteSwap[0] )
\r
3338 byteSwapBuffer( stream_.deviceBuffer,
\r
3339 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3340 stream_.deviceFormat[0] );
\r
3342 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3343 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3344 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3345 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3351 if ( stream_.doByteSwap[0] )
\r
3352 byteSwapBuffer( stream_.userBuffer[0],
\r
3353 stream_.bufferSize * stream_.nUserChannels[0],
\r
3354 stream_.userFormat );
\r
3356 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3357 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3358 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3359 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3364 if ( handle->drainCounter ) {
\r
3365 handle->drainCounter++;
\r
3370 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3372 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3374 if (stream_.doConvertBuffer[1]) {
\r
3376 // Always interleave ASIO input data.
\r
3377 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3378 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3379 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3380 handle->bufferInfos[i].buffers[bufferIndex],
\r
3384 if ( stream_.doByteSwap[1] )
\r
3385 byteSwapBuffer( stream_.deviceBuffer,
\r
3386 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3387 stream_.deviceFormat[1] );
\r
3388 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3392 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3393 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3394 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3395 handle->bufferInfos[i].buffers[bufferIndex],
\r
3400 if ( stream_.doByteSwap[1] )
\r
3401 byteSwapBuffer( stream_.userBuffer[1],
\r
3402 stream_.bufferSize * stream_.nUserChannels[1],
\r
3403 stream_.userFormat );
\r
3408 // The following call was suggested by Malte Clasen. While the API
\r
3409 // documentation indicates it should not be required, some device
\r
3410 // drivers apparently do not function correctly without it.
\r
3411 ASIOOutputReady();
\r
3413 RtApi::tickStreamTime();
\r
3417 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3419 // The ASIO documentation says that this usually only happens during
\r
3420 // external sync. Audio processing is not stopped by the driver,
\r
3421 // actual sample rate might not have even changed, maybe only the
\r
3422 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3425 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3427 object->stopStream();
\r
3429 catch ( RtAudioError &exception ) {
\r
3430 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3434 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3437 static long asioMessages( long selector, long value, void* message, double* opt )
\r
3441 switch( selector ) {
\r
3442 case kAsioSelectorSupported:
\r
3443 if ( value == kAsioResetRequest
\r
3444 || value == kAsioEngineVersion
\r
3445 || value == kAsioResyncRequest
\r
3446 || value == kAsioLatenciesChanged
\r
3447 // The following three were added for ASIO 2.0, you don't
\r
3448 // necessarily have to support them.
\r
3449 || value == kAsioSupportsTimeInfo
\r
3450 || value == kAsioSupportsTimeCode
\r
3451 || value == kAsioSupportsInputMonitor)
\r
3454 case kAsioResetRequest:
\r
3455 // Defer the task and perform the reset of the driver during the
\r
3456 // next "safe" situation. You cannot reset the driver right now,
\r
3457 // as this code is called from the driver. Reset the driver is
\r
3458 // done by completely destruct is. I.e. ASIOStop(),
\r
3459 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3461 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3464 case kAsioResyncRequest:
\r
3465 // This informs the application that the driver encountered some
\r
3466 // non-fatal data loss. It is used for synchronization purposes
\r
3467 // of different media. Added mainly to work around the Win16Mutex
\r
3468 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3469 // which could lose data because the Mutex was held too long by
\r
3470 // another thread. However a driver can issue it in other
\r
3471 // situations, too.
\r
3472 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3476 case kAsioLatenciesChanged:
\r
3477 // This will inform the host application that the drivers were
\r
3478 // latencies changed. Beware, it this does not mean that the
\r
3479 // buffer sizes have changed! You might need to update internal
\r
3481 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3484 case kAsioEngineVersion:
\r
3485 // Return the supported ASIO version of the host application. If
\r
3486 // a host application does not implement this selector, ASIO 1.0
\r
3487 // is assumed by the driver.
\r
3490 case kAsioSupportsTimeInfo:
\r
3491 // Informs the driver whether the
\r
3492 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3493 // For compatibility with ASIO 1.0 drivers the host application
\r
3494 // should always support the "old" bufferSwitch method, too.
\r
3497 case kAsioSupportsTimeCode:
\r
3498 // Informs the driver whether application is interested in time
\r
3499 // code info. If an application does not need to know about time
\r
3500 // code, the driver has less work to do.
\r
3507 static const char* getAsioErrorString( ASIOError result )
\r
3512 const char*message;
\r
3515 static const Messages m[] =
\r
3517 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3518 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3519 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3520 { ASE_InvalidMode, "Invalid mode." },
\r
3521 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3522 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3523 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3526 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3527 if ( m[i].value == result ) return m[i].message;
\r
3529 return "Unknown error.";
\r
3531 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3535 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
3537 // Modified by Robin Davies, October 2005
\r
3538 // - Improvements to DirectX pointer chasing.
\r
3539 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
3540 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
3541 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
3542 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
3544 #include <dsound.h>
\r
3545 #include <assert.h>
\r
3546 #include <algorithm>
\r
3548 #if defined(__MINGW32__)
\r
3549 // missing from latest mingw winapi
\r
3550 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
3551 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
3552 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
3553 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
3556 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
3558 #ifdef _MSC_VER // if Microsoft Visual C++
\r
3559 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
3562 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
3564 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
3565 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
3566 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
3567 return pointer >= earlierPointer && pointer < laterPointer;
\r
3570 // A structure to hold various information related to the DirectSound

3571 // API implementation.

// NOTE(review): elided extraction — the "struct DsHandle {" header and
// several members (the per-direction device/buffer pointers, xrun flags
// and the HANDLE condition referenced by the constructor below) are
// missing from the visible text.
3573 unsigned int drainCounter; // Tracks callback counts when draining

3574 bool internalDrain; // Indicates if stop is initiated from callback or not.

// Index 0 = playback, index 1 = capture (per the usages later in this file).
3578 UINT bufferPointer[2];

3579 DWORD dsBufferSize[2];

3580 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.

// DsHandle constructor: zero/false-initialize all per-direction state.
3584 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }

3587 // Declarations for utility functions, callbacks, and structures

3588 // specific to the DirectSound implementation.

// Enumeration callback passed to DirectSound(Capture)Enumerate below.
3589 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,

3590 LPCTSTR description,

3592 LPVOID lpContext );

3594 static const char* getErrorString( int code );

// Entry point for the stream callback thread created via _beginthreadex.
3596 static unsigned __stdcall callbackHandler( void *ptr );

// DsDevice constructor (struct header and fields are elided here);
// validId[0]/validId[1] mark playback/capture GUID validity.
3605 : found(false) { validId[0] = false; validId[1] = false; }

// Context passed to deviceQueryCallback; the "bool isInput" member set
// by getDeviceCount() is elided from the visible text.
3608 struct DsProbeData {

3610 std::vector<struct DsDevice>* dsDevices;
\r
3613 RtApiDs :: RtApiDs()
\r
3615 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
3616 // accept whatever the mainline chose for a threading model.
\r
3617 coInitialized_ = false;
\r
3618 HRESULT hr = CoInitialize( NULL );
\r
3619 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
3622 RtApiDs :: ~RtApiDs()
\r
3624 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
3625 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
3628 // The DirectSound default output is always the first device.
\r
3629 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
3634 // The DirectSound default input is always the first input device,
\r
3635 // which is the first capture device enumerated.
\r
3636 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
3641 unsigned int RtApiDs :: getDeviceCount( void )
\r
3643 // Set query flag for previously found devices to false, so that we
\r
3644 // can check for any devices that have disappeared.
\r
3645 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3646 dsDevices[i].found = false;
\r
3648 // Query DirectSound devices.
\r
3649 struct DsProbeData probeInfo;
\r
3650 probeInfo.isInput = false;
\r
3651 probeInfo.dsDevices = &dsDevices;
\r
3652 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
3653 if ( FAILED( result ) ) {
\r
3654 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
3655 errorText_ = errorStream_.str();
\r
3656 error( RtAudioError::WARNING );
\r
3659 // Query DirectSoundCapture devices.
\r
3660 probeInfo.isInput = true;
\r
3661 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
3662 if ( FAILED( result ) ) {
\r
3663 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
3664 errorText_ = errorStream_.str();
\r
3665 error( RtAudioError::WARNING );
\r
3668 // Clean out any devices that may have disappeared.
\r
3669 std::vector< int > indices;
\r
3670 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3671 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
3672 unsigned int nErased = 0;
\r
3673 for ( unsigned int i=0; i<indices.size(); i++ )
\r
3674 dsDevices.erase( dsDevices.begin()-nErased++ );
\r
3676 return dsDevices.size();
\r
// Probe DirectSound device index 'device' and fill an
// RtAudio::DeviceInfo with its channel counts, supported sample rates,
// native formats and default-device flags.  Errors are reported as
// warnings and leave info.probed == false.
// NOTE(review): elided extraction — opening/closing braces, the
// "probeInput:" label targeted by the goto below, declarations such as
// "HRESULT result;", "DSCAPS outCaps;" and "DSCCAPS inCaps;", the
// input->Release() calls, and the early "return info;" error paths are
// missing from the visible text; do not treat this span as compilable.
3679 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )

3681 RtAudio::DeviceInfo info;

// Until a probe completes successfully, report the device as unprobed.
3682 info.probed = false;

3684 if ( dsDevices.size() == 0 ) {

3685 // Force a query of all devices

3687 if ( dsDevices.size() == 0 ) {

3688 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";

3689 error( RtAudioError::INVALID_USE );

3694 if ( device >= dsDevices.size() ) {

3695 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";

3696 error( RtAudioError::INVALID_USE );

// No playback GUID for this device: skip straight to the capture probe.
3701 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;

3703 LPDIRECTSOUND output;

3705 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

3706 if ( FAILED( result ) ) {

3707 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

3708 errorText_ = errorStream_.str();

3709 error( RtAudioError::WARNING );

3713 outCaps.dwSize = sizeof( outCaps );

3714 result = output->GetCaps( &outCaps );

3715 if ( FAILED( result ) ) {

3716 output->Release();

3717 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";

3718 errorText_ = errorStream_.str();

3719 error( RtAudioError::WARNING );

3723 // Get output channel information.

3724 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;

// Keep every standard rate that falls inside the device's secondary-
// buffer rate range.
3726 // Get sample rate information.

3727 info.sampleRates.clear();

3728 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {

3729 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&

3730 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )

3731 info.sampleRates.push_back( SAMPLE_RATES[k] );

3734 // Get format information.

3735 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;

3736 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;

3738 output->Release();

3740 if ( getDefaultOutputDevice() == device )

3741 info.isDefaultOutput = true;

// Output-only device: finished — no capture side to probe.
3743 if ( dsDevices[ device ].validId[1] == false ) {

3744 info.name = dsDevices[ device ].name;

3745 info.probed = true;

// (The "probeInput:" label belongs here in the original source.)
3751 LPDIRECTSOUNDCAPTURE input;

3752 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

3753 if ( FAILED( result ) ) {

3754 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

3755 errorText_ = errorStream_.str();

3756 error( RtAudioError::WARNING );

3761 inCaps.dwSize = sizeof( inCaps );

3762 result = input->GetCaps( &inCaps );

3763 if ( FAILED( result ) ) {

3765 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";

3766 errorText_ = errorStream_.str();

3767 error( RtAudioError::WARNING );

3771 // Get input channel information.

3772 info.inputChannels = inCaps.dwChannels;

// Capture caps expose formats as WAVE_FORMAT_* bitmask combinations of
// rate (1=11025, 2=22050, 4=44100, 96=96000), channels (M/S) and width
// (08/16); decode them into native formats and supported rates.
3774 // Get sample rate and format information.

3775 std::vector<unsigned int> rates;

3776 if ( inCaps.dwChannels >= 2 ) {

3777 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;

3778 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;

3779 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;

3780 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;

3781 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;

3782 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;

3783 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;

3784 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;

// Prefer 16-bit rates when 16-bit capture is available at all.
3786 if ( info.nativeFormats & RTAUDIO_SINT16 ) {

3787 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );

3788 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );

3789 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );

3790 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );

3792 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {

3793 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );

3794 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );

3795 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );

3796 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );

3799 else if ( inCaps.dwChannels == 1 ) {

3800 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;

3801 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;

3802 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;

3803 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;

3804 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;

3805 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;

3806 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;

3807 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;

3809 if ( info.nativeFormats & RTAUDIO_SINT16 ) {

3810 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );

3811 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );

3812 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );

3813 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );

3815 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {

3816 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );

3817 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );

3818 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );

3819 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );

3822 else info.inputChannels = 0; // technically, this would be an error

3826 if ( info.inputChannels == 0 ) return info;

3828 // Copy the supported rates to the info structure but avoid duplication.

// (The "bool found = false;" declaration and the "found = true; break;"
// lines of this dedup loop are elided from the visible text.)
3830 for ( unsigned int i=0; i<rates.size(); i++ ) {

3832 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {

3833 if ( rates[i] == info.sampleRates[j] ) {

3838 if ( found == false ) info.sampleRates.push_back( rates[i] );

3840 std::sort( info.sampleRates.begin(), info.sampleRates.end() );

3842 // If device opens for both playback and capture, we determine the channels.

3843 if ( info.outputChannels > 0 && info.inputChannels > 0 )

3844 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

// First capture device enumerated is the DirectSound default input.
3846 if ( device == 0 ) info.isDefaultInput = true;

3848 // Copy name and return.

3849 info.name = dsDevices[ device ].name;

3850 info.probed = true;
\r
// Open DirectSound device 'device' for the given mode (OUTPUT or
// INPUT), negotiating channel count, sample format and buffer sizes,
// creating the DS playback or capture buffer, allocating the user /
// conversion buffers, and (on the first open) spawning the callback
// thread.  Returns true on success; on failure sets errorText_ and
// falls through to the shared cleanup code at the bottom.
// NOTE(review): elided extraction — opening/closing braces, "error(...)
// return FAILURE;" statements after each errorText_ assignment,
// declarations such as "HRESULT result;", "DSCAPS outCaps;",
// "DSCCAPS inCaps;", "DSBCAPS dsbcaps;", "void *audioPtr;",
// "DWORD dataLen;", the "try {" around "new DsHandle", the "else"
// before the non-duplex stream_.mode assignment, the "error:" cleanup
// label, and the final "return FAILURE;" are missing from the visible
// text; do not treat this span as compilable as-is.
3854 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

3855 unsigned int firstChannel, unsigned int sampleRate,

3856 RtAudioFormat format, unsigned int *bufferSize,

3857 RtAudio::StreamOptions *options )

// DirectSound limits us to two channels per device (stereo).
3859 if ( channels + firstChannel > 2 ) {

3860 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";

3864 unsigned int nDevices = dsDevices.size();

3865 if ( nDevices == 0 ) {

3866 // This should not happen because a check is made before this function is called.

3867 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";

3871 if ( device >= nDevices ) {

3872 // This should not happen because a check is made before this function is called.

3873 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";

3877 if ( mode == OUTPUT ) {

3878 if ( dsDevices[ device ].validId[0] == false ) {

3879 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";

3880 errorText_ = errorStream_.str();

3884 else { // mode == INPUT

3885 if ( dsDevices[ device ].validId[1] == false ) {

3886 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";

3887 errorText_ = errorStream_.str();

3892 // According to a note in PortAudio, using GetDesktopWindow()

3893 // instead of GetForegroundWindow() is supposed to avoid problems

3894 // that occur when the application's window is not the foreground

3895 // window. Also, if the application window closes before the

3896 // DirectSound buffer, DirectSound can crash. In the past, I had

3897 // problems when using GetDesktopWindow() but it seems fine now

3898 // (January 2010). I'll leave it commented here.

3899 // HWND hWnd = GetForegroundWindow();

3900 HWND hWnd = GetDesktopWindow();

3902 // Check the numberOfBuffers parameter and limit the lowest value to

3903 // two. This is a judgement call and a value of two is probably too

3904 // low for capture, but it should work for playback.

// (The "int nBuffers = 0;" declaration is elided here.)
3906 if ( options ) nBuffers = options->numberOfBuffers;

3907 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;

3908 if ( nBuffers < 2 ) nBuffers = 3;

3910 // Check the lower range of the user-specified buffer size and set

3911 // (arbitrarily) to a lower bound of 32.

3912 if ( *bufferSize < 32 ) *bufferSize = 32;

3914 // Create the wave format structure. The data format setting will

3915 // be determined later.

3916 WAVEFORMATEX waveFormat;

3917 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );

3918 waveFormat.wFormatTag = WAVE_FORMAT_PCM;

3919 waveFormat.nChannels = channels + firstChannel;

3920 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;

3922 // Determine the device buffer size. By default, we'll use the value

3923 // defined above (32K), but we will grow it to make allowances for

3924 // very large software buffer sizes.

3925 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;

3926 DWORD dsPointerLeadTime = 0;

3928 void *ohandle = 0, *bhandle = 0;

3930 if ( mode == OUTPUT ) {

3932 LPDIRECTSOUND output;

3933 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

3934 if ( FAILED( result ) ) {

3935 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

3936 errorText_ = errorStream_.str();

3941 outCaps.dwSize = sizeof( outCaps );

3942 result = output->GetCaps( &outCaps );

3943 if ( FAILED( result ) ) {

3944 output->Release();

3945 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";

3946 errorText_ = errorStream_.str();

3950 // Check channel information.

3951 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {

3952 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";

3953 errorText_ = errorStream_.str();

3957 // Check format information. Use 16-bit format unless not

3958 // supported or user requests 8-bit.

3959 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&

3960 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {

3961 waveFormat.wBitsPerSample = 16;

3962 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

3965 waveFormat.wBitsPerSample = 8;

3966 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

3968 stream_.userFormat = format;

3970 // Update wave format structure and buffer information.

3971 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

3972 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

3973 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

3975 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

3976 while ( dsPointerLeadTime * 2U > dsBufferSize )

3977 dsBufferSize *= 2;

3979 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.

3980 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );

3981 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.

3982 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );

3983 if ( FAILED( result ) ) {

3984 output->Release();

3985 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";

3986 errorText_ = errorStream_.str();

3990 // Even though we will write to the secondary buffer, we need to

3991 // access the primary buffer to set the correct output format

3992 // (since the default is 8-bit, 22 kHz!). Setup the DS primary

3993 // buffer description.

3994 DSBUFFERDESC bufferDescription;

3995 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

3996 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

3997 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;

3999 // Obtain the primary buffer

4000 LPDIRECTSOUNDBUFFER buffer;

4001 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

4002 if ( FAILED( result ) ) {

4003 output->Release();

4004 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";

4005 errorText_ = errorStream_.str();

4009 // Set the primary DS buffer sound format.

4010 result = buffer->SetFormat( &waveFormat );

4011 if ( FAILED( result ) ) {

4012 output->Release();

4013 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";

4014 errorText_ = errorStream_.str();

4018 // Setup the secondary DS buffer description.

4019 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

4020 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

4021 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

4022 DSBCAPS_GLOBALFOCUS |

4023 DSBCAPS_GETCURRENTPOSITION2 |

4024 DSBCAPS_LOCHARDWARE ); // Force hardware mixing

4025 bufferDescription.dwBufferBytes = dsBufferSize;

4026 bufferDescription.lpwfxFormat = &waveFormat;

4028 // Try to create the secondary DS buffer. If that doesn't work,

4029 // try to use software mixing. Otherwise, there's a problem.

4030 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

4031 if ( FAILED( result ) ) {

4032 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

4033 DSBCAPS_GLOBALFOCUS |

4034 DSBCAPS_GETCURRENTPOSITION2 |

4035 DSBCAPS_LOCSOFTWARE ); // Force software mixing

4036 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

4037 if ( FAILED( result ) ) {

4038 output->Release();

4039 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";

4040 errorText_ = errorStream_.str();

4045 // Get the buffer size ... might be different from what we specified.

4047 dsbcaps.dwSize = sizeof( DSBCAPS );

4048 result = buffer->GetCaps( &dsbcaps );

4049 if ( FAILED( result ) ) {

4050 output->Release();

4051 buffer->Release();

4052 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

4053 errorText_ = errorStream_.str();

4057 dsBufferSize = dsbcaps.dwBufferBytes;

// Lock the whole buffer once so we can zero it before playback starts.
4059 // Lock the DS buffer

4062 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

4063 if ( FAILED( result ) ) {

4064 output->Release();

4065 buffer->Release();

4066 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";

4067 errorText_ = errorStream_.str();

4071 // Zero the DS buffer

4072 ZeroMemory( audioPtr, dataLen );

4074 // Unlock the DS buffer

4075 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

4076 if ( FAILED( result ) ) {

4077 output->Release();

4078 buffer->Release();

4079 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";

4080 errorText_ = errorStream_.str();

// Stash the COM object and buffer for the DsHandle below.
4084 ohandle = (void *) output;

4085 bhandle = (void *) buffer;

4088 if ( mode == INPUT ) {

4090 LPDIRECTSOUNDCAPTURE input;

4091 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

4092 if ( FAILED( result ) ) {

4093 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

4094 errorText_ = errorStream_.str();

4099 inCaps.dwSize = sizeof( inCaps );

4100 result = input->GetCaps( &inCaps );

4101 if ( FAILED( result ) ) {

4103 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";

4104 errorText_ = errorStream_.str();

4108 // Check channel information.

4109 if ( inCaps.dwChannels < channels + firstChannel ) {

4110 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";

4114 // Check format information. Use 16-bit format unless user

4115 // requests 8-bit.

4116 DWORD deviceFormats;

4117 if ( channels + firstChannel == 2 ) {

4118 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;

4119 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

4120 waveFormat.wBitsPerSample = 8;

4121 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

4123 else { // assume 16-bit is supported

4124 waveFormat.wBitsPerSample = 16;

4125 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

4128 else { // channel == 1

4129 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;

4130 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

4131 waveFormat.wBitsPerSample = 8;

4132 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

4134 else { // assume 16-bit is supported

4135 waveFormat.wBitsPerSample = 16;

4136 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

4139 stream_.userFormat = format;

4141 // Update wave format structure and buffer information.

4142 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

4143 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

4144 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

4146 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

4147 while ( dsPointerLeadTime * 2U > dsBufferSize )

4148 dsBufferSize *= 2;

4150 // Setup the secondary DS buffer description.

4151 DSCBUFFERDESC bufferDescription;

4152 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );

4153 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );

4154 bufferDescription.dwFlags = 0;

4155 bufferDescription.dwReserved = 0;

4156 bufferDescription.dwBufferBytes = dsBufferSize;

4157 bufferDescription.lpwfxFormat = &waveFormat;

4159 // Create the capture buffer.

4160 LPDIRECTSOUNDCAPTUREBUFFER buffer;

4161 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );

4162 if ( FAILED( result ) ) {

4164 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";

4165 errorText_ = errorStream_.str();

4169 // Get the buffer size ... might be different from what we specified.

4170 DSCBCAPS dscbcaps;

4171 dscbcaps.dwSize = sizeof( DSCBCAPS );

4172 result = buffer->GetCaps( &dscbcaps );

4173 if ( FAILED( result ) ) {

4175 buffer->Release();

4176 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

4177 errorText_ = errorStream_.str();

4181 dsBufferSize = dscbcaps.dwBufferBytes;

4183 // NOTE: We could have a problem here if this is a duplex stream

4184 // and the play and capture hardware buffer sizes are different

4185 // (I'm actually not sure if that is a problem or not).

4186 // Currently, we are not verifying that.

4188 // Lock the capture buffer

4191 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

4192 if ( FAILED( result ) ) {

4194 buffer->Release();

4195 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";

4196 errorText_ = errorStream_.str();

4200 // Zero the buffer

4201 ZeroMemory( audioPtr, dataLen );

4203 // Unlock the buffer

4204 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

4205 if ( FAILED( result ) ) {

4207 buffer->Release();

4208 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";

4209 errorText_ = errorStream_.str();

4213 ohandle = (void *) input;

4214 bhandle = (void *) buffer;

4217 // Set various stream parameters

4218 DsHandle *handle = 0;

4219 stream_.nDeviceChannels[mode] = channels + firstChannel;

4220 stream_.nUserChannels[mode] = channels;

4221 stream_.bufferSize = *bufferSize;

4222 stream_.channelOffset[mode] = firstChannel;

4223 stream_.deviceInterleaved[mode] = true;

4224 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

4225 else stream_.userInterleaved = true;

// A user<->device conversion buffer is needed whenever channel count,
// sample format or interleaving differ between the two sides.
4227 // Set flag for buffer conversion

4228 stream_.doConvertBuffer[mode] = false;

4229 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])

4230 stream_.doConvertBuffer[mode] = true;

4231 if (stream_.userFormat != stream_.deviceFormat[mode])

4232 stream_.doConvertBuffer[mode] = true;

4233 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

4234 stream_.nUserChannels[mode] > 1 )

4235 stream_.doConvertBuffer[mode] = true;

4237 // Allocate necessary internal buffers

4238 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

4239 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

4240 if ( stream_.userBuffer[mode] == NULL ) {

4241 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";

4245 if ( stream_.doConvertBuffer[mode] ) {

// Reuse the existing device buffer from the output side of a duplex
// stream when it is already large enough.
4247 bool makeBuffer = true;

4248 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

4249 if ( mode == INPUT ) {

4250 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

4251 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

4252 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;

4256 if ( makeBuffer ) {

4257 bufferBytes *= *bufferSize;

4258 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

4259 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

4260 if ( stream_.deviceBuffer == NULL ) {

4261 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";

4267 // Allocate our DsHandle structures for the stream.

4268 if ( stream_.apiHandle == 0 ) {

4270 handle = new DsHandle;

4272 catch ( std::bad_alloc& ) {

// NOTE(review): message says "AsioHandle" but this allocates a
// DsHandle — looks like a copy/paste slip from the ASIO backend.
4273 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";

4277 // Create a manual-reset event.

4278 handle->condition = CreateEvent( NULL, // no security

4279 TRUE, // manual-reset

4280 FALSE, // non-signaled initially

4281 NULL ); // unnamed

4282 stream_.apiHandle = (void *) handle;

4285 handle = (DsHandle *) stream_.apiHandle;

4286 handle->id[mode] = ohandle;

4287 handle->buffer[mode] = bhandle;

4288 handle->dsBufferSize[mode] = dsBufferSize;

4289 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;

4291 stream_.device[mode] = device;

4292 stream_.state = STREAM_STOPPED;

4293 if ( stream_.mode == OUTPUT && mode == INPUT )

4294 // We had already set up an output stream.

4295 stream_.mode = DUPLEX;

4297 stream_.mode = mode;

4298 stream_.nBuffers = nBuffers;

4299 stream_.sampleRate = sampleRate;

4301 // Setup the buffer conversion information structure.

4302 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

4304 // Setup the callback thread.

4305 if ( stream_.callbackInfo.isRunning == false ) {

4306 unsigned threadId;

4307 stream_.callbackInfo.isRunning = true;

4308 stream_.callbackInfo.object = (void *) this;

4309 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,

4310 &stream_.callbackInfo, 0, &threadId );

4311 if ( stream_.callbackInfo.thread == 0 ) {

4312 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";

4316 // Boost DS thread priority

4317 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );

// Shared error cleanup (the "error:" label is elided): release any COM
// objects and buffers created above, free allocated memory, and leave
// the stream in the CLOSED state.
4323 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

4324 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

4325 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4326 if ( buffer ) buffer->Release();

4327 object->Release();

4329 if ( handle->buffer[1] ) {

4330 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

4331 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4332 if ( buffer ) buffer->Release();

4333 object->Release();

4335 CloseHandle( handle->condition );

4337 stream_.apiHandle = 0;

4340 for ( int i=0; i<2; i++ ) {

4341 if ( stream_.userBuffer[i] ) {

4342 free( stream_.userBuffer[i] );

4343 stream_.userBuffer[i] = 0;

4347 if ( stream_.deviceBuffer ) {

4348 free( stream_.deviceBuffer );

4349 stream_.deviceBuffer = 0;

4352 stream_.state = STREAM_CLOSED;
\r
// Tear down an open stream: stop and join the callback thread, release
// the DirectSound playback/capture objects and buffers, free the user
// and device buffers, and mark the stream CLOSED.
// NOTE(review): elided extraction — braces, the early "return;" after
// the warning, and the buffer->Stop() / Lock-zero-Unlock cleanup inside
// the two Release blocks are missing from the visible text.
4356 void RtApiDs :: closeStream()

4358 if ( stream_.state == STREAM_CLOSED ) {

4359 errorText_ = "RtApiDs::closeStream(): no open stream to close!";

4360 error( RtAudioError::WARNING );

4364 // Stop the callback thread.

// Clearing isRunning signals the callback thread's loop to exit; we
// then wait for it and reclaim the thread handle.
4365 stream_.callbackInfo.isRunning = false;

4366 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );

4367 CloseHandle( (HANDLE) stream_.callbackInfo.thread );

4369 DsHandle *handle = (DsHandle *) stream_.apiHandle;

4371 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

4372 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

4373 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4376 buffer->Release();

4378 object->Release();

4380 if ( handle->buffer[1] ) {

4381 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

4382 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4385 buffer->Release();

4387 object->Release();

4389 CloseHandle( handle->condition );

// The "delete handle;" that pairs with the allocation in
// probeDeviceOpen is elided from the visible text here.
4391 stream_.apiHandle = 0;

4394 for ( int i=0; i<2; i++ ) {

4395 if ( stream_.userBuffer[i] ) {

4396 free( stream_.userBuffer[i] );

4397 stream_.userBuffer[i] = 0;

4401 if ( stream_.deviceBuffer ) {

4402 free( stream_.deviceBuffer );

4403 stream_.deviceBuffer = 0;

4406 stream_.mode = UNINITIALIZED;

4407 stream_.state = STREAM_CLOSED;
\r
// Start a stopped stream: raise the system timer resolution, reset the
// xrun/preroll bookkeeping, start the DS playback and/or capture
// buffers in looping mode, and mark the stream RUNNING.
// NOTE(review): elided extraction — braces, verifyStream()/early
// returns, and the "goto unlock" error jumps after each errorText_
// assignment are missing from the visible text.
4410 void RtApiDs :: startStream()

4413 if ( stream_.state == STREAM_RUNNING ) {

4414 errorText_ = "RtApiDs::startStream(): the stream is already running!";

4415 error( RtAudioError::WARNING );

4419 DsHandle *handle = (DsHandle *) stream_.apiHandle;

4421 // Increase scheduler frequency on lesser windows (a side-effect of

4422 // increasing timer accuracy). On greater windows (Win2K or later),

4423 // this is already in effect.

4424 timeBeginPeriod( 1 );

4426 buffersRolling = false;

4427 duplexPrerollBytes = 0;

4429 if ( stream_.mode == DUPLEX ) {

4430 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.

4431 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );

4434 HRESULT result = 0;

4435 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

4437 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

// Looping playback: the callback thread chases the cursors and refills.
4438 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );

4439 if ( FAILED( result ) ) {

4440 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";

4441 errorText_ = errorStream_.str();

4446 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

4448 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4449 result = buffer->Start( DSCBSTART_LOOPING );

4450 if ( FAILED( result ) ) {

4451 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";

4452 errorText_ = errorStream_.str();

4457 handle->drainCounter = 0;

4458 handle->internalDrain = false;

// Make sure the drain condition starts non-signaled for stopStream().
4459 ResetEvent( handle->condition );

4460 stream_.state = STREAM_RUNNING;

4463 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
4466 void RtApiDs :: stopStream()
\r
4469 if ( stream_.state == STREAM_STOPPED ) {
\r
4470 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
4471 error( RtAudioError::WARNING );
\r
4475 HRESULT result = 0;
\r
4478 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4479 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4480 if ( handle->drainCounter == 0 ) {
\r
4481 handle->drainCounter = 2;
\r
4482 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
4485 stream_.state = STREAM_STOPPED;
\r
4487 // Stop the buffer and clear memory
\r
4488 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4489 result = buffer->Stop();
\r
4490 if ( FAILED( result ) ) {
\r
4491 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
4492 errorText_ = errorStream_.str();
\r
4496 // Lock the buffer and clear it so that if we start to play again,
\r
4497 // we won't have old data playing.
\r
4498 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4499 if ( FAILED( result ) ) {
\r
4500 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
4501 errorText_ = errorStream_.str();
\r
4505 // Zero the DS buffer
\r
4506 ZeroMemory( audioPtr, dataLen );
\r
4508 // Unlock the DS buffer
\r
4509 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4510 if ( FAILED( result ) ) {
\r
4511 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
4512 errorText_ = errorStream_.str();
\r
4516 // If we start playing again, we must begin at beginning of buffer.
\r
4517 handle->bufferPointer[0] = 0;
\r
4520 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4521 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4525 stream_.state = STREAM_STOPPED;
\r
4527 result = buffer->Stop();
\r
4528 if ( FAILED( result ) ) {
\r
4529 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
4530 errorText_ = errorStream_.str();
\r
4534 // Lock the buffer and clear it so that if we start to play again,
\r
4535 // we won't have old data playing.
\r
4536 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4537 if ( FAILED( result ) ) {
\r
4538 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
4539 errorText_ = errorStream_.str();
\r
4543 // Zero the DS buffer
\r
4544 ZeroMemory( audioPtr, dataLen );
\r
4546 // Unlock the DS buffer
\r
4547 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4548 if ( FAILED( result ) ) {
\r
4549 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
4550 errorText_ = errorStream_.str();
\r
4554 // If we start recording again, we must begin at beginning of buffer.
\r
4555 handle->bufferPointer[1] = 0;
\r
4559 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
4560 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
4563 void RtApiDs :: abortStream()
\r
4566 if ( stream_.state == STREAM_STOPPED ) {
\r
4567 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
4568 error( RtAudioError::WARNING );
\r
4572 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4573 handle->drainCounter = 2;
\r
4578 void RtApiDs :: callbackEvent()
\r
4580 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
4581 Sleep( 50 ); // sleep 50 milliseconds
\r
4585 if ( stream_.state == STREAM_CLOSED ) {
\r
4586 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
4587 error( RtAudioError::WARNING );
\r
4591 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
4592 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4594 // Check if we were draining the stream and signal is finished.
\r
4595 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
4597 stream_.state = STREAM_STOPPING;
\r
4598 if ( handle->internalDrain == false )
\r
4599 SetEvent( handle->condition );
\r
4605 // Invoke user callback to get fresh output data UNLESS we are
\r
4606 // draining stream.
\r
4607 if ( handle->drainCounter == 0 ) {
\r
4608 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
4609 double streamTime = getStreamTime();
\r
4610 RtAudioStreamStatus status = 0;
\r
4611 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
4612 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
4613 handle->xrun[0] = false;
\r
4615 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
4616 status |= RTAUDIO_INPUT_OVERFLOW;
\r
4617 handle->xrun[1] = false;
\r
4619 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
4620 stream_.bufferSize, streamTime, status, info->userData );
\r
4621 if ( cbReturnValue == 2 ) {
\r
4622 stream_.state = STREAM_STOPPING;
\r
4623 handle->drainCounter = 2;
\r
4627 else if ( cbReturnValue == 1 ) {
\r
4628 handle->drainCounter = 1;
\r
4629 handle->internalDrain = true;
\r
4634 DWORD currentWritePointer, safeWritePointer;
\r
4635 DWORD currentReadPointer, safeReadPointer;
\r
4636 UINT nextWritePointer;
\r
4638 LPVOID buffer1 = NULL;
\r
4639 LPVOID buffer2 = NULL;
\r
4640 DWORD bufferSize1 = 0;
\r
4641 DWORD bufferSize2 = 0;
\r
4646 if ( buffersRolling == false ) {
\r
4647 if ( stream_.mode == DUPLEX ) {
\r
4648 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4650 // It takes a while for the devices to get rolling. As a result,
\r
4651 // there's no guarantee that the capture and write device pointers
\r
4652 // will move in lockstep. Wait here for both devices to start
\r
4653 // rolling, and then set our buffer pointers accordingly.
\r
4654 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
4655 // bytes later than the write buffer.
\r
4657 // Stub: a serious risk of having a pre-emptive scheduling round
\r
4658 // take place between the two GetCurrentPosition calls... but I'm
\r
4659 // really not sure how to solve the problem. Temporarily boost to
\r
4660 // Realtime priority, maybe; but I'm not sure what priority the
\r
4661 // DirectSound service threads run at. We *should* be roughly
\r
4662 // within a ms or so of correct.
\r
4664 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4665 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4667 DWORD startSafeWritePointer, startSafeReadPointer;
\r
4669 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
4670 if ( FAILED( result ) ) {
\r
4671 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4672 errorText_ = errorStream_.str();
\r
4673 error( RtAudioError::SYSTEM_ERROR );
\r
4676 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
4677 if ( FAILED( result ) ) {
\r
4678 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4679 errorText_ = errorStream_.str();
\r
4680 error( RtAudioError::SYSTEM_ERROR );
\r
4684 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
4685 if ( FAILED( result ) ) {
\r
4686 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4687 errorText_ = errorStream_.str();
\r
4688 error( RtAudioError::SYSTEM_ERROR );
\r
4691 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
4692 if ( FAILED( result ) ) {
\r
4693 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4694 errorText_ = errorStream_.str();
\r
4695 error( RtAudioError::SYSTEM_ERROR );
\r
4698 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
4702 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4704 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4705 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4706 handle->bufferPointer[1] = safeReadPointer;
\r
4708 else if ( stream_.mode == OUTPUT ) {
\r
4710 // Set the proper nextWritePosition after initial startup.
\r
4711 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4712 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4713 if ( FAILED( result ) ) {
\r
4714 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4715 errorText_ = errorStream_.str();
\r
4716 error( RtAudioError::SYSTEM_ERROR );
\r
4719 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4720 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4723 buffersRolling = true;
\r
4726 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4728 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4730 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
4731 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4732 bufferBytes *= formatBytes( stream_.userFormat );
\r
4733 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
4736 // Setup parameters and do buffer conversion if necessary.
\r
4737 if ( stream_.doConvertBuffer[0] ) {
\r
4738 buffer = stream_.deviceBuffer;
\r
4739 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
4740 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
4741 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
4744 buffer = stream_.userBuffer[0];
\r
4745 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4746 bufferBytes *= formatBytes( stream_.userFormat );
\r
4749 // No byte swapping necessary in DirectSound implementation.
\r
4751 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
4752 // unsigned. So, we need to convert our signed 8-bit data here to
\r
4754 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
4755 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
4757 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
4758 nextWritePointer = handle->bufferPointer[0];
\r
4760 DWORD endWrite, leadPointer;
\r
4762 // Find out where the read and "safe write" pointers are.
\r
4763 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4764 if ( FAILED( result ) ) {
\r
4765 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4766 errorText_ = errorStream_.str();
\r
4767 error( RtAudioError::SYSTEM_ERROR );
\r
4771 // We will copy our output buffer into the region between
\r
4772 // safeWritePointer and leadPointer. If leadPointer is not
\r
4773 // beyond the next endWrite position, wait until it is.
\r
4774 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4775 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
4776 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
4777 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
4778 endWrite = nextWritePointer + bufferBytes;
\r
4780 // Check whether the entire write region is behind the play pointer.
\r
4781 if ( leadPointer >= endWrite ) break;
\r
4783 // If we are here, then we must wait until the leadPointer advances
\r
4784 // beyond the end of our next write region. We use the
\r
4785 // Sleep() function to suspend operation until that happens.
\r
4786 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
4787 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
4788 if ( millis < 1.0 ) millis = 1.0;
\r
4789 Sleep( (DWORD) millis );
\r
4792 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
4793 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
4794 // We've strayed into the forbidden zone ... resync the read pointer.
\r
4795 handle->xrun[0] = true;
\r
4796 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
4797 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
4798 handle->bufferPointer[0] = nextWritePointer;
\r
4799 endWrite = nextWritePointer + bufferBytes;
\r
4802 // Lock free space in the buffer
\r
4803 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
4804 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4805 if ( FAILED( result ) ) {
\r
4806 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
4807 errorText_ = errorStream_.str();
\r
4808 error( RtAudioError::SYSTEM_ERROR );
\r
4812 // Copy our buffer into the DS buffer
\r
4813 CopyMemory( buffer1, buffer, bufferSize1 );
\r
4814 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
4816 // Update our buffer offset and unlock sound buffer
\r
4817 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4818 if ( FAILED( result ) ) {
\r
4819 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
4820 errorText_ = errorStream_.str();
\r
4821 error( RtAudioError::SYSTEM_ERROR );
\r
4824 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4825 handle->bufferPointer[0] = nextWritePointer;
\r
4827 if ( handle->drainCounter ) {
\r
4828 handle->drainCounter++;
\r
4833 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4835 // Setup parameters.
\r
4836 if ( stream_.doConvertBuffer[1] ) {
\r
4837 buffer = stream_.deviceBuffer;
\r
4838 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
4839 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
4842 buffer = stream_.userBuffer[1];
\r
4843 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
4844 bufferBytes *= formatBytes( stream_.userFormat );
\r
4847 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4848 long nextReadPointer = handle->bufferPointer[1];
\r
4849 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
4851 // Find out where the write and "safe read" pointers are.
\r
4852 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4853 if ( FAILED( result ) ) {
\r
4854 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4855 errorText_ = errorStream_.str();
\r
4856 error( RtAudioError::SYSTEM_ERROR );
\r
4860 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4861 DWORD endRead = nextReadPointer + bufferBytes;
\r
4863 // Handling depends on whether we are INPUT or DUPLEX.
\r
4864 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
4865 // then a wait here will drag the write pointers into the forbidden zone.
\r
4867 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
4868 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
4869 // practical way to sync up the read and write pointers reliably, given the
\r
4870 // the very complex relationship between phase and increment of the read and write
\r
4873 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
4874 // provide a pre-roll period of 0.5 seconds in which we return
\r
4875 // zeros from the read buffer while the pointers sync up.
\r
4877 if ( stream_.mode == DUPLEX ) {
\r
4878 if ( safeReadPointer < endRead ) {
\r
4879 if ( duplexPrerollBytes <= 0 ) {
\r
4880 // Pre-roll time over. Be more agressive.
\r
4881 int adjustment = endRead-safeReadPointer;
\r
4883 handle->xrun[1] = true;
\r
4885 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
4886 // and perform fine adjustments later.
\r
4887 // - small adjustments: back off by twice as much.
\r
4888 if ( adjustment >= 2*bufferBytes )
\r
4889 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
4891 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
4893 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4897 // In pre=roll time. Just do it.
\r
4898 nextReadPointer = safeReadPointer - bufferBytes;
\r
4899 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4901 endRead = nextReadPointer + bufferBytes;
\r
4904 else { // mode == INPUT
\r
4905 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
4906 // See comments for playback.
\r
4907 double millis = (endRead - safeReadPointer) * 1000.0;
\r
4908 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
4909 if ( millis < 1.0 ) millis = 1.0;
\r
4910 Sleep( (DWORD) millis );
\r
4912 // Wake up and find out where we are now.
\r
4913 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4914 if ( FAILED( result ) ) {
\r
4915 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4916 errorText_ = errorStream_.str();
\r
4917 error( RtAudioError::SYSTEM_ERROR );
\r
4921 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4925 // Lock free space in the buffer
\r
4926 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
4927 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4928 if ( FAILED( result ) ) {
\r
4929 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
4930 errorText_ = errorStream_.str();
\r
4931 error( RtAudioError::SYSTEM_ERROR );
\r
4935 if ( duplexPrerollBytes <= 0 ) {
\r
4936 // Copy our buffer into the DS buffer
\r
4937 CopyMemory( buffer, buffer1, bufferSize1 );
\r
4938 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
4941 memset( buffer, 0, bufferSize1 );
\r
4942 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
4943 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
4946 // Update our buffer offset and unlock sound buffer
\r
4947 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4948 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4949 if ( FAILED( result ) ) {
\r
4950 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
4951 errorText_ = errorStream_.str();
\r
4952 error( RtAudioError::SYSTEM_ERROR );
\r
4955 handle->bufferPointer[1] = nextReadPointer;
\r
4957 // No byte swapping necessary in DirectSound implementation.
\r
4959 // If necessary, convert 8-bit data from unsigned to signed.
\r
4960 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
4961 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
4963 // Do buffer conversion if necessary.
\r
4964 if ( stream_.doConvertBuffer[1] )
\r
4965 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
4969 RtApi::tickStreamTime();
\r
4972 // Definitions for utility functions and callbacks
\r
4973 // specific to the DirectSound implementation.
\r
4975 static unsigned __stdcall callbackHandler( void *ptr )
\r
4977 CallbackInfo *info = (CallbackInfo *) ptr;
\r
4978 RtApiDs *object = (RtApiDs *) info->object;
\r
4979 bool* isRunning = &info->isRunning;
\r
4981 while ( *isRunning == true ) {
\r
4982 object->callbackEvent();
\r
4985 _endthreadex( 0 );
\r
4989 #include "tchar.h"
\r
4991 static std::string convertTChar( LPCTSTR name )
\r
4993 #if defined( UNICODE ) || defined( _UNICODE )
\r
4994 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
4995 std::string s( length-1, '\0' );
\r
4996 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);
\r
4998 std::string s( name );
\r
5004 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5005 LPCTSTR description,
\r
5007 LPVOID lpContext )
\r
5009 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
5010 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
5013 bool validDevice = false;
\r
5014 if ( probeInfo.isInput == true ) {
\r
5016 LPDIRECTSOUNDCAPTURE object;
\r
5018 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
5019 if ( hr != DS_OK ) return TRUE;
\r
5021 caps.dwSize = sizeof(caps);
\r
5022 hr = object->GetCaps( &caps );
\r
5023 if ( hr == DS_OK ) {
\r
5024 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
5025 validDevice = true;
\r
5027 object->Release();
\r
5031 LPDIRECTSOUND object;
\r
5032 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
5033 if ( hr != DS_OK ) return TRUE;
\r
5035 caps.dwSize = sizeof(caps);
\r
5036 hr = object->GetCaps( &caps );
\r
5037 if ( hr == DS_OK ) {
\r
5038 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
5039 validDevice = true;
\r
5041 object->Release();
\r
5044 // If good device, then save its name and guid.
\r
5045 std::string name = convertTChar( description );
\r
5046 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
5047 if ( lpguid == NULL )
\r
5048 name = "Default Device";
\r
5049 if ( validDevice ) {
\r
5050 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
5051 if ( dsDevices[i].name == name ) {
\r
5052 dsDevices[i].found = true;
\r
5053 if ( probeInfo.isInput ) {
\r
5054 dsDevices[i].id[1] = lpguid;
\r
5055 dsDevices[i].validId[1] = true;
\r
5058 dsDevices[i].id[0] = lpguid;
\r
5059 dsDevices[i].validId[0] = true;
\r
5066 device.name = name;
\r
5067 device.found = true;
\r
5068 if ( probeInfo.isInput ) {
\r
5069 device.id[1] = lpguid;
\r
5070 device.validId[1] = true;
\r
5073 device.id[0] = lpguid;
\r
5074 device.validId[0] = true;
\r
5076 dsDevices.push_back( device );
\r
5082 static const char* getErrorString( int code )
\r
5086 case DSERR_ALLOCATED:
\r
5087 return "Already allocated";
\r
5089 case DSERR_CONTROLUNAVAIL:
\r
5090 return "Control unavailable";
\r
5092 case DSERR_INVALIDPARAM:
\r
5093 return "Invalid parameter";
\r
5095 case DSERR_INVALIDCALL:
\r
5096 return "Invalid call";
\r
5098 case DSERR_GENERIC:
\r
5099 return "Generic error";
\r
5101 case DSERR_PRIOLEVELNEEDED:
\r
5102 return "Priority level needed";
\r
5104 case DSERR_OUTOFMEMORY:
\r
5105 return "Out of memory";
\r
5107 case DSERR_BADFORMAT:
\r
5108 return "The sample rate or the channel format is not supported";
\r
5110 case DSERR_UNSUPPORTED:
\r
5111 return "Not supported";
\r
5113 case DSERR_NODRIVER:
\r
5114 return "No driver";
\r
5116 case DSERR_ALREADYINITIALIZED:
\r
5117 return "Already initialized";
\r
5119 case DSERR_NOAGGREGATION:
\r
5120 return "No aggregation";
\r
5122 case DSERR_BUFFERLOST:
\r
5123 return "Buffer lost";
\r
5125 case DSERR_OTHERAPPHASPRIO:
\r
5126 return "Another application already has priority";
\r
5128 case DSERR_UNINITIALIZED:
\r
5129 return "Uninitialized";
\r
5132 return "DirectSound unknown error";
\r
5135 //******************** End of __WINDOWS_DS__ *********************//
\r
5139 #if defined(__LINUX_ALSA__)
\r
5141 #include <alsa/asoundlib.h>
\r
5142 #include <unistd.h>
\r
5144 // A structure to hold various information related to the ALSA API
\r
5145 // implementation.
\r
5146 struct AlsaHandle {
\r
5147 snd_pcm_t *handles[2];
\r
5148 bool synchronized;
\r
5150 pthread_cond_t runnable_cv;
\r
5154 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
5157 static void *alsaCallbackHandler( void * ptr );
\r
5159 RtApiAlsa :: RtApiAlsa()
\r
5161 // Nothing to do here.
\r
5164 RtApiAlsa :: ~RtApiAlsa()
\r
5166 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5169 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
5171 unsigned nDevices = 0;
\r
5172 int result, subdevice, card;
\r
5174 snd_ctl_t *handle;
\r
5176 // Count cards and devices
\r
5178 snd_card_next( &card );
\r
5179 while ( card >= 0 ) {
\r
5180 sprintf( name, "hw:%d", card );
\r
5181 result = snd_ctl_open( &handle, name, 0 );
\r
5182 if ( result < 0 ) {
\r
5183 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5184 errorText_ = errorStream_.str();
\r
5185 error( RtAudioError::WARNING );
\r
5190 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
5191 if ( result < 0 ) {
\r
5192 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5193 errorText_ = errorStream_.str();
\r
5194 error( RtAudioError::WARNING );
\r
5197 if ( subdevice < 0 )
\r
5202 snd_ctl_close( handle );
\r
5203 snd_card_next( &card );
\r
5206 result = snd_ctl_open( &handle, "default", 0 );
\r
5207 if (result == 0) {
\r
5209 snd_ctl_close( handle );
\r
5215 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
5217 RtAudio::DeviceInfo info;
\r
5218 info.probed = false;
\r
5220 unsigned nDevices = 0;
\r
5221 int result, subdevice, card;
\r
5223 snd_ctl_t *chandle;
\r
5225 // Count cards and devices
\r
5227 snd_card_next( &card );
\r
5228 while ( card >= 0 ) {
\r
5229 sprintf( name, "hw:%d", card );
\r
5230 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5231 if ( result < 0 ) {
\r
5232 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5233 errorText_ = errorStream_.str();
\r
5234 error( RtAudioError::WARNING );
\r
5239 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5240 if ( result < 0 ) {
\r
5241 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5242 errorText_ = errorStream_.str();
\r
5243 error( RtAudioError::WARNING );
\r
5246 if ( subdevice < 0 ) break;
\r
5247 if ( nDevices == device ) {
\r
5248 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5254 snd_ctl_close( chandle );
\r
5255 snd_card_next( &card );
\r
5258 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
5259 if ( result == 0 ) {
\r
5260 if ( nDevices == device ) {
\r
5261 strcpy( name, "default" );
\r
5267 if ( nDevices == 0 ) {
\r
5268 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
5269 error( RtAudioError::INVALID_USE );
\r
5273 if ( device >= nDevices ) {
\r
5274 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
5275 error( RtAudioError::INVALID_USE );
\r
5281 // If a stream is already open, we cannot probe the stream devices.
\r
5282 // Thus, use the saved results.
\r
5283 if ( stream_.state != STREAM_CLOSED &&
\r
5284 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
5285 snd_ctl_close( chandle );
\r
5286 if ( device >= devices_.size() ) {
\r
5287 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
5288 error( RtAudioError::WARNING );
\r
5291 return devices_[ device ];
\r
5294 int openMode = SND_PCM_ASYNC;
\r
5295 snd_pcm_stream_t stream;
\r
5296 snd_pcm_info_t *pcminfo;
\r
5297 snd_pcm_info_alloca( &pcminfo );
\r
5298 snd_pcm_t *phandle;
\r
5299 snd_pcm_hw_params_t *params;
\r
5300 snd_pcm_hw_params_alloca( ¶ms );
\r
5302 // First try for playback unless default device (which has subdev -1)
\r
5303 stream = SND_PCM_STREAM_PLAYBACK;
\r
5304 snd_pcm_info_set_stream( pcminfo, stream );
\r
5305 if ( subdevice != -1 ) {
\r
5306 snd_pcm_info_set_device( pcminfo, subdevice );
\r
5307 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
5309 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5310 if ( result < 0 ) {
\r
5311 // Device probably doesn't support playback.
\r
5312 goto captureProbe;
\r
5316 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
5317 if ( result < 0 ) {
\r
5318 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5319 errorText_ = errorStream_.str();
\r
5320 error( RtAudioError::WARNING );
\r
5321 goto captureProbe;
\r
5324 // The device is open ... fill the parameter structure.
\r
5325 result = snd_pcm_hw_params_any( phandle, params );
\r
5326 if ( result < 0 ) {
\r
5327 snd_pcm_close( phandle );
\r
5328 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5329 errorText_ = errorStream_.str();
\r
5330 error( RtAudioError::WARNING );
\r
5331 goto captureProbe;
\r
5334 // Get output channel information.
\r
5335 unsigned int value;
\r
5336 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5337 if ( result < 0 ) {
\r
5338 snd_pcm_close( phandle );
\r
5339 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
5340 errorText_ = errorStream_.str();
\r
5341 error( RtAudioError::WARNING );
\r
5342 goto captureProbe;
\r
5344 info.outputChannels = value;
\r
5345 snd_pcm_close( phandle );
\r
5348 stream = SND_PCM_STREAM_CAPTURE;
\r
5349 snd_pcm_info_set_stream( pcminfo, stream );
\r
5351 // Now try for capture unless default device (with subdev = -1)
\r
5352 if ( subdevice != -1 ) {
\r
5353 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5354 snd_ctl_close( chandle );
\r
5355 if ( result < 0 ) {
\r
5356 // Device probably doesn't support capture.
\r
5357 if ( info.outputChannels == 0 ) return info;
\r
5358 goto probeParameters;
\r
5362 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5363 if ( result < 0 ) {
\r
5364 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5365 errorText_ = errorStream_.str();
\r
5366 error( RtAudioError::WARNING );
\r
5367 if ( info.outputChannels == 0 ) return info;
\r
5368 goto probeParameters;
\r
5371 // The device is open ... fill the parameter structure.
\r
5372 result = snd_pcm_hw_params_any( phandle, params );
\r
5373 if ( result < 0 ) {
\r
5374 snd_pcm_close( phandle );
\r
5375 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5376 errorText_ = errorStream_.str();
\r
5377 error( RtAudioError::WARNING );
\r
5378 if ( info.outputChannels == 0 ) return info;
\r
5379 goto probeParameters;
\r
5382 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5383 if ( result < 0 ) {
\r
5384 snd_pcm_close( phandle );
\r
5385 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
5386 errorText_ = errorStream_.str();
\r
5387 error( RtAudioError::WARNING );
\r
5388 if ( info.outputChannels == 0 ) return info;
\r
5389 goto probeParameters;
\r
5391 info.inputChannels = value;
\r
5392 snd_pcm_close( phandle );
\r
5394 // If device opens for both playback and capture, we determine the channels.
\r
5395 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5396 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5398 // ALSA doesn't provide default devices so we'll use the first available one.
\r
5399 if ( device == 0 && info.outputChannels > 0 )
\r
5400 info.isDefaultOutput = true;
\r
5401 if ( device == 0 && info.inputChannels > 0 )
\r
5402 info.isDefaultInput = true;
\r
5405 // At this point, we just need to figure out the supported data
\r
5406 // formats and sample rates. We'll proceed by opening the device in
\r
5407 // the direction with the maximum number of channels, or playback if
\r
5408 // they are equal. This might limit our sample rate options, but so
\r
5411 if ( info.outputChannels >= info.inputChannels )
\r
5412 stream = SND_PCM_STREAM_PLAYBACK;
\r
5414 stream = SND_PCM_STREAM_CAPTURE;
\r
5415 snd_pcm_info_set_stream( pcminfo, stream );
\r
5417 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5418 if ( result < 0 ) {
\r
5419 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5420 errorText_ = errorStream_.str();
\r
5421 error( RtAudioError::WARNING );
\r
5425 // The device is open ... fill the parameter structure.
\r
5426 result = snd_pcm_hw_params_any( phandle, params );
\r
5427 if ( result < 0 ) {
\r
5428 snd_pcm_close( phandle );
\r
5429 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5430 errorText_ = errorStream_.str();
\r
5431 error( RtAudioError::WARNING );
\r
5435 // Test our discrete set of sample rate values.
\r
5436 info.sampleRates.clear();
\r
5437 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
5438 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
5439 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
5441 if ( info.sampleRates.size() == 0 ) {
\r
5442 snd_pcm_close( phandle );
\r
5443 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
5444 errorText_ = errorStream_.str();
\r
5445 error( RtAudioError::WARNING );
\r
5449 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
5450 snd_pcm_format_t format;
\r
5451 info.nativeFormats = 0;
\r
5452 format = SND_PCM_FORMAT_S8;
\r
5453 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5454 info.nativeFormats |= RTAUDIO_SINT8;
\r
5455 format = SND_PCM_FORMAT_S16;
\r
5456 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5457 info.nativeFormats |= RTAUDIO_SINT16;
\r
5458 format = SND_PCM_FORMAT_S24;
\r
5459 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5460 info.nativeFormats |= RTAUDIO_SINT24;
\r
5461 format = SND_PCM_FORMAT_S32;
\r
5462 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5463 info.nativeFormats |= RTAUDIO_SINT32;
\r
5464 format = SND_PCM_FORMAT_FLOAT;
\r
5465 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5466 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
5467 format = SND_PCM_FORMAT_FLOAT64;
\r
5468 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5469 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
5471 // Check that we have at least one supported format
\r
5472 if ( info.nativeFormats == 0 ) {
\r
5473 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
5474 errorText_ = errorStream_.str();
\r
5475 error( RtAudioError::WARNING );
\r
5479 // Get the device name
\r
5481 result = snd_card_get_name( card, &cardname );
\r
5482 if ( result >= 0 )
\r
5483 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
5486 // That's all ... close the device and return
\r
5487 snd_pcm_close( phandle );
\r
5488 info.probed = true;
\r
5492 void RtApiAlsa :: saveDeviceInfo( void )
\r
5496 unsigned int nDevices = getDeviceCount();
\r
5497 devices_.resize( nDevices );
\r
5498 for ( unsigned int i=0; i<nDevices; i++ )
\r
5499 devices_[i] = getDeviceInfo( i );
\r
5502 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5503 unsigned int firstChannel, unsigned int sampleRate,
\r
5504 RtAudioFormat format, unsigned int *bufferSize,
\r
5505 RtAudio::StreamOptions *options )
\r
5508 #if defined(__RTAUDIO_DEBUG__)
\r
5509 snd_output_t *out;
\r
5510 snd_output_stdio_attach(&out, stderr, 0);
\r
5513 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
5515 unsigned nDevices = 0;
\r
5516 int result, subdevice, card;
\r
5518 snd_ctl_t *chandle;
\r
5520 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
5521 snprintf(name, sizeof(name), "%s", "default");
\r
5523 // Count cards and devices
\r
5525 snd_card_next( &card );
\r
5526 while ( card >= 0 ) {
\r
5527 sprintf( name, "hw:%d", card );
\r
5528 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5529 if ( result < 0 ) {
\r
5530 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5531 errorText_ = errorStream_.str();
\r
5536 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5537 if ( result < 0 ) break;
\r
5538 if ( subdevice < 0 ) break;
\r
5539 if ( nDevices == device ) {
\r
5540 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5541 snd_ctl_close( chandle );
\r
5546 snd_ctl_close( chandle );
\r
5547 snd_card_next( &card );
\r
5550 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
5551 if ( result == 0 ) {
\r
5552 if ( nDevices == device ) {
\r
5553 strcpy( name, "default" );
\r
5559 if ( nDevices == 0 ) {
\r
5560 // This should not happen because a check is made before this function is called.
\r
5561 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
5565 if ( device >= nDevices ) {
\r
5566 // This should not happen because a check is made before this function is called.
\r
5567 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
5574 // The getDeviceInfo() function will not work for a device that is
\r
5575 // already open. Thus, we'll probe the system before opening a
\r
5576 // stream and save the results for use by getDeviceInfo().
\r
5577 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
5578 this->saveDeviceInfo();
\r
5580 snd_pcm_stream_t stream;
\r
5581 if ( mode == OUTPUT )
\r
5582 stream = SND_PCM_STREAM_PLAYBACK;
\r
5584 stream = SND_PCM_STREAM_CAPTURE;
\r
5586 snd_pcm_t *phandle;
\r
5587 int openMode = SND_PCM_ASYNC;
\r
5588 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
5589 if ( result < 0 ) {
\r
5590 if ( mode == OUTPUT )
\r
5591 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
5593 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
5594 errorText_ = errorStream_.str();
\r
5598 // Fill the parameter structure.
\r
5599 snd_pcm_hw_params_t *hw_params;
\r
5600 snd_pcm_hw_params_alloca( &hw_params );
\r
5601 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
5602 if ( result < 0 ) {
\r
5603 snd_pcm_close( phandle );
\r
5604 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
5605 errorText_ = errorStream_.str();
\r
5609 #if defined(__RTAUDIO_DEBUG__)
\r
5610 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
5611 snd_pcm_hw_params_dump( hw_params, out );
\r
5614 // Set access ... check user preference.
\r
5615 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
5616 stream_.userInterleaved = false;
\r
5617 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5618 if ( result < 0 ) {
\r
5619 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5620 stream_.deviceInterleaved[mode] = true;
\r
5623 stream_.deviceInterleaved[mode] = false;
\r
5626 stream_.userInterleaved = true;
\r
5627 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5628 if ( result < 0 ) {
\r
5629 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5630 stream_.deviceInterleaved[mode] = false;
\r
5633 stream_.deviceInterleaved[mode] = true;
\r
5636 if ( result < 0 ) {
\r
5637 snd_pcm_close( phandle );
\r
5638 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
5639 errorText_ = errorStream_.str();
\r
5643 // Determine how to set the device format.
\r
5644 stream_.userFormat = format;
\r
5645 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
5647 if ( format == RTAUDIO_SINT8 )
\r
5648 deviceFormat = SND_PCM_FORMAT_S8;
\r
5649 else if ( format == RTAUDIO_SINT16 )
\r
5650 deviceFormat = SND_PCM_FORMAT_S16;
\r
5651 else if ( format == RTAUDIO_SINT24 )
\r
5652 deviceFormat = SND_PCM_FORMAT_S24;
\r
5653 else if ( format == RTAUDIO_SINT32 )
\r
5654 deviceFormat = SND_PCM_FORMAT_S32;
\r
5655 else if ( format == RTAUDIO_FLOAT32 )
\r
5656 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5657 else if ( format == RTAUDIO_FLOAT64 )
\r
5658 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5660 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
5661 stream_.deviceFormat[mode] = format;
\r
5665 // The user requested format is not natively supported by the device.
\r
5666 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5667 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
5668 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
5672 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5673 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5674 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
5678 deviceFormat = SND_PCM_FORMAT_S32;
\r
5679 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5680 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
5684 deviceFormat = SND_PCM_FORMAT_S24;
\r
5685 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5686 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
5690 deviceFormat = SND_PCM_FORMAT_S16;
\r
5691 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5692 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5696 deviceFormat = SND_PCM_FORMAT_S8;
\r
5697 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5698 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5702 // If we get here, no supported format was found.
\r
5703 snd_pcm_close( phandle );
\r
5704 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
5705 errorText_ = errorStream_.str();
\r
5709 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
5710 if ( result < 0 ) {
\r
5711 snd_pcm_close( phandle );
\r
5712 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
5713 errorText_ = errorStream_.str();
\r
5717 // Determine whether byte-swaping is necessary.
\r
5718 stream_.doByteSwap[mode] = false;
\r
5719 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
5720 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
5721 if ( result == 0 )
\r
5722 stream_.doByteSwap[mode] = true;
\r
5723 else if (result < 0) {
\r
5724 snd_pcm_close( phandle );
\r
5725 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
5726 errorText_ = errorStream_.str();
\r
5731 // Set the sample rate.
\r
5732 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
5733 if ( result < 0 ) {
\r
5734 snd_pcm_close( phandle );
\r
5735 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5736 errorText_ = errorStream_.str();
\r
5740 // Determine the number of channels for this device. We support a possible
\r
5741 // minimum device channel number > than the value requested by the user.
\r
5742 stream_.nUserChannels[mode] = channels;
\r
5743 unsigned int value;
\r
5744 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
5745 unsigned int deviceChannels = value;
\r
5746 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
5747 snd_pcm_close( phandle );
\r
5748 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
5749 errorText_ = errorStream_.str();
\r
5753 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
5754 if ( result < 0 ) {
\r
5755 snd_pcm_close( phandle );
\r
5756 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5757 errorText_ = errorStream_.str();
\r
5760 deviceChannels = value;
\r
5761 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
5762 stream_.nDeviceChannels[mode] = deviceChannels;
\r
5764 // Set the device channels.
\r
5765 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
5766 if ( result < 0 ) {
\r
5767 snd_pcm_close( phandle );
\r
5768 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5769 errorText_ = errorStream_.str();
\r
5773 // Set the buffer (or period) size.
\r
5775 snd_pcm_uframes_t periodSize = *bufferSize;
\r
5776 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
5777 if ( result < 0 ) {
\r
5778 snd_pcm_close( phandle );
\r
5779 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5780 errorText_ = errorStream_.str();
\r
5783 *bufferSize = periodSize;
\r
5785 // Set the buffer number, which in ALSA is referred to as the "period".
\r
5786 unsigned int periods = 0;
\r
5787 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
5788 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
5789 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
5790 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
5791 if ( result < 0 ) {
\r
5792 snd_pcm_close( phandle );
\r
5793 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5794 errorText_ = errorStream_.str();
\r
5798 // If attempting to setup a duplex stream, the bufferSize parameter
\r
5799 // MUST be the same in both directions!
\r
5800 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
5801 snd_pcm_close( phandle );
\r
5802 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
5803 errorText_ = errorStream_.str();
\r
5807 stream_.bufferSize = *bufferSize;
\r
5809 // Install the hardware configuration
\r
5810 result = snd_pcm_hw_params( phandle, hw_params );
\r
5811 if ( result < 0 ) {
\r
5812 snd_pcm_close( phandle );
\r
5813 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5814 errorText_ = errorStream_.str();
\r
5818 #if defined(__RTAUDIO_DEBUG__)
\r
5819 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
5820 snd_pcm_hw_params_dump( hw_params, out );
\r
5823 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
5824 snd_pcm_sw_params_t *sw_params = NULL;
\r
5825 snd_pcm_sw_params_alloca( &sw_params );
\r
5826 snd_pcm_sw_params_current( phandle, sw_params );
\r
5827 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
5828 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
5829 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
5831 // The following two settings were suggested by Theo Veenker
\r
5832 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
5833 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
5835 // here are two options for a fix
\r
5836 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
5837 snd_pcm_uframes_t val;
\r
5838 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
5839 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
5841 result = snd_pcm_sw_params( phandle, sw_params );
\r
5842 if ( result < 0 ) {
\r
5843 snd_pcm_close( phandle );
\r
5844 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5845 errorText_ = errorStream_.str();
\r
5849 #if defined(__RTAUDIO_DEBUG__)
\r
5850 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
5851 snd_pcm_sw_params_dump( sw_params, out );
\r
5854 // Set flags for buffer conversion
\r
5855 stream_.doConvertBuffer[mode] = false;
\r
5856 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
5857 stream_.doConvertBuffer[mode] = true;
\r
5858 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
5859 stream_.doConvertBuffer[mode] = true;
\r
5860 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5861 stream_.nUserChannels[mode] > 1 )
\r
5862 stream_.doConvertBuffer[mode] = true;
\r
5864 // Allocate the ApiHandle if necessary and then save.
\r
5865 AlsaHandle *apiInfo = 0;
\r
5866 if ( stream_.apiHandle == 0 ) {
\r
5868 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
5870 catch ( std::bad_alloc& ) {
\r
5871 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
5875 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
5876 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
5880 stream_.apiHandle = (void *) apiInfo;
\r
5881 apiInfo->handles[0] = 0;
\r
5882 apiInfo->handles[1] = 0;
\r
5885 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
5887 apiInfo->handles[mode] = phandle;
\r
5890 // Allocate necessary internal buffers.
\r
5891 unsigned long bufferBytes;
\r
5892 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5893 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5894 if ( stream_.userBuffer[mode] == NULL ) {
\r
5895 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
5899 if ( stream_.doConvertBuffer[mode] ) {
\r
5901 bool makeBuffer = true;
\r
5902 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5903 if ( mode == INPUT ) {
\r
5904 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5905 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5906 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
5910 if ( makeBuffer ) {
\r
5911 bufferBytes *= *bufferSize;
\r
5912 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5913 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5914 if ( stream_.deviceBuffer == NULL ) {
\r
5915 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
5921 stream_.sampleRate = sampleRate;
\r
5922 stream_.nBuffers = periods;
\r
5923 stream_.device[mode] = device;
\r
5924 stream_.state = STREAM_STOPPED;
\r
5926 // Setup the buffer conversion information structure.
\r
5927 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5929 // Setup thread if necessary.
\r
5930 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
5931 // We had already set up an output stream.
\r
5932 stream_.mode = DUPLEX;
\r
5933 // Link the streams if possible.
\r
5934 apiInfo->synchronized = false;
\r
5935 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
5936 apiInfo->synchronized = true;
\r
5938 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
5939 error( RtAudioError::WARNING );
\r
5943 stream_.mode = mode;
\r
5945 // Setup callback thread.
\r
5946 stream_.callbackInfo.object = (void *) this;
\r
5948 // Set the thread attributes for joinable and realtime scheduling
\r
5949 // priority (optional). The higher priority will only take affect
\r
5950 // if the program is run as root or suid. Note, under Linux
\r
5951 // processes with CAP_SYS_NICE privilege, a user can change
\r
5952 // scheduling policy and priority (thus need not be root). See
\r
5953 // POSIX "capabilities".
\r
5954 pthread_attr_t attr;
\r
5955 pthread_attr_init( &attr );
\r
5956 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
5958 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
5959 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
5960 // We previously attempted to increase the audio callback priority
\r
5961 // to SCHED_RR here via the attributes. However, while no errors
\r
5962 // were reported in doing so, it did not work. So, now this is
\r
5963 // done in the alsaCallbackHandler function.
\r
5964 stream_.callbackInfo.doRealtime = true;
\r
5965 int priority = options->priority;
\r
5966 int min = sched_get_priority_min( SCHED_RR );
\r
5967 int max = sched_get_priority_max( SCHED_RR );
\r
5968 if ( priority < min ) priority = min;
\r
5969 else if ( priority > max ) priority = max;
\r
5970 stream_.callbackInfo.priority = priority;
\r
5974 stream_.callbackInfo.isRunning = true;
\r
5975 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
5976 pthread_attr_destroy( &attr );
\r
5978 stream_.callbackInfo.isRunning = false;
\r
5979 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
5988 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
5989 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
5990 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
5992 stream_.apiHandle = 0;
\r
5995 if ( phandle) snd_pcm_close( phandle );
\r
5997 for ( int i=0; i<2; i++ ) {
\r
5998 if ( stream_.userBuffer[i] ) {
\r
5999 free( stream_.userBuffer[i] );
\r
6000 stream_.userBuffer[i] = 0;
\r
6004 if ( stream_.deviceBuffer ) {
\r
6005 free( stream_.deviceBuffer );
\r
6006 stream_.deviceBuffer = 0;
\r
6009 stream_.state = STREAM_CLOSED;
\r
6013 void RtApiAlsa :: closeStream()
\r
6015 if ( stream_.state == STREAM_CLOSED ) {
\r
6016 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
6017 error( RtAudioError::WARNING );
\r
6021 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6022 stream_.callbackInfo.isRunning = false;
\r
6023 MUTEX_LOCK( &stream_.mutex );
\r
6024 if ( stream_.state == STREAM_STOPPED ) {
\r
6025 apiInfo->runnable = true;
\r
6026 pthread_cond_signal( &apiInfo->runnable_cv );
\r
6028 MUTEX_UNLOCK( &stream_.mutex );
\r
6029 pthread_join( stream_.callbackInfo.thread, NULL );
\r
6031 if ( stream_.state == STREAM_RUNNING ) {
\r
6032 stream_.state = STREAM_STOPPED;
\r
6033 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
6034 snd_pcm_drop( apiInfo->handles[0] );
\r
6035 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
6036 snd_pcm_drop( apiInfo->handles[1] );
\r
6040 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
6041 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
6042 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
6044 stream_.apiHandle = 0;
\r
6047 for ( int i=0; i<2; i++ ) {
\r
6048 if ( stream_.userBuffer[i] ) {
\r
6049 free( stream_.userBuffer[i] );
\r
6050 stream_.userBuffer[i] = 0;
\r
6054 if ( stream_.deviceBuffer ) {
\r
6055 free( stream_.deviceBuffer );
\r
6056 stream_.deviceBuffer = 0;
\r
6059 stream_.mode = UNINITIALIZED;
\r
6060 stream_.state = STREAM_CLOSED;
\r
6063 void RtApiAlsa :: startStream()
\r
6065 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
6068 if ( stream_.state == STREAM_RUNNING ) {
\r
6069 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
6070 error( RtAudioError::WARNING );
\r
6074 MUTEX_LOCK( &stream_.mutex );
\r
6077 snd_pcm_state_t state;
\r
6078 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6079 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6080 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6081 state = snd_pcm_state( handle[0] );
\r
6082 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6083 result = snd_pcm_prepare( handle[0] );
\r
6084 if ( result < 0 ) {
\r
6085 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
6086 errorText_ = errorStream_.str();
\r
6092 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6093 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
6094 state = snd_pcm_state( handle[1] );
\r
6095 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6096 result = snd_pcm_prepare( handle[1] );
\r
6097 if ( result < 0 ) {
\r
6098 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
6099 errorText_ = errorStream_.str();
\r
6105 stream_.state = STREAM_RUNNING;
\r
6108 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
6109 pthread_cond_signal( &apiInfo->runnable_cv );
\r
6110 MUTEX_UNLOCK( &stream_.mutex );
\r
6112 if ( result >= 0 ) return;
\r
6113 error( RtAudioError::SYSTEM_ERROR );
\r
6116 void RtApiAlsa :: stopStream()
\r
6119 if ( stream_.state == STREAM_STOPPED ) {
\r
6120 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
6121 error( RtAudioError::WARNING );
\r
6125 stream_.state = STREAM_STOPPED;
\r
6126 MUTEX_LOCK( &stream_.mutex );
\r
6129 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6130 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6131 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6132 if ( apiInfo->synchronized )
\r
6133 result = snd_pcm_drop( handle[0] );
\r
6135 result = snd_pcm_drain( handle[0] );
\r
6136 if ( result < 0 ) {
\r
6137 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
6138 errorText_ = errorStream_.str();
\r
6143 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6144 result = snd_pcm_drop( handle[1] );
\r
6145 if ( result < 0 ) {
\r
6146 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
6147 errorText_ = errorStream_.str();
\r
6153 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
6154 MUTEX_UNLOCK( &stream_.mutex );
\r
6156 if ( result >= 0 ) return;
\r
6157 error( RtAudioError::SYSTEM_ERROR );
\r
6160 void RtApiAlsa :: abortStream()
\r
6163 if ( stream_.state == STREAM_STOPPED ) {
\r
6164 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
6165 error( RtAudioError::WARNING );
\r
6169 stream_.state = STREAM_STOPPED;
\r
6170 MUTEX_LOCK( &stream_.mutex );
\r
6173 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6174 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6175 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6176 result = snd_pcm_drop( handle[0] );
\r
6177 if ( result < 0 ) {
\r
6178 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
6179 errorText_ = errorStream_.str();
\r
6184 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6185 result = snd_pcm_drop( handle[1] );
\r
6186 if ( result < 0 ) {
\r
6187 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
6188 errorText_ = errorStream_.str();
\r
6194 MUTEX_UNLOCK( &stream_.mutex );
\r
6196 if ( result >= 0 ) return;
\r
6197 error( RtAudioError::SYSTEM_ERROR );
\r
6200 void RtApiAlsa :: callbackEvent()
\r
6202 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6203 if ( stream_.state == STREAM_STOPPED ) {
\r
6204 MUTEX_LOCK( &stream_.mutex );
\r
6205 while ( !apiInfo->runnable )
\r
6206 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
6208 if ( stream_.state != STREAM_RUNNING ) {
\r
6209 MUTEX_UNLOCK( &stream_.mutex );
\r
6212 MUTEX_UNLOCK( &stream_.mutex );
\r
6215 if ( stream_.state == STREAM_CLOSED ) {
\r
6216 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6217 error( RtAudioError::WARNING );
\r
6221 int doStopStream = 0;
\r
6222 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
6223 double streamTime = getStreamTime();
\r
6224 RtAudioStreamStatus status = 0;
\r
6225 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
6226 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6227 apiInfo->xrun[0] = false;
\r
6229 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
6230 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6231 apiInfo->xrun[1] = false;
\r
6233 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6234 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
6236 if ( doStopStream == 2 ) {
\r
6241 MUTEX_LOCK( &stream_.mutex );
\r
6243 // The state might change while waiting on a mutex.
\r
6244 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
6249 snd_pcm_t **handle;
\r
6250 snd_pcm_sframes_t frames;
\r
6251 RtAudioFormat format;
\r
6252 handle = (snd_pcm_t **) apiInfo->handles;
\r
6254 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6256 // Setup parameters.
\r
6257 if ( stream_.doConvertBuffer[1] ) {
\r
6258 buffer = stream_.deviceBuffer;
\r
6259 channels = stream_.nDeviceChannels[1];
\r
6260 format = stream_.deviceFormat[1];
\r
6263 buffer = stream_.userBuffer[1];
\r
6264 channels = stream_.nUserChannels[1];
\r
6265 format = stream_.userFormat;
\r
6268 // Read samples from device in interleaved/non-interleaved format.
\r
6269 if ( stream_.deviceInterleaved[1] )
\r
6270 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
6272 void *bufs[channels];
\r
6273 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6274 for ( int i=0; i<channels; i++ )
\r
6275 bufs[i] = (void *) (buffer + (i * offset));
\r
6276 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
6279 if ( result < (int) stream_.bufferSize ) {
\r
6280 // Either an error or overrun occured.
\r
6281 if ( result == -EPIPE ) {
\r
6282 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
6283 if ( state == SND_PCM_STATE_XRUN ) {
\r
6284 apiInfo->xrun[1] = true;
\r
6285 result = snd_pcm_prepare( handle[1] );
\r
6286 if ( result < 0 ) {
\r
6287 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
6288 errorText_ = errorStream_.str();
\r
6292 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6293 errorText_ = errorStream_.str();
\r
6297 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
6298 errorText_ = errorStream_.str();
\r
6300 error( RtAudioError::WARNING );
\r
6304 // Do byte swapping if necessary.
\r
6305 if ( stream_.doByteSwap[1] )
\r
6306 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
6308 // Do buffer conversion if necessary.
\r
6309 if ( stream_.doConvertBuffer[1] )
\r
6310 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6312 // Check stream latency
\r
6313 result = snd_pcm_delay( handle[1], &frames );
\r
6314 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
6319 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6321 // Setup parameters and do buffer conversion if necessary.
\r
6322 if ( stream_.doConvertBuffer[0] ) {
\r
6323 buffer = stream_.deviceBuffer;
\r
6324 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6325 channels = stream_.nDeviceChannels[0];
\r
6326 format = stream_.deviceFormat[0];
\r
6329 buffer = stream_.userBuffer[0];
\r
6330 channels = stream_.nUserChannels[0];
\r
6331 format = stream_.userFormat;
\r
6334 // Do byte swapping if necessary.
\r
6335 if ( stream_.doByteSwap[0] )
\r
6336 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
6338 // Write samples to device in interleaved/non-interleaved format.
\r
6339 if ( stream_.deviceInterleaved[0] )
\r
6340 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
6342 void *bufs[channels];
\r
6343 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6344 for ( int i=0; i<channels; i++ )
\r
6345 bufs[i] = (void *) (buffer + (i * offset));
\r
6346 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
6349 if ( result < (int) stream_.bufferSize ) {
\r
6350 // Either an error or underrun occured.
\r
6351 if ( result == -EPIPE ) {
\r
6352 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
6353 if ( state == SND_PCM_STATE_XRUN ) {
\r
6354 apiInfo->xrun[0] = true;
\r
6355 result = snd_pcm_prepare( handle[0] );
\r
6356 if ( result < 0 ) {
\r
6357 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
6358 errorText_ = errorStream_.str();
\r
6362 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6363 errorText_ = errorStream_.str();
\r
6367 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
6368 errorText_ = errorStream_.str();
\r
6370 error( RtAudioError::WARNING );
\r
6374 // Check stream latency
\r
6375 result = snd_pcm_delay( handle[0], &frames );
\r
6376 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
6380 MUTEX_UNLOCK( &stream_.mutex );
\r
6382 RtApi::tickStreamTime();
\r
6383 if ( doStopStream == 1 ) this->stopStream();
\r
6386 static void *alsaCallbackHandler( void *ptr )
\r
6388 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6389 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
6390 bool *isRunning = &info->isRunning;
\r
6392 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
6393 if ( &info->doRealtime ) {
\r
6394 pthread_t tID = pthread_self(); // ID of this thread
\r
6395 sched_param prio = { info->priority }; // scheduling priority of thread
\r
6396 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
6400 while ( *isRunning == true ) {
\r
6401 pthread_testcancel();
\r
6402 object->callbackEvent();
\r
6405 pthread_exit( NULL );
\r
6408 //******************** End of __LINUX_ALSA__ *********************//
\r
6411 #if defined(__LINUX_PULSE__)
\r
6413 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
6414 // and Tristan Matthews.
\r
6416 #include <pulse/error.h>
\r
6417 #include <pulse/simple.h>
\r
// Fixed list of sample rates advertised for the PulseAudio backend;
// the trailing 0 is a sentinel terminating iteration over the table.
6420 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,

6421 44100, 48000, 96000, 0};
\r
6423 struct rtaudio_pa_format_mapping_t {
\r
6424 RtAudioFormat rtaudio_format;
\r
6425 pa_sample_format_t pa_format;
\r
// Translation table pairing each RtAudio sample format with its
// PulseAudio equivalent (all little-endian); terminated by a
// {0, PA_SAMPLE_INVALID} sentinel entry so loops can stop on either
// field.
6428 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {

6429 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},

6430 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},

6431 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},

6432 {0, PA_SAMPLE_INVALID}};
\r
6434 struct PulseAudioHandle {
\r
6435 pa_simple *s_play;
\r
6438 pthread_cond_t runnable_cv;
\r
6440 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
6443 RtApiPulse::~RtApiPulse()
\r
6445 if ( stream_.state != STREAM_CLOSED )
\r
6449 unsigned int RtApiPulse::getDeviceCount( void )
\r
6454 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int device )
\r
6456 RtAudio::DeviceInfo info;
\r
6457 info.probed = true;
\r
6458 info.name = "PulseAudio";
\r
6459 info.outputChannels = 2;
\r
6460 info.inputChannels = 2;
\r
6461 info.duplexChannels = 2;
\r
6462 info.isDefaultOutput = true;
\r
6463 info.isDefaultInput = true;
\r
6465 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
6466 info.sampleRates.push_back( *sr );
\r
6468 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
// Thread entry routine for the PulseAudio callback thread: loops,
// honoring pthread cancellation points, invoking callbackEvent() on
// the owning RtApiPulse object until the CallbackInfo isRunning flag
// is cleared, then terminates via pthread_exit().
// NOTE(review): the brace-only lines of this function are absent from
// this extracted listing; the statements below are otherwise untouched.
6473 static void *pulseaudio_callback( void * user )

6475 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );

6476 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );

6477 volatile bool *isRunning = &cbi->isRunning;

6479 while ( *isRunning ) {

6480 pthread_testcancel();

6481 context->callbackEvent();

6484 pthread_exit( NULL );
\r
6487 void RtApiPulse::closeStream( void )
\r
6489 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6491 stream_.callbackInfo.isRunning = false;
\r
6493 MUTEX_LOCK( &stream_.mutex );
\r
6494 if ( stream_.state == STREAM_STOPPED ) {
\r
6495 pah->runnable = true;
\r
6496 pthread_cond_signal( &pah->runnable_cv );
\r
6498 MUTEX_UNLOCK( &stream_.mutex );
\r
6500 pthread_join( pah->thread, 0 );
\r
6501 if ( pah->s_play ) {
\r
6502 pa_simple_flush( pah->s_play, NULL );
\r
6503 pa_simple_free( pah->s_play );
\r
6506 pa_simple_free( pah->s_rec );
\r
6508 pthread_cond_destroy( &pah->runnable_cv );
\r
6510 stream_.apiHandle = 0;
\r
6513 if ( stream_.userBuffer[0] ) {
\r
6514 free( stream_.userBuffer[0] );
\r
6515 stream_.userBuffer[0] = 0;
\r
6517 if ( stream_.userBuffer[1] ) {
\r
6518 free( stream_.userBuffer[1] );
\r
6519 stream_.userBuffer[1] = 0;
\r
6522 stream_.state = STREAM_CLOSED;
\r
6523 stream_.mode = UNINITIALIZED;
\r
6526 void RtApiPulse::callbackEvent( void )
\r
6528 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6530 if ( stream_.state == STREAM_STOPPED ) {
\r
6531 MUTEX_LOCK( &stream_.mutex );
\r
6532 while ( !pah->runnable )
\r
6533 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
6535 if ( stream_.state != STREAM_RUNNING ) {
\r
6536 MUTEX_UNLOCK( &stream_.mutex );
\r
6539 MUTEX_UNLOCK( &stream_.mutex );
\r
6542 if ( stream_.state == STREAM_CLOSED ) {
\r
6543 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
6544 "this shouldn't happen!";
\r
6545 error( RtAudioError::WARNING );
\r
6549 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
6550 double streamTime = getStreamTime();
\r
6551 RtAudioStreamStatus status = 0;
\r
6552 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
6553 stream_.bufferSize, streamTime, status,
\r
6554 stream_.callbackInfo.userData );
\r
6556 if ( doStopStream == 2 ) {
\r
6561 MUTEX_LOCK( &stream_.mutex );
\r
6562 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
6563 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
6565 if ( stream_.state != STREAM_RUNNING )
\r
6570 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6571 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
6572 convertBuffer( stream_.deviceBuffer,
\r
6573 stream_.userBuffer[OUTPUT],
\r
6574 stream_.convertInfo[OUTPUT] );
\r
6575 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
6576 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
6578 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
6579 formatBytes( stream_.userFormat );
\r
6581 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
6582 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
6583 pa_strerror( pa_error ) << ".";
\r
6584 errorText_ = errorStream_.str();
\r
6585 error( RtAudioError::WARNING );
\r
6589 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
6590 if ( stream_.doConvertBuffer[INPUT] )
\r
6591 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
6592 formatBytes( stream_.deviceFormat[INPUT] );
\r
6594 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
6595 formatBytes( stream_.userFormat );
\r
6597 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
6598 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
6599 pa_strerror( pa_error ) << ".";
\r
6600 errorText_ = errorStream_.str();
\r
6601 error( RtAudioError::WARNING );
\r
6603 if ( stream_.doConvertBuffer[INPUT] ) {
\r
6604 convertBuffer( stream_.userBuffer[INPUT],
\r
6605 stream_.deviceBuffer,
\r
6606 stream_.convertInfo[INPUT] );
\r
6611 MUTEX_UNLOCK( &stream_.mutex );
\r
6612 RtApi::tickStreamTime();
\r
6614 if ( doStopStream == 1 )
\r
6618 void RtApiPulse::startStream( void )
\r
6620 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6622 if ( stream_.state == STREAM_CLOSED ) {
\r
6623 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
6624 error( RtAudioError::INVALID_USE );
\r
6627 if ( stream_.state == STREAM_RUNNING ) {
\r
6628 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
6629 error( RtAudioError::WARNING );
\r
6633 MUTEX_LOCK( &stream_.mutex );
\r
6635 stream_.state = STREAM_RUNNING;
\r
6637 pah->runnable = true;
\r
6638 pthread_cond_signal( &pah->runnable_cv );
\r
6639 MUTEX_UNLOCK( &stream_.mutex );
\r
6642 void RtApiPulse::stopStream( void )
\r
6644 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6646 if ( stream_.state == STREAM_CLOSED ) {
\r
6647 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
6648 error( RtAudioError::INVALID_USE );
\r
6651 if ( stream_.state == STREAM_STOPPED ) {
\r
6652 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
6653 error( RtAudioError::WARNING );
\r
6657 stream_.state = STREAM_STOPPED;
\r
6658 MUTEX_LOCK( &stream_.mutex );
\r
6660 if ( pah && pah->s_play ) {
\r
6662 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
6663 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
6664 pa_strerror( pa_error ) << ".";
\r
6665 errorText_ = errorStream_.str();
\r
6666 MUTEX_UNLOCK( &stream_.mutex );
\r
6667 error( RtAudioError::SYSTEM_ERROR );
\r
6672 stream_.state = STREAM_STOPPED;
\r
6673 MUTEX_UNLOCK( &stream_.mutex );
\r
6676 void RtApiPulse::abortStream( void )
\r
6678 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
6680 if ( stream_.state == STREAM_CLOSED ) {
\r
6681 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
6682 error( RtAudioError::INVALID_USE );
\r
6685 if ( stream_.state == STREAM_STOPPED ) {
\r
6686 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
6687 error( RtAudioError::WARNING );
\r
6691 stream_.state = STREAM_STOPPED;
\r
6692 MUTEX_LOCK( &stream_.mutex );
\r
6694 if ( pah && pah->s_play ) {
\r
6696 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
6697 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
6698 pa_strerror( pa_error ) << ".";
\r
6699 errorText_ = errorStream_.str();
\r
6700 MUTEX_UNLOCK( &stream_.mutex );
\r
6701 error( RtAudioError::SYSTEM_ERROR );
\r
6706 stream_.state = STREAM_STOPPED;
\r
6707 MUTEX_UNLOCK( &stream_.mutex );
\r
6710 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
6711 unsigned int channels, unsigned int firstChannel,
\r
6712 unsigned int sampleRate, RtAudioFormat format,
\r
6713 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
6715 PulseAudioHandle *pah = 0;
\r
6716 unsigned long bufferBytes = 0;
\r
6717 pa_sample_spec ss;
\r
6719 if ( device != 0 ) return false;
\r
6720 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
6721 if ( channels != 1 && channels != 2 ) {
\r
6722 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
6725 ss.channels = channels;
\r
6727 if ( firstChannel != 0 ) return false;
\r
6729 bool sr_found = false;
\r
6730 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
6731 if ( sampleRate == *sr ) {
\r
6733 stream_.sampleRate = sampleRate;
\r
6734 ss.rate = sampleRate;
\r
6738 if ( !sr_found ) {
\r
6739 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
6743 bool sf_found = 0;
\r
6744 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
6745 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
6746 if ( format == sf->rtaudio_format ) {
\r
6748 stream_.userFormat = sf->rtaudio_format;
\r
6749 ss.format = sf->pa_format;
\r
6753 if ( !sf_found ) {
\r
6754 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample format.";
\r
6758 // Set interleaving parameters.
\r
6759 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
6760 else stream_.userInterleaved = true;
\r
6761 stream_.deviceInterleaved[mode] = true;
\r
6762 stream_.nBuffers = 1;
\r
6763 stream_.doByteSwap[mode] = false;
\r
6764 stream_.doConvertBuffer[mode] = channels > 1 && !stream_.userInterleaved;
\r
6765 stream_.deviceFormat[mode] = stream_.userFormat;
\r
6766 stream_.nUserChannels[mode] = channels;
\r
6767 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
6768 stream_.channelOffset[mode] = 0;
\r
6770 // Allocate necessary internal buffers.
\r
6771 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
6772 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
6773 if ( stream_.userBuffer[mode] == NULL ) {
\r
6774 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
6777 stream_.bufferSize = *bufferSize;
\r
6779 if ( stream_.doConvertBuffer[mode] ) {
\r
6781 bool makeBuffer = true;
\r
6782 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
6783 if ( mode == INPUT ) {
\r
6784 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
6785 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
6786 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
6790 if ( makeBuffer ) {
\r
6791 bufferBytes *= *bufferSize;
\r
6792 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
6793 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
6794 if ( stream_.deviceBuffer == NULL ) {
\r
6795 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
6801 stream_.device[mode] = device;
\r
6803 // Setup the buffer conversion information structure.
\r
6804 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
6806 if ( !stream_.apiHandle ) {
\r
6807 PulseAudioHandle *pah = new PulseAudioHandle;
\r
6809 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
6813 stream_.apiHandle = pah;
\r
6814 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
6815 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
6819 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6822 std::string streamName = "RtAudio";
\r
6823 if ( !options->streamName.empty() ) streamName = options->streamName;
\r
6826 pa_buffer_attr buffer_attr;
\r
6827 buffer_attr.fragsize = bufferBytes;
\r
6828 buffer_attr.maxlength = -1;
\r
6830 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
6831 if ( !pah->s_rec ) {
\r
6832 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
6837 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
6838 if ( !pah->s_play ) {
\r
6839 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
6847 if ( stream_.mode == UNINITIALIZED )
\r
6848 stream_.mode = mode;
\r
6849 else if ( stream_.mode == mode )
\r
6852 stream_.mode = DUPLEX;
\r
6854 if ( !stream_.callbackInfo.isRunning ) {
\r
6855 stream_.callbackInfo.object = this;
\r
6856 stream_.callbackInfo.isRunning = true;
\r
6857 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
6858 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
6863 stream_.state = STREAM_STOPPED;
\r
6867 if ( pah && stream_.callbackInfo.isRunning ) {
\r
6868 pthread_cond_destroy( &pah->runnable_cv );
\r
6870 stream_.apiHandle = 0;
\r
6873 for ( int i=0; i<2; i++ ) {
\r
6874 if ( stream_.userBuffer[i] ) {
\r
6875 free( stream_.userBuffer[i] );
\r
6876 stream_.userBuffer[i] = 0;
\r
6880 if ( stream_.deviceBuffer ) {
\r
6881 free( stream_.deviceBuffer );
\r
6882 stream_.deviceBuffer = 0;
\r
6888 //******************** End of __LINUX_PULSE__ *********************//
\r
6891 #if defined(__LINUX_OSS__)
\r
6893 #include <unistd.h>
\r
6894 #include <sys/ioctl.h>
\r
6895 #include <unistd.h>
\r
6896 #include <fcntl.h>
\r
6897 #include <sys/soundcard.h>
\r
6898 #include <errno.h>
\r
6901 static void *ossCallbackHandler(void * ptr);
\r
6903 // A structure to hold various information related to the OSS API
\r
6904 // implementation.
\r
6905 struct OssHandle {
\r
6906 int id[2]; // device ids
\r
6909 pthread_cond_t runnable;
\r
6912 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
6915 RtApiOss :: RtApiOss()
\r
6917 // Nothing to do here.
\r
6920 RtApiOss :: ~RtApiOss()
\r
6922 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6925 unsigned int RtApiOss :: getDeviceCount( void )
\r
6927 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6928 if ( mixerfd == -1 ) {
\r
6929 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
6930 error( RtAudioError::WARNING );
\r
6934 oss_sysinfo sysinfo;
\r
6935 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
6937 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6938 error( RtAudioError::WARNING );
\r
6943 return sysinfo.numaudios;
\r
6946 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
6948 RtAudio::DeviceInfo info;
\r
6949 info.probed = false;
\r
6951 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6952 if ( mixerfd == -1 ) {
\r
6953 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
6954 error( RtAudioError::WARNING );
\r
6958 oss_sysinfo sysinfo;
\r
6959 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6960 if ( result == -1 ) {
\r
6962 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6963 error( RtAudioError::WARNING );
\r
6967 unsigned nDevices = sysinfo.numaudios;
\r
6968 if ( nDevices == 0 ) {
\r
6970 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
6971 error( RtAudioError::INVALID_USE );
\r
6975 if ( device >= nDevices ) {
\r
6977 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
6978 error( RtAudioError::INVALID_USE );
\r
6982 oss_audioinfo ainfo;
\r
6983 ainfo.dev = device;
\r
6984 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6986 if ( result == -1 ) {
\r
6987 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6988 errorText_ = errorStream_.str();
\r
6989 error( RtAudioError::WARNING );
\r
6994 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
6995 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
6996 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
6997 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
6998 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
7001 // Probe data formats ... do for input
\r
7002 unsigned long mask = ainfo.iformats;
\r
7003 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
7004 info.nativeFormats |= RTAUDIO_SINT16;
\r
7005 if ( mask & AFMT_S8 )
\r
7006 info.nativeFormats |= RTAUDIO_SINT8;
\r
7007 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
7008 info.nativeFormats |= RTAUDIO_SINT32;
\r
7009 if ( mask & AFMT_FLOAT )
\r
7010 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7011 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
7012 info.nativeFormats |= RTAUDIO_SINT24;
\r
7014 // Check that we have at least one supported format
\r
7015 if ( info.nativeFormats == 0 ) {
\r
7016 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
7017 errorText_ = errorStream_.str();
\r
7018 error( RtAudioError::WARNING );
\r
7022 // Probe the supported sample rates.
\r
7023 info.sampleRates.clear();
\r
7024 if ( ainfo.nrates ) {
\r
7025 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
7026 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
7027 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
7028 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
7035 // Check min and max rate values;
\r
7036 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
7037 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
7038 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
7042 if ( info.sampleRates.size() == 0 ) {
\r
7043 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
7044 errorText_ = errorStream_.str();
\r
7045 error( RtAudioError::WARNING );
\r
7048 info.probed = true;
\r
7049 info.name = ainfo.name;
\r
7056 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7057 unsigned int firstChannel, unsigned int sampleRate,
\r
7058 RtAudioFormat format, unsigned int *bufferSize,
\r
7059 RtAudio::StreamOptions *options )
\r
7061 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
7062 if ( mixerfd == -1 ) {
\r
7063 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
7067 oss_sysinfo sysinfo;
\r
7068 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
7069 if ( result == -1 ) {
\r
7071 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
7075 unsigned nDevices = sysinfo.numaudios;
\r
7076 if ( nDevices == 0 ) {
\r
7077 // This should not happen because a check is made before this function is called.
\r
7079 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
7083 if ( device >= nDevices ) {
\r
7084 // This should not happen because a check is made before this function is called.
\r
7086 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
7090 oss_audioinfo ainfo;
\r
7091 ainfo.dev = device;
\r
7092 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
7094 if ( result == -1 ) {
\r
7095 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
7096 errorText_ = errorStream_.str();
\r
7100 // Check if device supports input or output
\r
7101 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
7102 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
7103 if ( mode == OUTPUT )
\r
7104 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
7106 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
7107 errorText_ = errorStream_.str();
\r
7112 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7113 if ( mode == OUTPUT )
\r
7114 flags |= O_WRONLY;
\r
7115 else { // mode == INPUT
\r
7116 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
7117 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
7118 close( handle->id[0] );
\r
7119 handle->id[0] = 0;
\r
7120 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
7121 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
7122 errorText_ = errorStream_.str();
\r
7125 // Check that the number previously set channels is the same.
\r
7126 if ( stream_.nUserChannels[0] != channels ) {
\r
7127 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
7128 errorText_ = errorStream_.str();
\r
7134 flags |= O_RDONLY;
\r
7137 // Set exclusive access if specified.
\r
7138 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
7140 // Try to open the device.
\r
7142 fd = open( ainfo.devnode, flags, 0 );
\r
7144 if ( errno == EBUSY )
\r
7145 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
7147 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
7148 errorText_ = errorStream_.str();
\r
7152 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
7154 if ( flags | O_RDWR ) {
\r
7155 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
7156 if ( result == -1) {
\r
7157 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
7158 errorText_ = errorStream_.str();
\r
7164 // Check the device channel support.
\r
7165 stream_.nUserChannels[mode] = channels;
\r
7166 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
7168 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
7169 errorText_ = errorStream_.str();
\r
7173 // Set the number of channels.
\r
7174 int deviceChannels = channels + firstChannel;
\r
7175 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
7176 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
7178 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
7179 errorText_ = errorStream_.str();
\r
7182 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7184 // Get the data format mask
\r
7186 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
7187 if ( result == -1 ) {
\r
7189 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
7190 errorText_ = errorStream_.str();
\r
7194 // Determine how to set the device format.
\r
7195 stream_.userFormat = format;
\r
7196 int deviceFormat = -1;
\r
7197 stream_.doByteSwap[mode] = false;
\r
7198 if ( format == RTAUDIO_SINT8 ) {
\r
7199 if ( mask & AFMT_S8 ) {
\r
7200 deviceFormat = AFMT_S8;
\r
7201 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7204 else if ( format == RTAUDIO_SINT16 ) {
\r
7205 if ( mask & AFMT_S16_NE ) {
\r
7206 deviceFormat = AFMT_S16_NE;
\r
7207 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7209 else if ( mask & AFMT_S16_OE ) {
\r
7210 deviceFormat = AFMT_S16_OE;
\r
7211 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7212 stream_.doByteSwap[mode] = true;
\r
7215 else if ( format == RTAUDIO_SINT24 ) {
\r
7216 if ( mask & AFMT_S24_NE ) {
\r
7217 deviceFormat = AFMT_S24_NE;
\r
7218 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7220 else if ( mask & AFMT_S24_OE ) {
\r
7221 deviceFormat = AFMT_S24_OE;
\r
7222 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7223 stream_.doByteSwap[mode] = true;
\r
7226 else if ( format == RTAUDIO_SINT32 ) {
\r
7227 if ( mask & AFMT_S32_NE ) {
\r
7228 deviceFormat = AFMT_S32_NE;
\r
7229 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7231 else if ( mask & AFMT_S32_OE ) {
\r
7232 deviceFormat = AFMT_S32_OE;
\r
7233 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7234 stream_.doByteSwap[mode] = true;
\r
7238 if ( deviceFormat == -1 ) {
\r
7239 // The user requested format is not natively supported by the device.
\r
7240 if ( mask & AFMT_S16_NE ) {
\r
7241 deviceFormat = AFMT_S16_NE;
\r
7242 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7244 else if ( mask & AFMT_S32_NE ) {
\r
7245 deviceFormat = AFMT_S32_NE;
\r
7246 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7248 else if ( mask & AFMT_S24_NE ) {
\r
7249 deviceFormat = AFMT_S24_NE;
\r
7250 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7252 else if ( mask & AFMT_S16_OE ) {
\r
7253 deviceFormat = AFMT_S16_OE;
\r
7254 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7255 stream_.doByteSwap[mode] = true;
\r
7257 else if ( mask & AFMT_S32_OE ) {
\r
7258 deviceFormat = AFMT_S32_OE;
\r
7259 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7260 stream_.doByteSwap[mode] = true;
\r
7262 else if ( mask & AFMT_S24_OE ) {
\r
7263 deviceFormat = AFMT_S24_OE;
\r
7264 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7265 stream_.doByteSwap[mode] = true;
\r
7267 else if ( mask & AFMT_S8) {
\r
7268 deviceFormat = AFMT_S8;
\r
7269 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7273 if ( stream_.deviceFormat[mode] == 0 ) {
\r
7274 // This really shouldn't happen ...
\r
7276 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
7277 errorText_ = errorStream_.str();
\r
7281 // Set the data format.
\r
7282 int temp = deviceFormat;
\r
7283 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
7284 if ( result == -1 || deviceFormat != temp ) {
\r
7286 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
7287 errorText_ = errorStream_.str();
\r
7291 // Attempt to set the buffer size. According to OSS, the minimum
\r
7292 // number of buffers is two. The supposed minimum buffer size is 16
\r
7293 // bytes, so that will be our lower bound. The argument to this
\r
7294 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
7295 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
7296 // We'll check the actual value used near the end of the setup
\r
7298 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
7299 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
7301 if ( options ) buffers = options->numberOfBuffers;
\r
7302 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
7303 if ( buffers < 2 ) buffers = 3;
\r
7304 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
7305 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
7306 if ( result == -1 ) {
\r
7308 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
7309 errorText_ = errorStream_.str();
\r
7312 stream_.nBuffers = buffers;
\r
7314 // Save buffer size (in sample frames).
\r
7315 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
7316 stream_.bufferSize = *bufferSize;
\r
7318 // Set the sample rate.
\r
7319 int srate = sampleRate;
\r
7320 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
7321 if ( result == -1 ) {
\r
7323 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
7324 errorText_ = errorStream_.str();
\r
7328 // Verify the sample rate setup worked.
\r
7329 if ( abs( srate - sampleRate ) > 100 ) {
\r
7331 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
7332 errorText_ = errorStream_.str();
\r
7335 stream_.sampleRate = sampleRate;
\r
7337 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
7338 // We're doing duplex setup here.
\r
7339 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
7340 stream_.nDeviceChannels[0] = deviceChannels;
\r
7343 // Set interleaving parameters.
\r
7344 stream_.userInterleaved = true;
\r
7345 stream_.deviceInterleaved[mode] = true;
\r
7346 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
7347 stream_.userInterleaved = false;
\r
7349 // Set flags for buffer conversion
\r
7350 stream_.doConvertBuffer[mode] = false;
\r
7351 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7352 stream_.doConvertBuffer[mode] = true;
\r
7353 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7354 stream_.doConvertBuffer[mode] = true;
\r
7355 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7356 stream_.nUserChannels[mode] > 1 )
\r
7357 stream_.doConvertBuffer[mode] = true;
\r
7359 // Allocate the stream handles if necessary and then save.
\r
7360 if ( stream_.apiHandle == 0 ) {
\r
7362 handle = new OssHandle;
\r
7364 catch ( std::bad_alloc& ) {
\r
7365 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
7369 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
7370 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
7374 stream_.apiHandle = (void *) handle;
\r
7377 handle = (OssHandle *) stream_.apiHandle;
\r
7379 handle->id[mode] = fd;
\r
7381 // Allocate necessary internal buffers.
\r
7382 unsigned long bufferBytes;
\r
7383 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7384 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7385 if ( stream_.userBuffer[mode] == NULL ) {
\r
7386 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
7390 if ( stream_.doConvertBuffer[mode] ) {
\r
7392 bool makeBuffer = true;
\r
7393 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7394 if ( mode == INPUT ) {
\r
7395 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7396 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7397 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7401 if ( makeBuffer ) {
\r
7402 bufferBytes *= *bufferSize;
\r
7403 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7404 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7405 if ( stream_.deviceBuffer == NULL ) {
\r
7406 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
7412 stream_.device[mode] = device;
\r
7413 stream_.state = STREAM_STOPPED;
\r
7415 // Setup the buffer conversion information structure.
\r
7416 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7418 // Setup thread if necessary.
\r
7419 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7420 // We had already set up an output stream.
\r
7421 stream_.mode = DUPLEX;
\r
7422 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
7425 stream_.mode = mode;
\r
7427 // Setup callback thread.
\r
7428 stream_.callbackInfo.object = (void *) this;
\r
7430 // Set the thread attributes for joinable and realtime scheduling
\r
7431 // priority. The higher priority will only take affect if the
\r
7432 // program is run as root or suid.
\r
7433 pthread_attr_t attr;
\r
7434 pthread_attr_init( &attr );
\r
7435 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7436 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7437 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7438 struct sched_param param;
\r
7439 int priority = options->priority;
\r
7440 int min = sched_get_priority_min( SCHED_RR );
\r
7441 int max = sched_get_priority_max( SCHED_RR );
\r
7442 if ( priority < min ) priority = min;
\r
7443 else if ( priority > max ) priority = max;
\r
7444 param.sched_priority = priority;
\r
7445 pthread_attr_setschedparam( &attr, ¶m );
\r
7446 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
7449 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
7451 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
7454 stream_.callbackInfo.isRunning = true;
\r
7455 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
7456 pthread_attr_destroy( &attr );
\r
7458 stream_.callbackInfo.isRunning = false;
\r
7459 errorText_ = "RtApiOss::error creating callback thread!";
\r
7468 pthread_cond_destroy( &handle->runnable );
\r
7469 if ( handle->id[0] ) close( handle->id[0] );
\r
7470 if ( handle->id[1] ) close( handle->id[1] );
\r
7472 stream_.apiHandle = 0;
\r
7475 for ( int i=0; i<2; i++ ) {
\r
7476 if ( stream_.userBuffer[i] ) {
\r
7477 free( stream_.userBuffer[i] );
\r
7478 stream_.userBuffer[i] = 0;
\r
7482 if ( stream_.deviceBuffer ) {
\r
7483 free( stream_.deviceBuffer );
\r
7484 stream_.deviceBuffer = 0;
\r
// Close an open OSS stream: wake and join the callback thread, halt any
// active device i/o, release the OssHandle and per-mode buffers, and reset
// the stream state to CLOSED.
// NOTE(review): this chunk appears to be a partial extraction -- opening
// brace, early 'return', 'else' lines and several closing braces from the
// canonical RtAudio source are missing between the surviving lines.
7490 void RtApiOss :: closeStream()

7492   if ( stream_.state == STREAM_CLOSED ) {

7493     errorText_ = "RtApiOss::closeStream(): no open stream to close!";

7494     error( RtAudioError::WARNING );

7498   OssHandle *handle = (OssHandle *) stream_.apiHandle;

// Signal the callback thread to exit its loop, then wake it if it is
// blocked on the runnable condition so pthread_join can complete.
7499   stream_.callbackInfo.isRunning = false;

7500   MUTEX_LOCK( &stream_.mutex );

7501   if ( stream_.state == STREAM_STOPPED )

7502     pthread_cond_signal( &handle->runnable );

7503   MUTEX_UNLOCK( &stream_.mutex );

7504   pthread_join( stream_.callbackInfo.thread, NULL );

// If still running, halt the output and/or input device before closing.
7506   if ( stream_.state == STREAM_RUNNING ) {

7507     if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )

7508       ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );

// NOTE(review): an 'else' selecting the input descriptor presumably
// belongs between these two ioctl calls -- confirm against the canonical
// RtAudio source.
7510       ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );

7511     stream_.state = STREAM_STOPPED;

// Destroy the condition variable and close both file descriptors.
7515     pthread_cond_destroy( &handle->runnable );

7516     if ( handle->id[0] ) close( handle->id[0] );

7517     if ( handle->id[1] ) close( handle->id[1] );

7519     stream_.apiHandle = 0;

// Free the per-mode user buffers and the shared device buffer.
7522   for ( int i=0; i<2; i++ ) {

7523     if ( stream_.userBuffer[i] ) {

7524       free( stream_.userBuffer[i] );

7525       stream_.userBuffer[i] = 0;

7529   if ( stream_.deviceBuffer ) {

7530     free( stream_.deviceBuffer );

7531     stream_.deviceBuffer = 0;

7534   stream_.mode = UNINITIALIZED;

7535   stream_.state = STREAM_CLOSED;
\r
// Start a stopped OSS stream.  Marks the stream RUNNING under the mutex
// and signals the callback thread, which waits on handle->runnable while
// the stream is stopped (see callbackEvent).  OSS itself needs no explicit
// start -- i/o begins as soon as samples are written/read.
// NOTE(review): partial extraction -- braces and an early 'return' after
// the warning appear to be missing.
7538 void RtApiOss :: startStream()

7541   if ( stream_.state == STREAM_RUNNING ) {

7542     errorText_ = "RtApiOss::startStream(): the stream is already running!";

7543     error( RtAudioError::WARNING );

7547   MUTEX_LOCK( &stream_.mutex );

7549   stream_.state = STREAM_RUNNING;

7551   // No need to do anything else here ... OSS automatically starts

7552   // when fed samples.

7554   MUTEX_UNLOCK( &stream_.mutex );

// Wake the callback thread blocked in callbackEvent's cond_wait.
7556   OssHandle *handle = (OssHandle *) stream_.apiHandle;

7557   pthread_cond_signal( &handle->runnable );
\r
// Gracefully stop a running OSS stream.  For output, the device is first
// flushed with nBuffers+1 buffers of silence so queued audio drains before
// SNDCTL_DSP_HALT; the input side is halted directly.  Errors from the
// halt ioctls are reported after the mutex is released.
// NOTE(review): partial extraction -- declarations of 'result', 'buffer'
// and 'samples', plus 'else' lines and closing braces, appear to be
// missing between the surviving lines.
7560 void RtApiOss :: stopStream()

7563   if ( stream_.state == STREAM_STOPPED ) {

7564     errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";

7565     error( RtAudioError::WARNING );

7569   MUTEX_LOCK( &stream_.mutex );

7571   // The state might change while waiting on a mutex.

7572   if ( stream_.state == STREAM_STOPPED ) {

7573     MUTEX_UNLOCK( &stream_.mutex );

7578   OssHandle *handle = (OssHandle *) stream_.apiHandle;

7579   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

7581     // Flush the output with zeros a few times.

7584     RtAudioFormat format;

// Choose the buffer actually handed to the device: the converted device
// buffer when format conversion is active, else the user buffer.
7586     if ( stream_.doConvertBuffer[0] ) {

7587       buffer = stream_.deviceBuffer;

7588       samples = stream_.bufferSize * stream_.nDeviceChannels[0];

7589       format = stream_.deviceFormat[0];

7592       buffer = stream_.userBuffer[0];

7593       samples = stream_.bufferSize * stream_.nUserChannels[0];

7594       format = stream_.userFormat;

7597     memset( buffer, 0, samples * formatBytes(format) );

7598     for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {

7599       result = write( handle->id[0], buffer, samples * formatBytes(format) );

7600       if ( result == -1 ) {

7601         errorText_ = "RtApiOss::stopStream: audio write error.";

7602         error( RtAudioError::WARNING );

7606     result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );

7607     if ( result == -1 ) {

7608       errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";

7609       errorText_ = errorStream_.str();

7612     handle->triggered = false;

// Halt input separately unless duplex on a single shared descriptor,
// which the output halt above already covered.
7615   if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {

7616     result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );

7617     if ( result == -1 ) {

7618       errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";

7619       errorText_ = errorStream_.str();

7625   stream_.state = STREAM_STOPPED;

7626   MUTEX_UNLOCK( &stream_.mutex );

// Report any ioctl failure only after the mutex is released, since
// error() may throw for SYSTEM_ERROR.
7628   if ( result != -1 ) return;

7629   error( RtAudioError::SYSTEM_ERROR );
\r
// Immediately stop a running OSS stream without draining pending output
// (contrast stopStream, which flushes silence first).  Halts output and/or
// input via SNDCTL_DSP_HALT and reports any failure after unlocking.
// NOTE(review): partial extraction -- the declaration of 'result', 'else'
// lines, early 'return's and closing braces appear to be missing.
7632 void RtApiOss :: abortStream()

7635   if ( stream_.state == STREAM_STOPPED ) {

7636     errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";

7637     error( RtAudioError::WARNING );

7641   MUTEX_LOCK( &stream_.mutex );

7643   // The state might change while waiting on a mutex.

7644   if ( stream_.state == STREAM_STOPPED ) {

7645     MUTEX_UNLOCK( &stream_.mutex );

7650   OssHandle *handle = (OssHandle *) stream_.apiHandle;

7651   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

7652     result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );

7653     if ( result == -1 ) {

7654       errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";

7655       errorText_ = errorStream_.str();

7658     handle->triggered = false;

// Halt input separately unless duplex on one shared descriptor, already
// halted above.
7661   if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {

7662     result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );

7663     if ( result == -1 ) {

7664       errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";

7665       errorText_ = errorStream_.str();

7671   stream_.state = STREAM_STOPPED;

7672   MUTEX_UNLOCK( &stream_.mutex );

// Defer error() until after unlock; it may throw for SYSTEM_ERROR.
7674   if ( result != -1 ) return;

7675   error( RtAudioError::SYSTEM_ERROR );
\r
// One iteration of the OSS callback loop (driven by ossCallbackHandler).
// Blocks while the stream is stopped, invokes the user callback with xrun
// status, converts/byte-swaps buffers as configured, then writes output
// and/or reads input via the device descriptors.
// NOTE(review): partial extraction -- declarations of 'result', 'buffer',
// 'samples' and 'trig', the 'unlock:' label, 'else' lines, 'return's and
// closing braces appear to be missing between the surviving lines.
7678 void RtApiOss :: callbackEvent()

// While stopped, park on the runnable condition until startStream or
// closeStream signals it; bail out if not transitioned to RUNNING.
7680   OssHandle *handle = (OssHandle *) stream_.apiHandle;

7681   if ( stream_.state == STREAM_STOPPED ) {

7682     MUTEX_LOCK( &stream_.mutex );

7683     pthread_cond_wait( &handle->runnable, &stream_.mutex );

7684     if ( stream_.state != STREAM_RUNNING ) {

7685       MUTEX_UNLOCK( &stream_.mutex );

7688     MUTEX_UNLOCK( &stream_.mutex );

7691   if ( stream_.state == STREAM_CLOSED ) {

7692     errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";

7693     error( RtAudioError::WARNING );

7697   // Invoke user callback to get fresh output data.

7698   int doStopStream = 0;

7699   RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;

7700   double streamTime = getStreamTime();

// Report and clear any under/overflow flags recorded by earlier i/o.
7701   RtAudioStreamStatus status = 0;

7702   if ( stream_.mode != INPUT && handle->xrun[0] == true ) {

7703     status |= RTAUDIO_OUTPUT_UNDERFLOW;

7704     handle->xrun[0] = false;

7706   if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {

7707     status |= RTAUDIO_INPUT_OVERFLOW;

7708     handle->xrun[1] = false;

7710   doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],

7711                            stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );

// Callback return of 2 requests an immediate abort.
7712   if ( doStopStream == 2 ) {

7713     this->abortStream();

7717   MUTEX_LOCK( &stream_.mutex );

7719   // The state might change while waiting on a mutex.

7720   if ( stream_.state == STREAM_STOPPED ) goto unlock;

7725   RtAudioFormat format;

7727   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

7729     // Setup parameters and do buffer conversion if necessary.

7730     if ( stream_.doConvertBuffer[0] ) {

7731       buffer = stream_.deviceBuffer;

7732       convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );

7733       samples = stream_.bufferSize * stream_.nDeviceChannels[0];

7734       format = stream_.deviceFormat[0];

7737       buffer = stream_.userBuffer[0];

7738       samples = stream_.bufferSize * stream_.nUserChannels[0];

7739       format = stream_.userFormat;

7742     // Do byte swapping if necessary.

7743     if ( stream_.doByteSwap[0] )

7744       byteSwapBuffer( buffer, samples, format );

// For duplex on one device, prime with a first write while triggers are
// off, then enable input+output triggers together so both start in sync.
7746     if ( stream_.mode == DUPLEX && handle->triggered == false ) {

7748       ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );

7749       result = write( handle->id[0], buffer, samples * formatBytes(format) );

7750       trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;

7751       ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );

7752       handle->triggered = true;

7755       // Write samples to device.

7756       result = write( handle->id[0], buffer, samples * formatBytes(format) );

7758     if ( result == -1 ) {

7759       // We'll assume this is an underrun, though there isn't a

7760       // specific means for determining that.

7761       handle->xrun[0] = true;

7762       errorText_ = "RtApiOss::callbackEvent: audio write error.";

7763       error( RtAudioError::WARNING );

7764       // Continue on to input section.

7768   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

7770     // Setup parameters.

7771     if ( stream_.doConvertBuffer[1] ) {

7772       buffer = stream_.deviceBuffer;

7773       samples = stream_.bufferSize * stream_.nDeviceChannels[1];

7774       format = stream_.deviceFormat[1];

7777       buffer = stream_.userBuffer[1];

7778       samples = stream_.bufferSize * stream_.nUserChannels[1];

7779       format = stream_.userFormat;

7782     // Read samples from device.

7783     result = read( handle->id[1], buffer, samples * formatBytes(format) );

7785     if ( result == -1 ) {

7786       // We'll assume this is an overrun, though there isn't a

7787       // specific means for determining that.

7788       handle->xrun[1] = true;

7789       errorText_ = "RtApiOss::callbackEvent: audio read error.";

7790       error( RtAudioError::WARNING );

7794     // Do byte swapping if necessary.

7795     if ( stream_.doByteSwap[1] )

7796       byteSwapBuffer( buffer, samples, format );

7798     // Do buffer conversion if necessary.

7799     if ( stream_.doConvertBuffer[1] )

7800       convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

7804   MUTEX_UNLOCK( &stream_.mutex );

// Advance the stream clock; callback return of 1 requests a drained stop.
7806   RtApi::tickStreamTime();

7807   if ( doStopStream == 1 ) this->stopStream();
\r
// Thread entry point created by probeDeviceOpen.  Loops calling the
// stream's callbackEvent() until CallbackInfo::isRunning is cleared
// (by closeStream), offering a cancellation point each iteration.
7810 static void *ossCallbackHandler( void *ptr )

7812   CallbackInfo *info = (CallbackInfo *) ptr;

7813   RtApiOss *object = (RtApiOss *) info->object;

// Read isRunning through a pointer so closeStream's write is observed.
7814   bool *isRunning = &info->isRunning;

7816   while ( *isRunning == true ) {

7817     pthread_testcancel();

7818     object->callbackEvent();

7821   pthread_exit( NULL );
\r
7824 //******************** End of __LINUX_OSS__ *********************//
\r
7828 // *************************************************** //
\r
7830 // Protected common (OS-independent) RtAudio methods.
\r
7832 // *************************************************** //
\r
7834 // This method can be modified to control the behavior of error
\r
7835 // message printing.
\r
// Central error dispatcher.  If the user registered an error callback,
// deliver the message there (suppressing re-entrant errors raised while
// handling the first); otherwise print warnings to stderr when enabled,
// and throw RtAudioError for anything stronger than a warning.
// NOTE(review): partial extraction -- the opening brace, the 'return'
// after the firstErrorOccurred guard, an abortStream() call and closing
// braces appear to be missing between the surviving lines.
7836 void RtApi :: error( RtAudioError::Type type )

7838   errorStream_.str(""); // clear the ostringstream

7840   RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;

7841   if ( errorCallback ) {

7842     // abortStream() can generate new error messages. Ignore them. Just keep original one.

7844     if ( firstErrorOccurred )

7847     firstErrorOccurred = true;

// Copy the message before any nested error handling can overwrite it.
7848     const std::string errorMessage = errorText_;

7850     if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {

7851       stream_.callbackInfo.isRunning = false; // exit from the thread

7855     errorCallback( type, errorMessage );

7856     firstErrorOccurred = false;

7860   if ( type == RtAudioError::WARNING && showWarnings_ == true )

7861     std::cerr << '\n' << errorText_ << "\n\n";

7862   else if ( type != RtAudioError::WARNING )

7863     throw( RtAudioError( errorText_, type ) );
\r
// Guard used by stream-control methods: raises INVALID_USE (which throws
// via error()) when no stream is currently open.
7866 void RtApi :: verifyStream()

7868   if ( stream_.state == STREAM_CLOSED ) {

7869     errorText_ = "RtApi:: a stream is not open!";

7870     error( RtAudioError::INVALID_USE );
\r
// Reset every field of the stream_ structure to its pristine state,
// both the scalar members and the per-mode (output=0 / input=1) arrays.
// Called before probing/opening a new stream.
7874 void RtApi :: clearStreamInfo()

7876   stream_.mode = UNINITIALIZED;

7877   stream_.state = STREAM_CLOSED;

7878   stream_.sampleRate = 0;

7879   stream_.bufferSize = 0;

7880   stream_.nBuffers = 0;

7881   stream_.userFormat = 0;

7882   stream_.userInterleaved = true;

7883   stream_.streamTime = 0.0;

7884   stream_.apiHandle = 0;

7885   stream_.deviceBuffer = 0;

7886   stream_.callbackInfo.callback = 0;

7887   stream_.callbackInfo.userData = 0;

7888   stream_.callbackInfo.isRunning = false;

7889   stream_.callbackInfo.errorCallback = 0;

7890   for ( int i=0; i<2; i++ ) {

// 11111 acts as the "no device selected" sentinel for device ids.
7891     stream_.device[i] = 11111;

7892     stream_.doConvertBuffer[i] = false;

7893     stream_.deviceInterleaved[i] = true;

7894     stream_.doByteSwap[i] = false;

7895     stream_.nUserChannels[i] = 0;

7896     stream_.nDeviceChannels[i] = 0;

7897     stream_.channelOffset[i] = 0;

7898     stream_.deviceFormat[i] = 0;

7899     stream_.latency[i] = 0;

7900     stream_.userBuffer[i] = 0;

7901     stream_.convertInfo[i].channels = 0;

7902     stream_.convertInfo[i].inJump = 0;

7903     stream_.convertInfo[i].outJump = 0;

7904     stream_.convertInfo[i].inFormat = 0;

7905     stream_.convertInfo[i].outFormat = 0;

7906     stream_.convertInfo[i].inOffset.clear();

7907     stream_.convertInfo[i].outOffset.clear();
\r
// Map an RtAudioFormat to its per-sample byte width; unknown formats
// raise a warning and (in the canonical source) return 0.
// NOTE(review): partial extraction -- the 'return' lines giving each
// width (2, 4, 8, 3, 1, and the fallback 0) appear to have been dropped
// between the surviving condition lines; confirm against the canonical
// RtAudio source.
7911 unsigned int RtApi :: formatBytes( RtAudioFormat format )

7913   if ( format == RTAUDIO_SINT16 )

7915   else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )

7917   else if ( format == RTAUDIO_FLOAT64 )

7919   else if ( format == RTAUDIO_SINT24 )

7921   else if ( format == RTAUDIO_SINT8 )

7924   errorText_ = "RtApi::formatBytes: undefined format.";

7925   error( RtAudioError::WARNING );
\r
// Populate stream_.convertInfo[mode] for buffer conversion between the
// user and device representations: jump strides, formats, channel count
// (the smaller of the two sides), per-channel interleave/deinterleave
// offsets, and a firstChannel offset when opening at a channel > 0.
// NOTE(review): partial extraction -- 'else' lines and closing braces
// appear to be missing between the surviving lines.
7930 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )

7932   if ( mode == INPUT ) { // convert device to user buffer

7933     stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];

7934     stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];

7935     stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];

7936     stream_.convertInfo[mode].outFormat = stream_.userFormat;

7938   else { // convert user to device buffer

7939     stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];

7940     stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];

7941     stream_.convertInfo[mode].inFormat = stream_.userFormat;

7942     stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];

// Convert only as many channels as both sides can supply/accept.
7945   if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )

7946     stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;

7948     stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;

7950   // Set up the interleave/deinterleave offsets.

7951   if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {

// Non-interleaved data is laid out channel-planar: each channel occupies
// a contiguous run of bufferSize frames, hence the k * bufferSize offsets
// and a jump of 1 on the planar side.
7952     if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||

7953          ( mode == INPUT && stream_.userInterleaved ) ) {

7954       for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {

7955         stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );

7956         stream_.convertInfo[mode].outOffset.push_back( k );

7957         stream_.convertInfo[mode].inJump = 1;

7961       for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {

7962         stream_.convertInfo[mode].inOffset.push_back( k );

7963         stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );

7964         stream_.convertInfo[mode].outJump = 1;

7968   else { // no (de)interleaving

7969     if ( stream_.userInterleaved ) {

7970       for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {

7971         stream_.convertInfo[mode].inOffset.push_back( k );

7972         stream_.convertInfo[mode].outOffset.push_back( k );

7976       for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {

7977         stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );

7978         stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );

7979         stream_.convertInfo[mode].inJump = 1;

7980         stream_.convertInfo[mode].outJump = 1;

7985   // Add channel offset.

// Shift the device-side offsets so conversion targets the requested
// starting channel: by firstChannel samples when interleaved, by whole
// channel planes (firstChannel * bufferSize) when planar.
7986   if ( firstChannel > 0 ) {

7987     if ( stream_.deviceInterleaved[mode] ) {

7988       if ( mode == OUTPUT ) {

7989         for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )

7990           stream_.convertInfo[mode].outOffset[k] += firstChannel;

7993         for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )

7994           stream_.convertInfo[mode].inOffset[k] += firstChannel;

7998       if ( mode == OUTPUT ) {

7999         for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )

8000           stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );

8003         for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )

8004           stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
8010 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
8012 // This function does format conversion, input/output channel compensation, and
\r
8013 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
8014 // the lower three bytes of a 32-bit integer.
\r
8016 // Clear our device buffer when in/out duplex device channels are different
\r
8017 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
8018 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
8019 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
8022 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
8024 Float64 *out = (Float64 *)outBuffer;
\r
8026 if (info.inFormat == RTAUDIO_SINT8) {
\r
8027 signed char *in = (signed char *)inBuffer;
\r
8028 scale = 1.0 / 127.5;
\r
8029 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8030 for (j=0; j<info.channels; j++) {
\r
8031 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8032 out[info.outOffset[j]] += 0.5;
\r
8033 out[info.outOffset[j]] *= scale;
\r
8035 in += info.inJump;
\r
8036 out += info.outJump;
\r
8039 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8040 Int16 *in = (Int16 *)inBuffer;
\r
8041 scale = 1.0 / 32767.5;
\r
8042 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8043 for (j=0; j<info.channels; j++) {
\r
8044 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8045 out[info.outOffset[j]] += 0.5;
\r
8046 out[info.outOffset[j]] *= scale;
\r
8048 in += info.inJump;
\r
8049 out += info.outJump;
\r
8052 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8053 Int24 *in = (Int24 *)inBuffer;
\r
8054 scale = 1.0 / 8388607.5;
\r
8055 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8056 for (j=0; j<info.channels; j++) {
\r
8057 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
8058 out[info.outOffset[j]] += 0.5;
\r
8059 out[info.outOffset[j]] *= scale;
\r
8061 in += info.inJump;
\r
8062 out += info.outJump;
\r
8065 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8066 Int32 *in = (Int32 *)inBuffer;
\r
8067 scale = 1.0 / 2147483647.5;
\r
8068 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8069 for (j=0; j<info.channels; j++) {
\r
8070 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8071 out[info.outOffset[j]] += 0.5;
\r
8072 out[info.outOffset[j]] *= scale;
\r
8074 in += info.inJump;
\r
8075 out += info.outJump;
\r
8078 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8079 Float32 *in = (Float32 *)inBuffer;
\r
8080 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8081 for (j=0; j<info.channels; j++) {
\r
8082 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
8084 in += info.inJump;
\r
8085 out += info.outJump;
\r
8088 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8089 // Channel compensation and/or (de)interleaving only.
\r
8090 Float64 *in = (Float64 *)inBuffer;
\r
8091 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8092 for (j=0; j<info.channels; j++) {
\r
8093 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8095 in += info.inJump;
\r
8096 out += info.outJump;
\r
8100 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
8102 Float32 *out = (Float32 *)outBuffer;
\r
8104 if (info.inFormat == RTAUDIO_SINT8) {
\r
8105 signed char *in = (signed char *)inBuffer;
\r
8106 scale = (Float32) ( 1.0 / 127.5 );
\r
8107 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8108 for (j=0; j<info.channels; j++) {
\r
8109 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8110 out[info.outOffset[j]] += 0.5;
\r
8111 out[info.outOffset[j]] *= scale;
\r
8113 in += info.inJump;
\r
8114 out += info.outJump;
\r
8117 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8118 Int16 *in = (Int16 *)inBuffer;
\r
8119 scale = (Float32) ( 1.0 / 32767.5 );
\r
8120 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8121 for (j=0; j<info.channels; j++) {
\r
8122 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8123 out[info.outOffset[j]] += 0.5;
\r
8124 out[info.outOffset[j]] *= scale;
\r
8126 in += info.inJump;
\r
8127 out += info.outJump;
\r
8130 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8131 Int24 *in = (Int24 *)inBuffer;
\r
8132 scale = (Float32) ( 1.0 / 8388607.5 );
\r
8133 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8134 for (j=0; j<info.channels; j++) {
\r
8135 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
8136 out[info.outOffset[j]] += 0.5;
\r
8137 out[info.outOffset[j]] *= scale;
\r
8139 in += info.inJump;
\r
8140 out += info.outJump;
\r
8143 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8144 Int32 *in = (Int32 *)inBuffer;
\r
8145 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
8146 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8147 for (j=0; j<info.channels; j++) {
\r
8148 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8149 out[info.outOffset[j]] += 0.5;
\r
8150 out[info.outOffset[j]] *= scale;
\r
8152 in += info.inJump;
\r
8153 out += info.outJump;
\r
8156 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8157 // Channel compensation and/or (de)interleaving only.
\r
8158 Float32 *in = (Float32 *)inBuffer;
\r
8159 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8160 for (j=0; j<info.channels; j++) {
\r
8161 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8163 in += info.inJump;
\r
8164 out += info.outJump;
\r
8167 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8168 Float64 *in = (Float64 *)inBuffer;
\r
8169 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8170 for (j=0; j<info.channels; j++) {
\r
8171 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8173 in += info.inJump;
\r
8174 out += info.outJump;
\r
8178 else if (info.outFormat == RTAUDIO_SINT32) {
\r
8179 Int32 *out = (Int32 *)outBuffer;
\r
8180 if (info.inFormat == RTAUDIO_SINT8) {
\r
8181 signed char *in = (signed char *)inBuffer;
\r
8182 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8183 for (j=0; j<info.channels; j++) {
\r
8184 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
8185 out[info.outOffset[j]] <<= 24;
\r
8187 in += info.inJump;
\r
8188 out += info.outJump;
\r
8191 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8192 Int16 *in = (Int16 *)inBuffer;
\r
8193 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8194 for (j=0; j<info.channels; j++) {
\r
8195 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
8196 out[info.outOffset[j]] <<= 16;
\r
8198 in += info.inJump;
\r
8199 out += info.outJump;
\r
8202 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8203 Int24 *in = (Int24 *)inBuffer;
\r
8204 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8205 for (j=0; j<info.channels; j++) {
\r
8206 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
8207 out[info.outOffset[j]] <<= 8;
\r
8209 in += info.inJump;
\r
8210 out += info.outJump;
\r
8213 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8214 // Channel compensation and/or (de)interleaving only.
\r
8215 Int32 *in = (Int32 *)inBuffer;
\r
8216 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8217 for (j=0; j<info.channels; j++) {
\r
8218 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8220 in += info.inJump;
\r
8221 out += info.outJump;
\r
8224 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8225 Float32 *in = (Float32 *)inBuffer;
\r
8226 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8227 for (j=0; j<info.channels; j++) {
\r
8228 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
8230 in += info.inJump;
\r
8231 out += info.outJump;
\r
8234 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8235 Float64 *in = (Float64 *)inBuffer;
\r
8236 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8237 for (j=0; j<info.channels; j++) {
\r
8238 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
8240 in += info.inJump;
\r
8241 out += info.outJump;
\r
8245 else if (info.outFormat == RTAUDIO_SINT24) {
\r
8246 Int24 *out = (Int24 *)outBuffer;
\r
8247 if (info.inFormat == RTAUDIO_SINT8) {
\r
8248 signed char *in = (signed char *)inBuffer;
\r
8249 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8250 for (j=0; j<info.channels; j++) {
\r
8251 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
8252 //out[info.outOffset[j]] <<= 16;
\r
8254 in += info.inJump;
\r
8255 out += info.outJump;
\r
8258 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8259 Int16 *in = (Int16 *)inBuffer;
\r
8260 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8261 for (j=0; j<info.channels; j++) {
\r
8262 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
8263 //out[info.outOffset[j]] <<= 8;
\r
8265 in += info.inJump;
\r
8266 out += info.outJump;
\r
8269 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8270 // Channel compensation and/or (de)interleaving only.
\r
8271 Int24 *in = (Int24 *)inBuffer;
\r
8272 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8273 for (j=0; j<info.channels; j++) {
\r
8274 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8276 in += info.inJump;
\r
8277 out += info.outJump;
\r
8280 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8281 Int32 *in = (Int32 *)inBuffer;
\r
8282 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8283 for (j=0; j<info.channels; j++) {
\r
8284 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
8285 //out[info.outOffset[j]] >>= 8;
\r
8287 in += info.inJump;
\r
8288 out += info.outJump;
\r
8291 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8292 Float32 *in = (Float32 *)inBuffer;
\r
8293 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8294 for (j=0; j<info.channels; j++) {
\r
8295 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
8297 in += info.inJump;
\r
8298 out += info.outJump;
\r
8301 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8302 Float64 *in = (Float64 *)inBuffer;
\r
8303 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8304 for (j=0; j<info.channels; j++) {
\r
8305 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
8307 in += info.inJump;
\r
8308 out += info.outJump;
\r
8312 else if (info.outFormat == RTAUDIO_SINT16) {
\r
8313 Int16 *out = (Int16 *)outBuffer;
\r
8314 if (info.inFormat == RTAUDIO_SINT8) {
\r
8315 signed char *in = (signed char *)inBuffer;
\r
8316 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8317 for (j=0; j<info.channels; j++) {
\r
8318 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
8319 out[info.outOffset[j]] <<= 8;
\r
8321 in += info.inJump;
\r
8322 out += info.outJump;
\r
8325 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8326 // Channel compensation and/or (de)interleaving only.
\r
8327 Int16 *in = (Int16 *)inBuffer;
\r
8328 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8329 for (j=0; j<info.channels; j++) {
\r
8330 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8332 in += info.inJump;
\r
8333 out += info.outJump;
\r
8336 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8337 Int24 *in = (Int24 *)inBuffer;
\r
8338 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8339 for (j=0; j<info.channels; j++) {
\r
8340 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
8342 in += info.inJump;
\r
8343 out += info.outJump;
\r
8346 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8347 Int32 *in = (Int32 *)inBuffer;
\r
8348 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8349 for (j=0; j<info.channels; j++) {
\r
8350 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
8352 in += info.inJump;
\r
8353 out += info.outJump;
\r
8356 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8357 Float32 *in = (Float32 *)inBuffer;
\r
8358 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8359 for (j=0; j<info.channels; j++) {
\r
8360 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
8362 in += info.inJump;
\r
8363 out += info.outJump;
\r
8366 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8367 Float64 *in = (Float64 *)inBuffer;
\r
8368 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8369 for (j=0; j<info.channels; j++) {
\r
8370 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
8372 in += info.inJump;
\r
8373 out += info.outJump;
\r
8377 else if (info.outFormat == RTAUDIO_SINT8) {
\r
8378 signed char *out = (signed char *)outBuffer;
\r
8379 if (info.inFormat == RTAUDIO_SINT8) {
\r
8380 // Channel compensation and/or (de)interleaving only.
\r
8381 signed char *in = (signed char *)inBuffer;
\r
8382 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8383 for (j=0; j<info.channels; j++) {
\r
8384 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8386 in += info.inJump;
\r
8387 out += info.outJump;
\r
8390 if (info.inFormat == RTAUDIO_SINT16) {
\r
8391 Int16 *in = (Int16 *)inBuffer;
\r
8392 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8393 for (j=0; j<info.channels; j++) {
\r
8394 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
8396 in += info.inJump;
\r
8397 out += info.outJump;
\r
8400 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8401 Int24 *in = (Int24 *)inBuffer;
\r
8402 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8403 for (j=0; j<info.channels; j++) {
\r
8404 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
8406 in += info.inJump;
\r
8407 out += info.outJump;
\r
8410 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8411 Int32 *in = (Int32 *)inBuffer;
\r
8412 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8413 for (j=0; j<info.channels; j++) {
\r
8414 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
8416 in += info.inJump;
\r
8417 out += info.outJump;
\r
8420 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8421 Float32 *in = (Float32 *)inBuffer;
\r
8422 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8423 for (j=0; j<info.channels; j++) {
\r
8424 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
8426 in += info.inJump;
\r
8427 out += info.outJump;
\r
8430 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8431 Float64 *in = (Float64 *)inBuffer;
\r
8432 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8433 for (j=0; j<info.channels; j++) {
\r
8434 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
8436 in += info.inJump;
\r
8437 out += info.outJump;
\r
8443 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
8444 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
8445 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
8447 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
8449 register char val;
\r
8450 register char *ptr;
\r
8453 if ( format == RTAUDIO_SINT16 ) {
\r
8454 for ( unsigned int i=0; i<samples; i++ ) {
\r
8455 // Swap 1st and 2nd bytes.
\r
8457 *(ptr) = *(ptr+1);
\r
8460 // Increment 2 bytes.
\r
8464 else if ( format == RTAUDIO_SINT32 ||
\r
8465 format == RTAUDIO_FLOAT32 ) {
\r
8466 for ( unsigned int i=0; i<samples; i++ ) {
\r
8467 // Swap 1st and 4th bytes.
\r
8469 *(ptr) = *(ptr+3);
\r
8472 // Swap 2nd and 3rd bytes.
\r
8475 *(ptr) = *(ptr+1);
\r
8478 // Increment 3 more bytes.
\r
8482 else if ( format == RTAUDIO_SINT24 ) {
\r
8483 for ( unsigned int i=0; i<samples; i++ ) {
\r
8484 // Swap 1st and 3rd bytes.
\r
8486 *(ptr) = *(ptr+2);
\r
8489 // Increment 2 more bytes.
\r
8493 else if ( format == RTAUDIO_FLOAT64 ) {
\r
8494 for ( unsigned int i=0; i<samples; i++ ) {
\r
8495 // Swap 1st and 8th bytes
\r
8497 *(ptr) = *(ptr+7);
\r
8500 // Swap 2nd and 7th bytes
\r
8503 *(ptr) = *(ptr+5);
\r
8506 // Swap 3rd and 6th bytes
\r
8509 *(ptr) = *(ptr+3);
\r
8512 // Swap 4th and 5th bytes
\r
8515 *(ptr) = *(ptr+1);
\r
8518 // Increment 5 more bytes.
\r
8524 // Indentation settings for Vim and Emacs
\r
8526 // Local Variables:
\r
8527 // c-basic-offset: 2
\r
8528 // indent-tabs-mode: nil
\r
8531 // vim: et sts=2 sw=2
\r