1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound and ASIO) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2012 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.0.11
\r
#include "RtAudio.h"
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-neutral mutex wrappers: Windows critical sections, POSIX
// pthread mutexes elsewhere, and harmless no-op stand-ins when no
// real-time API is compiled in (the dummy API needs no locking).
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
82 // The order here will control the order of RtAudio's API search in
\r
84 #if defined(__UNIX_JACK__)
\r
85 apis.push_back( UNIX_JACK );
\r
87 #if defined(__LINUX_ALSA__)
\r
88 apis.push_back( LINUX_ALSA );
\r
90 #if defined(__LINUX_PULSE__)
\r
91 apis.push_back( LINUX_PULSE );
\r
93 #if defined(__LINUX_OSS__)
\r
94 apis.push_back( LINUX_OSS );
\r
96 #if defined(__WINDOWS_ASIO__)
\r
97 apis.push_back( WINDOWS_ASIO );
\r
99 #if defined(__WINDOWS_DS__)
\r
100 apis.push_back( WINDOWS_DS );
\r
102 #if defined(__MACOSX_CORE__)
\r
103 apis.push_back( MACOSX_CORE );
\r
105 #if defined(__RTAUDIO_DUMMY__)
\r
106 apis.push_back( RTAUDIO_DUMMY );
\r
110 void RtAudio :: openRtApi( RtAudio::Api api )
\r
116 #if defined(__UNIX_JACK__)
\r
117 if ( api == UNIX_JACK )
\r
118 rtapi_ = new RtApiJack();
\r
120 #if defined(__LINUX_ALSA__)
\r
121 if ( api == LINUX_ALSA )
\r
122 rtapi_ = new RtApiAlsa();
\r
124 #if defined(__LINUX_PULSE__)
\r
125 if ( api == LINUX_PULSE )
\r
126 rtapi_ = new RtApiPulse();
\r
128 #if defined(__LINUX_OSS__)
\r
129 if ( api == LINUX_OSS )
\r
130 rtapi_ = new RtApiOss();
\r
132 #if defined(__WINDOWS_ASIO__)
\r
133 if ( api == WINDOWS_ASIO )
\r
134 rtapi_ = new RtApiAsio();
\r
136 #if defined(__WINDOWS_DS__)
\r
137 if ( api == WINDOWS_DS )
\r
138 rtapi_ = new RtApiDs();
\r
140 #if defined(__MACOSX_CORE__)
\r
141 if ( api == MACOSX_CORE )
\r
142 rtapi_ = new RtApiCore();
\r
144 #if defined(__RTAUDIO_DUMMY__)
\r
145 if ( api == RTAUDIO_DUMMY )
\r
146 rtapi_ = new RtApiDummy();
\r
150 RtAudio :: RtAudio( RtAudio::Api api ) throw()
\r
154 if ( api != UNSPECIFIED ) {
\r
155 // Attempt to open the specified API.
\r
157 if ( rtapi_ ) return;
\r
159 // No compiled support for specified API value. Issue a debug
\r
160 // warning and continue as if no API was specified.
\r
161 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
164 // Iterate through the compiled APIs and return as soon as we find
\r
165 // one with at least one device or we reach the end of the list.
\r
166 std::vector< RtAudio::Api > apis;
\r
167 getCompiledApi( apis );
\r
168 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
169 openRtApi( apis[i] );
\r
170 if ( rtapi_->getDeviceCount() ) break;
\r
173 if ( rtapi_ ) return;
\r
175 // It should not be possible to get here because the preprocessor
\r
176 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
177 // API-specific definitions are passed to the compiler. But just in
\r
178 // case something weird happens, we'll print out an error message.
\r
179 std::cerr << "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
182 RtAudio :: ~RtAudio() throw()
\r
187 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
188 RtAudio::StreamParameters *inputParameters,
\r
189 RtAudioFormat format, unsigned int sampleRate,
\r
190 unsigned int *bufferFrames,
\r
191 RtAudioCallback callback, void *userData,
\r
192 RtAudio::StreamOptions *options )
\r
194 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
195 sampleRate, bufferFrames, callback,
\r
196 userData, options );
\r
199 // *************************************************** //
\r
201 // Public RtApi definitions (see end of file for
\r
202 // private or protected utility functions).
\r
204 // *************************************************** //
\r
208 stream_.state = STREAM_CLOSED;
\r
209 stream_.mode = UNINITIALIZED;
\r
210 stream_.apiHandle = 0;
\r
211 stream_.userBuffer[0] = 0;
\r
212 stream_.userBuffer[1] = 0;
\r
213 MUTEX_INITIALIZE( &stream_.mutex );
\r
214 showWarnings_ = true;
\r
219 MUTEX_DESTROY( &stream_.mutex );
\r
222 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
223 RtAudio::StreamParameters *iParams,
\r
224 RtAudioFormat format, unsigned int sampleRate,
\r
225 unsigned int *bufferFrames,
\r
226 RtAudioCallback callback, void *userData,
\r
227 RtAudio::StreamOptions *options )
\r
229 if ( stream_.state != STREAM_CLOSED ) {
\r
230 errorText_ = "RtApi::openStream: a stream is already open!";
\r
231 error( RtError::INVALID_USE );
\r
234 if ( oParams && oParams->nChannels < 1 ) {
\r
235 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
236 error( RtError::INVALID_USE );
\r
239 if ( iParams && iParams->nChannels < 1 ) {
\r
240 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
241 error( RtError::INVALID_USE );
\r
244 if ( oParams == NULL && iParams == NULL ) {
\r
245 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
246 error( RtError::INVALID_USE );
\r
249 if ( formatBytes(format) == 0 ) {
\r
250 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
251 error( RtError::INVALID_USE );
\r
254 unsigned int nDevices = getDeviceCount();
\r
255 unsigned int oChannels = 0;
\r
257 oChannels = oParams->nChannels;
\r
258 if ( oParams->deviceId >= nDevices ) {
\r
259 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
260 error( RtError::INVALID_USE );
\r
264 unsigned int iChannels = 0;
\r
266 iChannels = iParams->nChannels;
\r
267 if ( iParams->deviceId >= nDevices ) {
\r
268 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
269 error( RtError::INVALID_USE );
\r
276 if ( oChannels > 0 ) {
\r
278 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
279 sampleRate, format, bufferFrames, options );
\r
280 if ( result == false ) error( RtError::SYSTEM_ERROR );
\r
283 if ( iChannels > 0 ) {
\r
285 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
286 sampleRate, format, bufferFrames, options );
\r
287 if ( result == false ) {
\r
288 if ( oChannels > 0 ) closeStream();
\r
289 error( RtError::SYSTEM_ERROR );
\r
293 stream_.callbackInfo.callback = (void *) callback;
\r
294 stream_.callbackInfo.userData = userData;
\r
296 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
297 stream_.state = STREAM_STOPPED;
\r
300 unsigned int RtApi :: getDefaultInputDevice( void )
\r
302 // Should be implemented in subclasses if possible.
\r
306 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
308 // Should be implemented in subclasses if possible.
\r
312 void RtApi :: closeStream( void )
\r
314 // MUST be implemented in subclasses!
\r
318 bool RtApi :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
319 unsigned int firstChannel, unsigned int sampleRate,
\r
320 RtAudioFormat format, unsigned int *bufferSize,
\r
321 RtAudio::StreamOptions *options )
\r
323 // MUST be implemented in subclasses!
\r
327 void RtApi :: tickStreamTime( void )
\r
329 // Subclasses that do not provide their own implementation of
\r
330 // getStreamTime should call this function once per buffer I/O to
\r
331 // provide basic stream time support.
\r
333 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
335 #if defined( HAVE_GETTIMEOFDAY )
\r
336 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
340 long RtApi :: getStreamLatency( void )
\r
344 long totalLatency = 0;
\r
345 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
346 totalLatency = stream_.latency[0];
\r
347 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
348 totalLatency += stream_.latency[1];
\r
350 return totalLatency;
\r
353 double RtApi :: getStreamTime( void )
\r
357 #if defined( HAVE_GETTIMEOFDAY )
\r
358 // Return a very accurate estimate of the stream time by
\r
359 // adding in the elapsed time since the last tick.
\r
360 struct timeval then;
\r
361 struct timeval now;
\r
363 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
364 return stream_.streamTime;
\r
366 gettimeofday( &now, NULL );
\r
367 then = stream_.lastTickTimestamp;
\r
368 return stream_.streamTime +
\r
369 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
370 (then.tv_sec + 0.000001 * then.tv_usec));
\r
372 return stream_.streamTime;
\r
376 unsigned int RtApi :: getStreamSampleRate( void )
\r
380 return stream_.sampleRate;
\r
384 // *************************************************** //
\r
386 // OS/API-specific methods.
\r
388 // *************************************************** //
\r
390 #if defined(__MACOSX_CORE__)
\r
392 // The OS X CoreAudio API is designed to use a separate callback
\r
393 // procedure for each of its audio devices. A single RtAudio duplex
\r
394 // stream using two different devices is supported here, though it
\r
395 // cannot be guaranteed to always behave correctly because we cannot
\r
396 // synchronize these two callbacks.
\r
398 // A property listener is installed for over/underrun information.
\r
399 // However, no functionality is currently provided to allow property
\r
400 // listeners to trigger user handlers because it is unclear what could
\r
401 // be done if a critical stream parameter (buffer size, sample rate,
\r
402 // device disconnect) notification arrived. The listeners entail
\r
403 // quite a bit of extra code and most likely, a user program wouldn't
\r
404 // be prepared for the result anyway. However, we do provide a flag
\r
405 // to the client callback function to inform of an over/underrun.
\r
407 // A structure to hold various information related to the CoreAudio API
\r
409 struct CoreHandle {
\r
410 AudioDeviceID id[2]; // device ids
\r
411 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
412 AudioDeviceIOProcID procId[2];
\r
414 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
415 UInt32 nStreams[2]; // number of streams to use
\r
417 char *deviceBuffer;
\r
418 pthread_cond_t condition;
\r
419 int drainCounter; // Tracks callback counts when draining
\r
420 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
423 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
426 ThreadHandle threadId;
\r
428 RtApiCore:: RtApiCore()
\r
430 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
431 // This is a largely undocumented but absolutely necessary
\r
432 // requirement starting with OS-X 10.6. If not called, queries and
\r
433 // updates to various audio device properties are not handled
\r
435 CFRunLoopRef theRunLoop = NULL;
\r
436 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
437 kAudioObjectPropertyScopeGlobal,
\r
438 kAudioObjectPropertyElementMaster };
\r
439 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
440 if ( result != noErr ) {
\r
441 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
442 error( RtError::WARNING );
\r
447 RtApiCore :: ~RtApiCore()
\r
449 // The subclass destructor gets called before the base class
\r
450 // destructor, so close an existing stream before deallocating
\r
451 // apiDeviceId memory.
\r
452 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
455 unsigned int RtApiCore :: getDeviceCount( void )
\r
457 // Find out how many audio devices there are, if any.
\r
459 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
460 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
461 if ( result != noErr ) {
\r
462 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
463 error( RtError::WARNING );
\r
467 return dataSize / sizeof( AudioDeviceID );
\r
470 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
472 unsigned int nDevices = getDeviceCount();
\r
473 if ( nDevices <= 1 ) return 0;
\r
476 UInt32 dataSize = sizeof( AudioDeviceID );
\r
477 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
478 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
479 if ( result != noErr ) {
\r
480 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
481 error( RtError::WARNING );
\r
485 dataSize *= nDevices;
\r
486 AudioDeviceID deviceList[ nDevices ];
\r
487 property.mSelector = kAudioHardwarePropertyDevices;
\r
488 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
489 if ( result != noErr ) {
\r
490 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
491 error( RtError::WARNING );
\r
495 for ( unsigned int i=0; i<nDevices; i++ )
\r
496 if ( id == deviceList[i] ) return i;
\r
498 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
499 error( RtError::WARNING );
\r
503 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
505 unsigned int nDevices = getDeviceCount();
\r
506 if ( nDevices <= 1 ) return 0;
\r
509 UInt32 dataSize = sizeof( AudioDeviceID );
\r
510 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
511 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
512 if ( result != noErr ) {
\r
513 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
514 error( RtError::WARNING );
\r
518 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
519 AudioDeviceID deviceList[ nDevices ];
\r
520 property.mSelector = kAudioHardwarePropertyDevices;
\r
521 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
522 if ( result != noErr ) {
\r
523 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
524 error( RtError::WARNING );
\r
528 for ( unsigned int i=0; i<nDevices; i++ )
\r
529 if ( id == deviceList[i] ) return i;
\r
531 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
532 error( RtError::WARNING );
\r
536 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
538 RtAudio::DeviceInfo info;
\r
539 info.probed = false;
\r
542 unsigned int nDevices = getDeviceCount();
\r
543 if ( nDevices == 0 ) {
\r
544 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
545 error( RtError::INVALID_USE );
\r
548 if ( device >= nDevices ) {
\r
549 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
550 error( RtError::INVALID_USE );
\r
553 AudioDeviceID deviceList[ nDevices ];
\r
554 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
555 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
556 kAudioObjectPropertyScopeGlobal,
\r
557 kAudioObjectPropertyElementMaster };
\r
558 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
559 0, NULL, &dataSize, (void *) &deviceList );
\r
560 if ( result != noErr ) {
\r
561 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
562 error( RtError::WARNING );
\r
566 AudioDeviceID id = deviceList[ device ];
\r
568 // Get the device name.
\r
570 CFStringRef cfname;
\r
571 dataSize = sizeof( CFStringRef );
\r
572 property.mSelector = kAudioObjectPropertyManufacturer;
\r
573 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
574 if ( result != noErr ) {
\r
575 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
576 errorText_ = errorStream_.str();
\r
577 error( RtError::WARNING );
\r
581 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
582 int length = CFStringGetLength(cfname);
\r
583 char *mname = (char *)malloc(length * 3 + 1);
\r
584 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
585 info.name.append( (const char *)mname, strlen(mname) );
\r
586 info.name.append( ": " );
\r
587 CFRelease( cfname );
\r
590 property.mSelector = kAudioObjectPropertyName;
\r
591 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
592 if ( result != noErr ) {
\r
593 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
594 errorText_ = errorStream_.str();
\r
595 error( RtError::WARNING );
\r
599 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
600 length = CFStringGetLength(cfname);
\r
601 char *name = (char *)malloc(length * 3 + 1);
\r
602 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
603 info.name.append( (const char *)name, strlen(name) );
\r
604 CFRelease( cfname );
\r
607 // Get the output stream "configuration".
\r
608 AudioBufferList *bufferList = nil;
\r
609 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
610 property.mScope = kAudioDevicePropertyScopeOutput;
\r
611 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
613 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
614 if ( result != noErr || dataSize == 0 ) {
\r
615 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
616 errorText_ = errorStream_.str();
\r
617 error( RtError::WARNING );
\r
621 // Allocate the AudioBufferList.
\r
622 bufferList = (AudioBufferList *) malloc( dataSize );
\r
623 if ( bufferList == NULL ) {
\r
624 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
625 error( RtError::WARNING );
\r
629 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
630 if ( result != noErr || dataSize == 0 ) {
\r
631 free( bufferList );
\r
632 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
633 errorText_ = errorStream_.str();
\r
634 error( RtError::WARNING );
\r
638 // Get output channel information.
\r
639 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
640 for ( i=0; i<nStreams; i++ )
\r
641 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
642 free( bufferList );
\r
644 // Get the input stream "configuration".
\r
645 property.mScope = kAudioDevicePropertyScopeInput;
\r
646 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
647 if ( result != noErr || dataSize == 0 ) {
\r
648 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
649 errorText_ = errorStream_.str();
\r
650 error( RtError::WARNING );
\r
654 // Allocate the AudioBufferList.
\r
655 bufferList = (AudioBufferList *) malloc( dataSize );
\r
656 if ( bufferList == NULL ) {
\r
657 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
658 error( RtError::WARNING );
\r
662 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
663 if (result != noErr || dataSize == 0) {
\r
664 free( bufferList );
\r
665 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
666 errorText_ = errorStream_.str();
\r
667 error( RtError::WARNING );
\r
671 // Get input channel information.
\r
672 nStreams = bufferList->mNumberBuffers;
\r
673 for ( i=0; i<nStreams; i++ )
\r
674 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
675 free( bufferList );
\r
677 // If device opens for both playback and capture, we determine the channels.
\r
678 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
679 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
681 // Probe the device sample rates.
\r
682 bool isInput = false;
\r
683 if ( info.outputChannels == 0 ) isInput = true;
\r
685 // Determine the supported sample rates.
\r
686 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
687 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
688 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
689 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
690 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
691 errorText_ = errorStream_.str();
\r
692 error( RtError::WARNING );
\r
696 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
697 AudioValueRange rangeList[ nRanges ];
\r
698 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
699 if ( result != kAudioHardwareNoError ) {
\r
700 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
701 errorText_ = errorStream_.str();
\r
702 error( RtError::WARNING );
\r
706 Float64 minimumRate = 100000000.0, maximumRate = 0.0;
\r
707 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
708 if ( rangeList[i].mMinimum < minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
709 if ( rangeList[i].mMaximum > maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
712 info.sampleRates.clear();
\r
713 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
714 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
715 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
718 if ( info.sampleRates.size() == 0 ) {
\r
719 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
720 errorText_ = errorStream_.str();
\r
721 error( RtError::WARNING );
\r
725 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
726 // Thus, any other "physical" formats supported by the device are of
\r
727 // no interest to the client.
\r
728 info.nativeFormats = RTAUDIO_FLOAT32;
\r
730 if ( info.outputChannels > 0 )
\r
731 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
732 if ( info.inputChannels > 0 )
\r
733 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
735 info.probed = true;
\r
739 OSStatus callbackHandler( AudioDeviceID inDevice,
\r
740 const AudioTimeStamp* inNow,
\r
741 const AudioBufferList* inInputData,
\r
742 const AudioTimeStamp* inInputTime,
\r
743 AudioBufferList* outOutputData,
\r
744 const AudioTimeStamp* inOutputTime,
\r
745 void* infoPointer )
\r
747 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
749 RtApiCore *object = (RtApiCore *) info->object;
\r
750 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
751 return kAudioHardwareUnspecifiedError;
\r
753 return kAudioHardwareNoError;
\r
756 OSStatus xrunListener( AudioObjectID inDevice,
\r
758 const AudioObjectPropertyAddress properties[],
\r
759 void* handlePointer )
\r
761 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
762 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
763 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
764 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
765 handle->xrun[1] = true;
\r
767 handle->xrun[0] = true;
\r
771 return kAudioHardwareNoError;
\r
774 OSStatus rateListener( AudioObjectID inDevice,
\r
776 const AudioObjectPropertyAddress properties[],
\r
777 void* ratePointer )
\r
780 Float64 *rate = (Float64 *) ratePointer;
\r
781 UInt32 dataSize = sizeof( Float64 );
\r
782 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
783 kAudioObjectPropertyScopeGlobal,
\r
784 kAudioObjectPropertyElementMaster };
\r
785 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
786 return kAudioHardwareNoError;
\r
789 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
790 unsigned int firstChannel, unsigned int sampleRate,
\r
791 RtAudioFormat format, unsigned int *bufferSize,
\r
792 RtAudio::StreamOptions *options )
\r
795 unsigned int nDevices = getDeviceCount();
\r
796 if ( nDevices == 0 ) {
\r
797 // This should not happen because a check is made before this function is called.
\r
798 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
802 if ( device >= nDevices ) {
\r
803 // This should not happen because a check is made before this function is called.
\r
804 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
808 AudioDeviceID deviceList[ nDevices ];
\r
809 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
810 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
811 kAudioObjectPropertyScopeGlobal,
\r
812 kAudioObjectPropertyElementMaster };
\r
813 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
814 0, NULL, &dataSize, (void *) &deviceList );
\r
815 if ( result != noErr ) {
\r
816 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
820 AudioDeviceID id = deviceList[ device ];
\r
822 // Setup for stream mode.
\r
823 bool isInput = false;
\r
824 if ( mode == INPUT ) {
\r
826 property.mScope = kAudioDevicePropertyScopeInput;
\r
829 property.mScope = kAudioDevicePropertyScopeOutput;
\r
831 // Get the stream "configuration".
\r
832 AudioBufferList *bufferList = nil;
\r
834 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
835 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
836 if ( result != noErr || dataSize == 0 ) {
\r
837 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
838 errorText_ = errorStream_.str();
\r
842 // Allocate the AudioBufferList.
\r
843 bufferList = (AudioBufferList *) malloc( dataSize );
\r
844 if ( bufferList == NULL ) {
\r
845 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
849 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
850 if (result != noErr || dataSize == 0) {
\r
851 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
852 errorText_ = errorStream_.str();
\r
856 // Search for one or more streams that contain the desired number of
\r
857 // channels. CoreAudio devices can have an arbitrary number of
\r
858 // streams and each stream can have an arbitrary number of channels.
\r
859 // For each stream, a single buffer of interleaved samples is
\r
860 // provided. RtAudio prefers the use of one stream of interleaved
\r
861 // data or multiple consecutive single-channel streams. However, we
\r
862 // now support multiple consecutive multi-channel streams of
\r
863 // interleaved data as well.
\r
864 UInt32 iStream, offsetCounter = firstChannel;
\r
865 UInt32 nStreams = bufferList->mNumberBuffers;
\r
866 bool monoMode = false;
\r
867 bool foundStream = false;
\r
869 // First check that the device supports the requested number of
\r
871 UInt32 deviceChannels = 0;
\r
872 for ( iStream=0; iStream<nStreams; iStream++ )
\r
873 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
875 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
876 free( bufferList );
\r
877 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
878 errorText_ = errorStream_.str();
\r
882 // Look for a single stream meeting our needs.
\r
883 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
884 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
885 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
886 if ( streamChannels >= channels + offsetCounter ) {
\r
887 firstStream = iStream;
\r
888 channelOffset = offsetCounter;
\r
889 foundStream = true;
\r
892 if ( streamChannels > offsetCounter ) break;
\r
893 offsetCounter -= streamChannels;
\r
896 // If we didn't find a single stream above, then we should be able
\r
897 // to meet the channel specification with multiple streams.
\r
898 if ( foundStream == false ) {
\r
900 offsetCounter = firstChannel;
\r
901 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
902 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
903 if ( streamChannels > offsetCounter ) break;
\r
904 offsetCounter -= streamChannels;
\r
907 firstStream = iStream;
\r
908 channelOffset = offsetCounter;
\r
909 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
911 if ( streamChannels > 1 ) monoMode = false;
\r
912 while ( channelCounter > 0 ) {
\r
913 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
914 if ( streamChannels > 1 ) monoMode = false;
\r
915 channelCounter -= streamChannels;
\r
920 free( bufferList );
\r
922 // Determine the buffer size.
\r
923 AudioValueRange bufferRange;
\r
924 dataSize = sizeof( AudioValueRange );
\r
925 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
926 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
928 if ( result != noErr ) {
\r
929 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
930 errorText_ = errorStream_.str();
\r
934 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
935 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
936 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
938 // Set the buffer size. For multiple streams, I'm assuming we only
\r
939 // need to make this setting for the master channel.
\r
940 UInt32 theSize = (UInt32) *bufferSize;
\r
941 dataSize = sizeof( UInt32 );
\r
942 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
943 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
945 if ( result != noErr ) {
\r
946 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
947 errorText_ = errorStream_.str();
\r
951 // If attempting to setup a duplex stream, the bufferSize parameter
\r
952 // MUST be the same in both directions!
\r
953 *bufferSize = theSize;
\r
954 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
955 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
956 errorText_ = errorStream_.str();
\r
960 stream_.bufferSize = *bufferSize;
\r
961 stream_.nBuffers = 1;
\r
963 // Try to set "hog" mode ... it's not clear to me this is working.
\r
964 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
966 dataSize = sizeof( hog_pid );
\r
967 property.mSelector = kAudioDevicePropertyHogMode;
\r
968 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
969 if ( result != noErr ) {
\r
970 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
971 errorText_ = errorStream_.str();
\r
975 if ( hog_pid != getpid() ) {
\r
976 hog_pid = getpid();
\r
977 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
978 if ( result != noErr ) {
\r
979 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
980 errorText_ = errorStream_.str();
\r
986 // Check and if necessary, change the sample rate for the device.
\r
987 Float64 nominalRate;
\r
988 dataSize = sizeof( Float64 );
\r
989 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
990 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
992 if ( result != noErr ) {
\r
993 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
994 errorText_ = errorStream_.str();
\r
998 // Only change the sample rate if off by more than 1 Hz.
\r
999 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1001 // Set a property listener for the sample rate change
\r
1002 Float64 reportedRate = 0.0;
\r
1003 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1004 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1005 if ( result != noErr ) {
\r
1006 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1007 errorText_ = errorStream_.str();
\r
1011 nominalRate = (Float64) sampleRate;
\r
1012 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1014 if ( result != noErr ) {
\r
1015 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1016 errorText_ = errorStream_.str();
\r
1020 // Now wait until the reported nominal rate is what we just set.
\r
1021 UInt32 microCounter = 0;
\r
1022 while ( reportedRate != nominalRate ) {
\r
1023 microCounter += 5000;
\r
1024 if ( microCounter > 5000000 ) break;
\r
1028 // Remove the property listener.
\r
1029 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1031 if ( microCounter > 5000000 ) {
\r
1032 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1033 errorText_ = errorStream_.str();
\r
1038 // Now set the stream format for all streams. Also, check the
\r
1039 // physical format of the device and change that if necessary.
\r
1040 AudioStreamBasicDescription description;
\r
1041 dataSize = sizeof( AudioStreamBasicDescription );
\r
1042 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1043 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1044 if ( result != noErr ) {
\r
1045 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1046 errorText_ = errorStream_.str();
\r
1050 // Set the sample rate and data format id. However, only make the
\r
1051 // change if the sample rate is not within 1.0 of the desired
\r
1052 // rate and the format is not linear pcm.
\r
1053 bool updateFormat = false;
\r
1054 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1055 description.mSampleRate = (Float64) sampleRate;
\r
1056 updateFormat = true;
\r
1059 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1060 description.mFormatID = kAudioFormatLinearPCM;
\r
1061 updateFormat = true;
\r
1064 if ( updateFormat ) {
\r
1065 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1066 if ( result != noErr ) {
\r
1067 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1068 errorText_ = errorStream_.str();
\r
1073 // Now check the physical format.
\r
1074 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1075 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1076 if ( result != noErr ) {
\r
1077 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1078 errorText_ = errorStream_.str();
\r
1082 //std::cout << "Current physical stream format:" << std::endl;
\r
1083 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1084 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1085 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1086 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1088 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1089 description.mFormatID = kAudioFormatLinearPCM;
\r
1090 //description.mSampleRate = (Float64) sampleRate;
\r
1091 AudioStreamBasicDescription testDescription = description;
\r
1092 UInt32 formatFlags;
\r
1094 // We'll try higher bit rates first and then work our way down.
\r
1095 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1096 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1097 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1098 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1099 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1100 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1101 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1102 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1103 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1104 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1105 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1106 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1107 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1109 bool setPhysicalFormat = false;
\r
1110 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1111 testDescription = description;
\r
1112 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1113 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1114 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1115 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1117 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1118 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1119 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1120 if ( result == noErr ) {
\r
1121 setPhysicalFormat = true;
\r
1122 //std::cout << "Updated physical stream format:" << std::endl;
\r
1123 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1124 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1125 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1126 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1131 if ( !setPhysicalFormat ) {
\r
1132 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1133 errorText_ = errorStream_.str();
\r
1136 } // done setting virtual/physical formats.
\r
1138 // Get the stream / device latency.
\r
1140 dataSize = sizeof( UInt32 );
\r
1141 property.mSelector = kAudioDevicePropertyLatency;
\r
1142 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1143 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1144 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1146 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1147 errorText_ = errorStream_.str();
\r
1148 error( RtError::WARNING );
\r
1152 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1153 // always be presented in native-endian format, so we should never
\r
1154 // need to byte swap.
\r
1155 stream_.doByteSwap[mode] = false;
\r
1157 // From the CoreAudio documentation, PCM data must be supplied as
\r
1159 stream_.userFormat = format;
\r
1160 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1162 if ( streamCount == 1 )
\r
1163 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1164 else // multiple streams
\r
1165 stream_.nDeviceChannels[mode] = channels;
\r
1166 stream_.nUserChannels[mode] = channels;
\r
1167 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1168 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1169 else stream_.userInterleaved = true;
\r
1170 stream_.deviceInterleaved[mode] = true;
\r
1171 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1173 // Set flags for buffer conversion.
\r
1174 stream_.doConvertBuffer[mode] = false;
\r
1175 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1176 stream_.doConvertBuffer[mode] = true;
\r
1177 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1178 stream_.doConvertBuffer[mode] = true;
\r
1179 if ( streamCount == 1 ) {
\r
1180 if ( stream_.nUserChannels[mode] > 1 &&
\r
1181 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1182 stream_.doConvertBuffer[mode] = true;
\r
1184 else if ( monoMode && stream_.userInterleaved )
\r
1185 stream_.doConvertBuffer[mode] = true;
\r
1187 // Allocate our CoreHandle structure for the stream.
\r
1188 CoreHandle *handle = 0;
\r
1189 if ( stream_.apiHandle == 0 ) {
\r
1191 handle = new CoreHandle;
\r
1193 catch ( std::bad_alloc& ) {
\r
1194 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1198 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1199 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1202 stream_.apiHandle = (void *) handle;
\r
1205 handle = (CoreHandle *) stream_.apiHandle;
\r
1206 handle->iStream[mode] = firstStream;
\r
1207 handle->nStreams[mode] = streamCount;
\r
1208 handle->id[mode] = id;
\r
1210 // Allocate necessary internal buffers.
\r
1211 unsigned long bufferBytes;
\r
1212 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1213 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1214 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1215 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1216 if ( stream_.userBuffer[mode] == NULL ) {
\r
1217 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1221 // If possible, we will make use of the CoreAudio stream buffers as
\r
1222 // "device buffers". However, we can't do this if using multiple
\r
1224 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1226 bool makeBuffer = true;
\r
1227 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1228 if ( mode == INPUT ) {
\r
1229 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1230 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1231 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1235 if ( makeBuffer ) {
\r
1236 bufferBytes *= *bufferSize;
\r
1237 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1238 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1239 if ( stream_.deviceBuffer == NULL ) {
\r
1240 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1246 stream_.sampleRate = sampleRate;
\r
1247 stream_.device[mode] = device;
\r
1248 stream_.state = STREAM_STOPPED;
\r
1249 stream_.callbackInfo.object = (void *) this;
\r
1251 // Setup the buffer conversion information structure.
\r
1252 if ( stream_.doConvertBuffer[mode] ) {
\r
1253 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1254 else setConvertInfo( mode, channelOffset );
\r
1257 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1258 // Only one callback procedure per device.
\r
1259 stream_.mode = DUPLEX;
\r
1261 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1262 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1264 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1265 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1267 if ( result != noErr ) {
\r
1268 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1269 errorText_ = errorStream_.str();
\r
1272 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1273 stream_.mode = DUPLEX;
\r
1275 stream_.mode = mode;
\r
1278 // Setup the device property listener for over/underload.
\r
1279 property.mSelector = kAudioDeviceProcessorOverload;
\r
1280 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1286 pthread_cond_destroy( &handle->condition );
\r
1288 stream_.apiHandle = 0;
\r
1291 for ( int i=0; i<2; i++ ) {
\r
1292 if ( stream_.userBuffer[i] ) {
\r
1293 free( stream_.userBuffer[i] );
\r
1294 stream_.userBuffer[i] = 0;
\r
1298 if ( stream_.deviceBuffer ) {
\r
1299 free( stream_.deviceBuffer );
\r
1300 stream_.deviceBuffer = 0;
\r
// Close the currently open CoreAudio stream: stop any running device
// callback(s), detach/destroy the IOProc(s), free the user and device
// buffers and the CoreHandle, and reset the stream bookkeeping to the
// closed state.
// NOTE(review): this listing has gaps in its embedded line numbering
// (closing braces, an early return after the warning, `#else`/`#endif`
// lines and the `delete handle;` appear to be missing) — verify any
// change against the canonical RtAudio source file.
1306 void RtApiCore :: closeStream( void )
\r
// Closing a stream that was never opened is a warning, not an error.
1308 if ( stream_.state == STREAM_CLOSED ) {
\r
1309 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1310 error( RtError::WARNING );
\r
1314 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
// Tear down the output-side device callback first (handle->id[0]).
1315 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1316 if ( stream_.state == STREAM_RUNNING )
\r
1317 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1318 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1319 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1321 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1322 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
// Input side is torn down separately only when it is a distinct device
// (a same-device DUPLEX stream shares one callback).
1326 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1327 if ( stream_.state == STREAM_RUNNING )
\r
1328 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1329 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1330 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1332 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1333 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
// Release the per-direction user buffers (index 0 = output, 1 = input).
1337 for ( int i=0; i<2; i++ ) {
\r
1338 if ( stream_.userBuffer[i] ) {
\r
1339 free( stream_.userBuffer[i] );
\r
1340 stream_.userBuffer[i] = 0;
\r
1344 if ( stream_.deviceBuffer ) {
\r
1345 free( stream_.deviceBuffer );
\r
1346 stream_.deviceBuffer = 0;
\r
1349 // Destroy pthread condition variable.
\r
1350 pthread_cond_destroy( &handle->condition );
\r
1352 stream_.apiHandle = 0;
\r
1354 stream_.mode = UNINITIALIZED;
\r
1355 stream_.state = STREAM_CLOSED;
\r
// Start the open stream: kick off the CoreAudio IOProc on the output
// device and, for a two-device duplex setup, on the input device as well;
// then reset the drain state and mark the stream running.
// NOTE(review): gaps in the embedded line numbering indicate dropped
// lines (closing braces, `goto unlock;`-style error paths) — verify
// against the canonical RtAudio source file.
1358 void RtApiCore :: startStream( void )
\r
// Starting an already-running stream is only a warning.
1361 if ( stream_.state == STREAM_RUNNING ) {
\r
1362 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1363 error( RtError::WARNING );
\r
1367 OSStatus result = noErr;
\r
1368 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1369 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1371 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1372 if ( result != noErr ) {
\r
1373 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1374 errorText_ = errorStream_.str();
\r
// Separate input-device start is needed only when input is a different
// physical device from the output.
1379 if ( stream_.mode == INPUT ||
\r
1380 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1382 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1383 if ( result != noErr ) {
\r
1384 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1385 errorText_ = errorStream_.str();
\r
// Fresh run: no drain in progress.
1390 handle->drainCounter = 0;
\r
1391 handle->internalDrain = false;
\r
1392 stream_.state = STREAM_RUNNING;
\r
1395 if ( result == noErr ) return;
\r
// Any CoreAudio failure above falls through to a system-error report.
1396 error( RtError::SYSTEM_ERROR );
\r
// Stop the running stream gracefully: if output is active and no drain is
// already in progress, request a drain (drainCounter = 2) and wait on the
// handle's condition variable until callbackEvent() signals that the last
// buffer of zeros has been played, then stop the device callback(s).
// NOTE(review): gaps in the embedded numbering indicate dropped lines
// (MUTEX lock/unlock around the condition wait, closing braces) — verify
// against the canonical RtAudio source file.
1399 void RtApiCore :: stopStream( void )
\r
1402 if ( stream_.state == STREAM_STOPPED ) {
\r
1403 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1404 error( RtError::WARNING );
\r
1408 OSStatus result = noErr;
\r
1409 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1410 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// drainCounter == 0 means no drain was requested yet by the callback;
// request one and block until the callback signals completion.
1412 if ( handle->drainCounter == 0 ) {
\r
1413 handle->drainCounter = 2;
\r
1414 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1417 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1418 if ( result != noErr ) {
\r
1419 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1420 errorText_ = errorStream_.str();
\r
// Stop the input device separately only when it differs from the output.
1425 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1427 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1428 if ( result != noErr ) {
\r
1429 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1430 errorText_ = errorStream_.str();
\r
1435 stream_.state = STREAM_STOPPED;
\r
1438 if ( result == noErr ) return;
\r
1439 error( RtError::SYSTEM_ERROR );
\r
// Abort the running stream: unlike stopStream(), do not drain queued
// output — set drainCounter past the "request" stage so the callback
// stops immediately without waiting for zeros to play out.
// NOTE(review): the listing appears truncated here (the embedded
// numbering jumps from 1452 to the comment at 1457); the canonical
// function presumably finishes by invoking stopStream() — confirm.
1442 void RtApiCore :: abortStream( void )
\r
1445 if ( stream_.state == STREAM_STOPPED ) {
\r
1446 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1447 error( RtError::WARNING );
\r
1451 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
// Skip the "write zeros" drain phase entirely.
1452 handle->drainCounter = 2;
\r
1457 // This function will be called by a spawned thread when the user
\r
1458 // callback function signals that the stream should be stopped or
\r
1459 // aborted. It is better to handle it this way because the
\r
1460 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1461 // function is called.
\r
// Thread entry point spawned from callbackEvent() when the user callback
// requested an internal drain; `ptr` is the stream's CallbackInfo, whose
// `object` member is the owning RtApiCore instance.
// NOTE(review): the opening/closing braces are absent from this listing
// (embedded numbering gaps) — verify against the canonical source file.
1462 extern "C" void *coreStopStream( void *ptr )
\r
1464 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1465 RtApiCore *object = (RtApiCore *) info->object;
\r
// Perform the actual stop outside the audio callback, then end the thread.
1467 object->stopStream();
\r
1468 pthread_exit( NULL );
\r
// Per-buffer CoreAudio IOProc worker. For the device identified by
// `deviceId` it: (1) finishes a pending drain by signaling/spawning the
// stopper; (2) invokes the user callback (once per cycle, on the output
// device for duplex) to refill the user buffers and collect xrun status;
// (3) copies/converts user data into the output AudioBufferList and/or
// out of the input AudioBufferList, handling the one-stream, mono
// (non-interleaved) multi-stream, and interleaved multi-stream layouts;
// (4) advances the stream time. Returns SUCCESS when the stream is
// already stopped/stopping.
// NOTE(review): this listing has numbering gaps (closing braces, the
// `return SUCCESS;` paths, MUTEX macros) — verify edits against the
// canonical RtAudio source file before relying on exact structure.
1471 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1472 const AudioBufferList *inBufferList,
\r
1473 const AudioBufferList *outBufferList )
\r
1475 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1476 if ( stream_.state == STREAM_CLOSED ) {
\r
1477 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1478 error( RtError::WARNING );
\r
1482 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1483 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1485 // Check if we were draining the stream and signal is finished.
\r
1486 if ( handle->drainCounter > 3 ) {
\r
1488 stream_.state = STREAM_STOPPING;
\r
// Internal drain (user callback returned 1): stop from a helper thread so
// this IOProc can return first. External drain: wake the blocked
// stopStream() caller.
1489 if ( handle->internalDrain == true )
\r
1490 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1491 else // external call to stopStream()
\r
1492 pthread_cond_signal( &handle->condition );
\r
1496 AudioDeviceID outputDevice = handle->id[0];
\r
1498 // Invoke user callback to get fresh output data UNLESS we are
\r
1499 // draining stream or duplex mode AND the input/output devices are
\r
1500 // different AND this function is called for the input device.
\r
1501 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1502 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1503 double streamTime = getStreamTime();
\r
// Report and clear any over/underflow flags set by the xrun listener.
1504 RtAudioStreamStatus status = 0;
\r
1505 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1506 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1507 handle->xrun[0] = false;
\r
1509 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1510 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1511 handle->xrun[1] = false;
\r
// Callback return: 0 = continue, 1 = drain then stop, 2 = abort now.
1514 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1515 stream_.bufferSize, streamTime, status, info->userData );
\r
1516 if ( cbReturnValue == 2 ) {
\r
1517 stream_.state = STREAM_STOPPING;
\r
1518 handle->drainCounter = 2;
\r
1522 else if ( cbReturnValue == 1 ) {
\r
1523 handle->drainCounter = 1;
\r
1524 handle->internalDrain = true;
\r
// ---- Output side: move user data into the CoreAudio buffers. ----
1528 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1530 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1532 if ( handle->nStreams[0] == 1 ) {
\r
1533 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1535 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1537 else { // fill multiple streams with zeros
\r
1538 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1539 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1541 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1545 else if ( handle->nStreams[0] == 1 ) {
\r
1546 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1547 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1548 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1550 else { // copy from user buffer
\r
1551 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1552 stream_.userBuffer[0],
\r
1553 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1556 else { // fill multiple streams
\r
1557 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1558 if ( stream_.doConvertBuffer[0] ) {
\r
1559 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1560 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
// Mono mode: one single-channel CoreAudio stream per user channel.
1563 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1564 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1565 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1566 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1567 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1570 else { // fill multiple multi-channel streams with interleaved data
\r
1571 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1572 Float32 *out, *in;
\r
1574 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1575 UInt32 inChannels = stream_.nUserChannels[0];
\r
1576 if ( stream_.doConvertBuffer[0] ) {
\r
1577 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1578 inChannels = stream_.nDeviceChannels[0];
\r
// inOffset: distance between consecutive samples of one channel in the
// source buffer (1 when interleaved, bufferSize when planar).
1581 if ( inInterleaved ) inOffset = 1;
\r
1582 else inOffset = stream_.bufferSize;
\r
1584 channelsLeft = inChannels;
\r
1585 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1587 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1588 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1591 // Account for possible channel offset in first stream
\r
1592 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1593 streamChannels -= stream_.channelOffset[0];
\r
1594 outJump = stream_.channelOffset[0];
\r
1598 // Account for possible unfilled channels at end of the last stream
\r
1599 if ( streamChannels > channelsLeft ) {
\r
1600 outJump = streamChannels - channelsLeft;
\r
1601 streamChannels = channelsLeft;
\r
1604 // Determine input buffer offsets and skips
\r
1605 if ( inInterleaved ) {
\r
1606 inJump = inChannels;
\r
1607 in += inChannels - channelsLeft;
\r
1611 in += (inChannels - channelsLeft) * inOffset;
\r
1614 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1615 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1616 *out++ = in[j*inOffset];
\r
1621 channelsLeft -= streamChannels;
\r
// A non-zero drainCounter counts buffers of silence already emitted;
// once it exceeds 3 the drain-finished branch above fires.
1626 if ( handle->drainCounter ) {
\r
1627 handle->drainCounter++;
\r
// ---- Input side: move CoreAudio buffers into the user buffer. ----
1632 AudioDeviceID inputDevice;
\r
1633 inputDevice = handle->id[1];
\r
1634 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1636 if ( handle->nStreams[1] == 1 ) {
\r
1637 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1638 convertBuffer( stream_.userBuffer[1],
\r
1639 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1640 stream_.convertInfo[1] );
\r
1642 else { // copy to user buffer
\r
1643 memcpy( stream_.userBuffer[1],
\r
1644 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1645 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1648 else { // read from multiple streams
\r
1649 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1650 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1652 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1653 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1654 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1655 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1656 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1659 else { // read from multiple multi-channel streams
\r
1660 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1661 Float32 *out, *in;
\r
1663 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1664 UInt32 outChannels = stream_.nUserChannels[1];
\r
1665 if ( stream_.doConvertBuffer[1] ) {
\r
1666 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1667 outChannels = stream_.nDeviceChannels[1];
\r
// Mirror of the output path: outOffset is the per-channel sample stride
// in the destination buffer.
1670 if ( outInterleaved ) outOffset = 1;
\r
1671 else outOffset = stream_.bufferSize;
\r
1673 channelsLeft = outChannels;
\r
1674 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1676 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1677 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1680 // Account for possible channel offset in first stream
\r
1681 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1682 streamChannels -= stream_.channelOffset[1];
\r
1683 inJump = stream_.channelOffset[1];
\r
1687 // Account for possible unread channels at end of the last stream
\r
1688 if ( streamChannels > channelsLeft ) {
\r
1689 inJump = streamChannels - channelsLeft;
\r
1690 streamChannels = channelsLeft;
\r
1693 // Determine output buffer offsets and skips
\r
1694 if ( outInterleaved ) {
\r
1695 outJump = outChannels;
\r
1696 out += outChannels - channelsLeft;
\r
1700 out += (outChannels - channelsLeft) * outOffset;
\r
1703 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1704 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1705 out[j*outOffset] = *in++;
\r
1710 channelsLeft -= streamChannels;
\r
1714 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1715 convertBuffer( stream_.userBuffer[1],
\r
1716 stream_.deviceBuffer,
\r
1717 stream_.convertInfo[1] );
\r
1723 //MUTEX_UNLOCK( &stream_.mutex );
\r
// Advance the stream's running sample-time clock by one buffer.
1725 RtApi::tickStreamTime();
\r
// Map a CoreAudio OSStatus error code to its symbolic constant name for
// use in human-readable error messages; unknown codes yield a generic
// string.
// NOTE(review): the `switch ( code ) {` line and closing braces are
// absent from this listing (embedded numbering gaps) — verify against
// the canonical source file.
1729 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1733 case kAudioHardwareNotRunningError:
\r
1734 return "kAudioHardwareNotRunningError";
\r
1736 case kAudioHardwareUnspecifiedError:
\r
1737 return "kAudioHardwareUnspecifiedError";
\r
1739 case kAudioHardwareUnknownPropertyError:
\r
1740 return "kAudioHardwareUnknownPropertyError";
\r
1742 case kAudioHardwareBadPropertySizeError:
\r
1743 return "kAudioHardwareBadPropertySizeError";
\r
1745 case kAudioHardwareIllegalOperationError:
\r
1746 return "kAudioHardwareIllegalOperationError";
\r
1748 case kAudioHardwareBadObjectError:
\r
1749 return "kAudioHardwareBadObjectError";
\r
1751 case kAudioHardwareBadDeviceError:
\r
1752 return "kAudioHardwareBadDeviceError";
\r
1754 case kAudioHardwareBadStreamError:
\r
1755 return "kAudioHardwareBadStreamError";
\r
1757 case kAudioHardwareUnsupportedOperationError:
\r
1758 return "kAudioHardwareUnsupportedOperationError";
\r
1760 case kAudioDeviceUnsupportedFormatError:
\r
1761 return "kAudioDeviceUnsupportedFormatError";
\r
1763 case kAudioDevicePermissionsError:
\r
1764 return "kAudioDevicePermissionsError";
\r
// Fallback for any code not covered above.
1767 return "CoreAudio unknown error";
\r
1771 //******************** End of __MACOSX_CORE__ *********************//
\r
1774 #if defined(__UNIX_JACK__)
\r
1776 // JACK is a low-latency audio server, originally written for the
\r
1777 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1778 // connect a number of different applications to an audio device, as
\r
1779 // well as allowing them to share audio between themselves.
\r
1781 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1782 // have ports connected to the server. The JACK server is typically
\r
1783 // started in a terminal as follows:
\r
1785 // .jackd -d alsa -d hw:0
\r
1787 // or through an interface program such as qjackctl. Many of the
\r
1788 // parameters normally set for a stream are fixed by the JACK server
\r
1789 // and can be specified when the JACK server is started. In
\r
1792 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1794 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1795 // frames, and number of buffers = 4. Once the server is running, it
\r
1796 // is not possible to override these values. If the values are not
\r
1797 // specified in the command-line, the JACK server uses default values.
\r
1799 // The JACK server does not have to be running when an instance of
\r
1800 // RtApiJack is created, though the function getDeviceCount() will
\r
1801 // report 0 devices found until JACK has been started. When no
\r
1802 // devices are available (i.e., the JACK server is not running), a
\r
1803 // stream cannot be opened.
\r
1805 #include <jack/jack.h>
\r
1806 #include <unistd.h>
\r
1809 // A structure to hold various information related to the Jack API
\r
1810 // implementation.
\r
1811 struct JackHandle {
\r
1812 jack_client_t *client;
\r
1813 jack_port_t **ports[2];
\r
1814 std::string deviceName[2];
\r
1816 pthread_cond_t condition;
\r
1817 int drainCounter; // Tracks callback counts when draining
\r
1818 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1821 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
1824 ThreadHandle threadId;
\r
1825 void jackSilentError( const char * ) {};
\r
1827 RtApiJack :: RtApiJack()
\r
1829 // Nothing to do here.
\r
1830 #if !defined(__RTAUDIO_DEBUG__)
\r
1831 // Turn off Jack's internal error reporting.
\r
1832 jack_set_error_function( &jackSilentError );
\r
1836 RtApiJack :: ~RtApiJack()
\r
1838 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1841 unsigned int RtApiJack :: getDeviceCount( void )
\r
1843 // See if we can become a jack client.
\r
1844 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1845 jack_status_t *status = NULL;
\r
1846 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1847 if ( client == 0 ) return 0;
\r
1849 const char **ports;
\r
1850 std::string port, previousPort;
\r
1851 unsigned int nChannels = 0, nDevices = 0;
\r
1852 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1854 // Parse the port names up to the first colon (:).
\r
1855 size_t iColon = 0;
\r
1857 port = (char *) ports[ nChannels ];
\r
1858 iColon = port.find(":");
\r
1859 if ( iColon != std::string::npos ) {
\r
1860 port = port.substr( 0, iColon + 1 );
\r
1861 if ( port != previousPort ) {
\r
1863 previousPort = port;
\r
1866 } while ( ports[++nChannels] );
\r
1870 jack_client_close( client );
\r
1874 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1876 RtAudio::DeviceInfo info;
\r
1877 info.probed = false;
\r
1879 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1880 jack_status_t *status = NULL;
\r
1881 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1882 if ( client == 0 ) {
\r
1883 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1884 error( RtError::WARNING );
\r
1888 const char **ports;
\r
1889 std::string port, previousPort;
\r
1890 unsigned int nPorts = 0, nDevices = 0;
\r
1891 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1893 // Parse the port names up to the first colon (:).
\r
1894 size_t iColon = 0;
\r
1896 port = (char *) ports[ nPorts ];
\r
1897 iColon = port.find(":");
\r
1898 if ( iColon != std::string::npos ) {
\r
1899 port = port.substr( 0, iColon );
\r
1900 if ( port != previousPort ) {
\r
1901 if ( nDevices == device ) info.name = port;
\r
1903 previousPort = port;
\r
1906 } while ( ports[++nPorts] );
\r
1910 if ( device >= nDevices ) {
\r
1911 jack_client_close( client );
\r
1912 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1913 error( RtError::INVALID_USE );
\r
1916 // Get the current jack server sample rate.
\r
1917 info.sampleRates.clear();
\r
1918 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1920 // Count the available ports containing the client name as device
\r
1921 // channels. Jack "input ports" equal RtAudio output channels.
\r
1922 unsigned int nChannels = 0;
\r
1923 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1925 while ( ports[ nChannels ] ) nChannels++;
\r
1927 info.outputChannels = nChannels;
\r
1930 // Jack "output ports" equal RtAudio input channels.
\r
1932 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1934 while ( ports[ nChannels ] ) nChannels++;
\r
1936 info.inputChannels = nChannels;
\r
1939 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
1940 jack_client_close(client);
\r
1941 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
1942 error( RtError::WARNING );
\r
1946 // If device opens for both playback and capture, we determine the channels.
\r
1947 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
1948 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
1950 // Jack always uses 32-bit floats.
\r
1951 info.nativeFormats = RTAUDIO_FLOAT32;
\r
1953 // Jack doesn't provide default devices so we'll use the first available one.
\r
1954 if ( device == 0 && info.outputChannels > 0 )
\r
1955 info.isDefaultOutput = true;
\r
1956 if ( device == 0 && info.inputChannels > 0 )
\r
1957 info.isDefaultInput = true;
\r
1959 jack_client_close(client);
\r
1960 info.probed = true;
\r
1964 int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
1966 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
1968 RtApiJack *object = (RtApiJack *) info->object;
\r
1969 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
1974 // This function will be called by a spawned thread when the Jack
\r
1975 // server signals that it is shutting down. It is necessary to handle
\r
1976 // it this way because the jackShutdown() function must return before
\r
1977 // the jack_deactivate() function (in closeStream()) will return.
\r
1978 extern "C" void *jackCloseStream( void *ptr )
\r
1980 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1981 RtApiJack *object = (RtApiJack *) info->object;
\r
1983 object->closeStream();
\r
1985 pthread_exit( NULL );
\r
1987 void jackShutdown( void *infoPointer )
\r
1989 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
1990 RtApiJack *object = (RtApiJack *) info->object;
\r
1992 // Check current stream state. If stopped, then we'll assume this
\r
1993 // was called as a result of a call to RtApiJack::stopStream (the
\r
1994 // deactivation of a client handle causes this function to be called).
\r
1995 // If not, we'll assume the Jack server is shutting down or some
\r
1996 // other problem occurred and we should close the stream.
\r
1997 if ( object->isStreamRunning() == false ) return;
\r
1999 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2000 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2003 int jackXrun( void *infoPointer )
\r
2005 JackHandle *handle = (JackHandle *) infoPointer;
\r
2007 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2008 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2013 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2014 unsigned int firstChannel, unsigned int sampleRate,
\r
2015 RtAudioFormat format, unsigned int *bufferSize,
\r
2016 RtAudio::StreamOptions *options )
\r
2018 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2020 // Look for jack server and try to become a client (only do once per stream).
\r
2021 jack_client_t *client = 0;
\r
2022 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2023 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2024 jack_status_t *status = NULL;
\r
2025 if ( options && !options->streamName.empty() )
\r
2026 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2028 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2029 if ( client == 0 ) {
\r
2030 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2031 error( RtError::WARNING );
\r
2036 // The handle must have been created on an earlier pass.
\r
2037 client = handle->client;
\r
2040 const char **ports;
\r
2041 std::string port, previousPort, deviceName;
\r
2042 unsigned int nPorts = 0, nDevices = 0;
\r
2043 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2045 // Parse the port names up to the first colon (:).
\r
2046 size_t iColon = 0;
\r
2048 port = (char *) ports[ nPorts ];
\r
2049 iColon = port.find(":");
\r
2050 if ( iColon != std::string::npos ) {
\r
2051 port = port.substr( 0, iColon );
\r
2052 if ( port != previousPort ) {
\r
2053 if ( nDevices == device ) deviceName = port;
\r
2055 previousPort = port;
\r
2058 } while ( ports[++nPorts] );
\r
2062 if ( device >= nDevices ) {
\r
2063 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2067 // Count the available ports containing the client name as device
\r
2068 // channels. Jack "input ports" equal RtAudio output channels.
\r
2069 unsigned int nChannels = 0;
\r
2070 unsigned long flag = JackPortIsInput;
\r
2071 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2072 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2074 while ( ports[ nChannels ] ) nChannels++;
\r
2078 // Compare the jack ports for specified client to the requested number of channels.
\r
2079 if ( nChannels < (channels + firstChannel) ) {
\r
2080 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2081 errorText_ = errorStream_.str();
\r
2085 // Check the jack server sample rate.
\r
2086 unsigned int jackRate = jack_get_sample_rate( client );
\r
2087 if ( sampleRate != jackRate ) {
\r
2088 jack_client_close( client );
\r
2089 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2090 errorText_ = errorStream_.str();
\r
2093 stream_.sampleRate = jackRate;
\r
2095 // Get the latency of the JACK port.
\r
2096 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2097 if ( ports[ firstChannel ] ) {
\r
2098 // Added by Ge Wang
\r
2099 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2100 // the range (usually the min and max are equal)
\r
2101 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2102 // get the latency range
\r
2103 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2104 // be optimistic, use the min!
\r
2105 stream_.latency[mode] = latrange.min;
\r
2106 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2110 // The jack server always uses 32-bit floating-point data.
\r
2111 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2112 stream_.userFormat = format;
\r
2114 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2115 else stream_.userInterleaved = true;
\r
2117 // Jack always uses non-interleaved buffers.
\r
2118 stream_.deviceInterleaved[mode] = false;
\r
2120 // Jack always provides host byte-ordered data.
\r
2121 stream_.doByteSwap[mode] = false;
\r
2123 // Get the buffer size. The buffer size and number of buffers
\r
2124 // (periods) is set when the jack server is started.
\r
2125 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2126 *bufferSize = stream_.bufferSize;
\r
2128 stream_.nDeviceChannels[mode] = channels;
\r
2129 stream_.nUserChannels[mode] = channels;
\r
2131 // Set flags for buffer conversion.
\r
2132 stream_.doConvertBuffer[mode] = false;
\r
2133 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2134 stream_.doConvertBuffer[mode] = true;
\r
2135 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2136 stream_.nUserChannels[mode] > 1 )
\r
2137 stream_.doConvertBuffer[mode] = true;
\r
2139 // Allocate our JackHandle structure for the stream.
\r
2140 if ( handle == 0 ) {
\r
2142 handle = new JackHandle;
\r
2144 catch ( std::bad_alloc& ) {
\r
2145 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2149 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2150 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2153 stream_.apiHandle = (void *) handle;
\r
2154 handle->client = client;
\r
2156 handle->deviceName[mode] = deviceName;
\r
2158 // Allocate necessary internal buffers.
\r
2159 unsigned long bufferBytes;
\r
2160 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2161 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2162 if ( stream_.userBuffer[mode] == NULL ) {
\r
2163 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2167 if ( stream_.doConvertBuffer[mode] ) {
\r
2169 bool makeBuffer = true;
\r
2170 if ( mode == OUTPUT )
\r
2171 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2172 else { // mode == INPUT
\r
2173 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2174 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2175 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2176 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2180 if ( makeBuffer ) {
\r
2181 bufferBytes *= *bufferSize;
\r
2182 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2183 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2184 if ( stream_.deviceBuffer == NULL ) {
\r
2185 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2191 // Allocate memory for the Jack ports (channels) identifiers.
\r
2192 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2193 if ( handle->ports[mode] == NULL ) {
\r
2194 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2198 stream_.device[mode] = device;
\r
2199 stream_.channelOffset[mode] = firstChannel;
\r
2200 stream_.state = STREAM_STOPPED;
\r
2201 stream_.callbackInfo.object = (void *) this;
\r
2203 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2204 // We had already set up the stream for output.
\r
2205 stream_.mode = DUPLEX;
\r
2207 stream_.mode = mode;
\r
2208 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2209 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2210 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2213 // Register our ports.
\r
2215 if ( mode == OUTPUT ) {
\r
2216 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2217 snprintf( label, 64, "outport %d", i );
\r
2218 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2219 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2223 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2224 snprintf( label, 64, "inport %d", i );
\r
2225 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2226 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2230 // Setup the buffer conversion information structure. We don't use
\r
2231 // buffers to do channel offsets, so we override that parameter
\r
2233 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2239 pthread_cond_destroy( &handle->condition );
\r
2240 jack_client_close( handle->client );
\r
2242 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2243 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2246 stream_.apiHandle = 0;
\r
2249 for ( int i=0; i<2; i++ ) {
\r
2250 if ( stream_.userBuffer[i] ) {
\r
2251 free( stream_.userBuffer[i] );
\r
2252 stream_.userBuffer[i] = 0;
\r
2256 if ( stream_.deviceBuffer ) {
\r
2257 free( stream_.deviceBuffer );
\r
2258 stream_.deviceBuffer = 0;
\r
2264 void RtApiJack :: closeStream( void )
\r
2266 if ( stream_.state == STREAM_CLOSED ) {
\r
2267 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2268 error( RtError::WARNING );
\r
2272 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2275 if ( stream_.state == STREAM_RUNNING )
\r
2276 jack_deactivate( handle->client );
\r
2278 jack_client_close( handle->client );
\r
2282 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2283 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2284 pthread_cond_destroy( &handle->condition );
\r
2286 stream_.apiHandle = 0;
\r
2289 for ( int i=0; i<2; i++ ) {
\r
2290 if ( stream_.userBuffer[i] ) {
\r
2291 free( stream_.userBuffer[i] );
\r
2292 stream_.userBuffer[i] = 0;
\r
2296 if ( stream_.deviceBuffer ) {
\r
2297 free( stream_.deviceBuffer );
\r
2298 stream_.deviceBuffer = 0;
\r
2301 stream_.mode = UNINITIALIZED;
\r
2302 stream_.state = STREAM_CLOSED;
\r
2305 void RtApiJack :: startStream( void )
\r
2308 if ( stream_.state == STREAM_RUNNING ) {
\r
2309 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2310 error( RtError::WARNING );
\r
2314 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2315 int result = jack_activate( handle->client );
\r
2317 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2321 const char **ports;
\r
2323 // Get the list of available ports.
\r
2324 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2326 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2327 if ( ports == NULL) {
\r
2328 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2332 // Now make the port connections. Since RtAudio wasn't designed to
\r
2333 // allow the user to select particular channels of a device, we'll
\r
2334 // just open the first "nChannels" ports with offset.
\r
2335 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2337 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2338 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2341 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2348 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2350 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2351 if ( ports == NULL) {
\r
2352 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2356 // Now make the port connections. See note above.
\r
2357 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2359 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2360 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2363 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2370 handle->drainCounter = 0;
\r
2371 handle->internalDrain = false;
\r
2372 stream_.state = STREAM_RUNNING;
\r
2375 if ( result == 0 ) return;
\r
2376 error( RtError::SYSTEM_ERROR );
\r
2379 void RtApiJack :: stopStream( void )
\r
2382 if ( stream_.state == STREAM_STOPPED ) {
\r
2383 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2384 error( RtError::WARNING );
\r
2388 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2389 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2391 if ( handle->drainCounter == 0 ) {
\r
2392 handle->drainCounter = 2;
\r
2393 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2397 jack_deactivate( handle->client );
\r
2398 stream_.state = STREAM_STOPPED;
\r
2401 void RtApiJack :: abortStream( void )
\r
2404 if ( stream_.state == STREAM_STOPPED ) {
\r
2405 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2406 error( RtError::WARNING );
\r
2410 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2411 handle->drainCounter = 2;
\r
2416 // This function will be called by a spawned thread when the user
\r
2417 // callback function signals that the stream should be stopped or
\r
2418 // aborted. It is necessary to handle it this way because the
\r
2419 // callbackEvent() function must return before the jack_deactivate()
\r
2420 // function will return.
\r
2421 extern "C" void *jackStopStream( void *ptr )
\r
2423 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2424 RtApiJack *object = (RtApiJack *) info->object;
\r
2426 object->stopStream();
\r
2427 pthread_exit( NULL );
\r
2430 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2432 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2433 if ( stream_.state == STREAM_CLOSED ) {
\r
2434 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2435 error( RtError::WARNING );
\r
2438 if ( stream_.bufferSize != nframes ) {
\r
2439 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2440 error( RtError::WARNING );
\r
2444 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2445 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2447 // Check if we were draining the stream and signal is finished.
\r
2448 if ( handle->drainCounter > 3 ) {
\r
2450 stream_.state = STREAM_STOPPING;
\r
2451 if ( handle->internalDrain == true )
\r
2452 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2454 pthread_cond_signal( &handle->condition );
\r
2458 // Invoke user callback first, to get fresh output data.
\r
2459 if ( handle->drainCounter == 0 ) {
\r
2460 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2461 double streamTime = getStreamTime();
\r
2462 RtAudioStreamStatus status = 0;
\r
2463 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2464 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2465 handle->xrun[0] = false;
\r
2467 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2468 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2469 handle->xrun[1] = false;
\r
2471 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2472 stream_.bufferSize, streamTime, status, info->userData );
\r
2473 if ( cbReturnValue == 2 ) {
\r
2474 stream_.state = STREAM_STOPPING;
\r
2475 handle->drainCounter = 2;
\r
2477 pthread_create( &id, NULL, jackStopStream, info );
\r
2480 else if ( cbReturnValue == 1 ) {
\r
2481 handle->drainCounter = 1;
\r
2482 handle->internalDrain = true;
\r
2486 jack_default_audio_sample_t *jackbuffer;
\r
2487 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2488 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2490 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2492 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2493 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2494 memset( jackbuffer, 0, bufferBytes );
\r
2498 else if ( stream_.doConvertBuffer[0] ) {
\r
2500 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2502 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2503 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2504 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2507 else { // no buffer conversion
\r
2508 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2509 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2510 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2514 if ( handle->drainCounter ) {
\r
2515 handle->drainCounter++;
\r
2520 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2522 if ( stream_.doConvertBuffer[1] ) {
\r
2523 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2524 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2525 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2527 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2529 else { // no buffer conversion
\r
2530 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2531 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2532 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2538 RtApi::tickStreamTime();
\r
2541 //******************** End of __UNIX_JACK__ *********************//
\r
2544 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2546 // The ASIO API is designed around a callback scheme, so this
\r
2547 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2548 // Jack. The primary constraint with ASIO is that it only allows
\r
2549 // access to a single driver at a time. Thus, it is not possible to
\r
2550 // have more than one simultaneous RtAudio stream.
\r
2552 // This implementation also requires a number of external ASIO files
\r
2553 // and a few global variables. The ASIO callback scheme does not
\r
2554 // allow for the passing of user data, so we must create a global
\r
2555 // pointer to our callbackInfo structure.
\r
2557 // On unix systems, we make use of a pthread condition variable.
\r
2558 // Since there is no equivalent in Windows, I hacked something based
\r
2559 // on information found in
\r
2560 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2562 #include "asiosys.h"
\r
2564 #include "iasiothiscallresolver.h"
\r
2565 #include "asiodrivers.h"
\r
2568 AsioDrivers drivers;
\r
2569 ASIOCallbacks asioCallbacks;
\r
2570 ASIODriverInfo driverInfo;
\r
2571 CallbackInfo *asioCallbackInfo;
\r
2574 struct AsioHandle {
\r
2575 int drainCounter; // Tracks callback counts when draining
\r
2576 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2577 ASIOBufferInfo *bufferInfos;
\r
2581 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2584 // Function declarations (definitions at end of section)
\r
2585 static const char* getAsioErrorString( ASIOError result );
\r
2586 void sampleRateChanged( ASIOSampleRate sRate );
\r
2587 long asioMessages( long selector, long value, void* message, double* opt );
\r
2589 RtApiAsio :: RtApiAsio()
\r
2591 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2592 // CoInitialize beforehand, but it must be for appartment threading
\r
2593 // (in which case, CoInitilialize will return S_FALSE here).
\r
2594 coInitialized_ = false;
\r
2595 HRESULT hr = CoInitialize( NULL );
\r
2596 if ( FAILED(hr) ) {
\r
2597 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2598 error( RtError::WARNING );
\r
2600 coInitialized_ = true;
\r
2602 drivers.removeCurrentDriver();
\r
2603 driverInfo.asioVersion = 2;
\r
2605 // See note in DirectSound implementation about GetDesktopWindow().
\r
2606 driverInfo.sysRef = GetForegroundWindow();
\r
2609 RtApiAsio :: ~RtApiAsio()
\r
2611 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2612 if ( coInitialized_ ) CoUninitialize();
\r
2615 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2617 return (unsigned int) drivers.asioGetNumDev();
\r
2620 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2622 RtAudio::DeviceInfo info;
\r
2623 info.probed = false;
\r
2626 unsigned int nDevices = getDeviceCount();
\r
2627 if ( nDevices == 0 ) {
\r
2628 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2629 error( RtError::INVALID_USE );
\r
2632 if ( device >= nDevices ) {
\r
2633 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2634 error( RtError::INVALID_USE );
\r
2637 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2638 if ( stream_.state != STREAM_CLOSED ) {
\r
2639 if ( device >= devices_.size() ) {
\r
2640 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2641 error( RtError::WARNING );
\r
2644 return devices_[ device ];
\r
2647 char driverName[32];
\r
2648 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2649 if ( result != ASE_OK ) {
\r
2650 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2651 errorText_ = errorStream_.str();
\r
2652 error( RtError::WARNING );
\r
2656 info.name = driverName;
\r
2658 if ( !drivers.loadDriver( driverName ) ) {
\r
2659 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2660 errorText_ = errorStream_.str();
\r
2661 error( RtError::WARNING );
\r
2665 result = ASIOInit( &driverInfo );
\r
2666 if ( result != ASE_OK ) {
\r
2667 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2668 errorText_ = errorStream_.str();
\r
2669 error( RtError::WARNING );
\r
2673 // Determine the device channel information.
\r
2674 long inputChannels, outputChannels;
\r
2675 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2676 if ( result != ASE_OK ) {
\r
2677 drivers.removeCurrentDriver();
\r
2678 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2679 errorText_ = errorStream_.str();
\r
2680 error( RtError::WARNING );
\r
2684 info.outputChannels = outputChannels;
\r
2685 info.inputChannels = inputChannels;
\r
2686 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2687 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2689 // Determine the supported sample rates.
\r
2690 info.sampleRates.clear();
\r
2691 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2692 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2693 if ( result == ASE_OK )
\r
2694 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2697 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2698 ASIOChannelInfo channelInfo;
\r
2699 channelInfo.channel = 0;
\r
2700 channelInfo.isInput = true;
\r
2701 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2702 result = ASIOGetChannelInfo( &channelInfo );
\r
2703 if ( result != ASE_OK ) {
\r
2704 drivers.removeCurrentDriver();
\r
2705 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2706 errorText_ = errorStream_.str();
\r
2707 error( RtError::WARNING );
\r
2711 info.nativeFormats = 0;
\r
2712 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2713 info.nativeFormats |= RTAUDIO_SINT16;
\r
2714 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2715 info.nativeFormats |= RTAUDIO_SINT32;
\r
2716 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2717 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2718 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2719 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2720 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2721 info.nativeFormats |= RTAUDIO_SINT24;
\r
2723 if ( info.outputChannels > 0 )
\r
2724 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2725 if ( info.inputChannels > 0 )
\r
2726 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2728 info.probed = true;
\r
2729 drivers.removeCurrentDriver();
\r
// ASIO buffer-switch callback: the driver invokes this when a buffer half
// (selected by 'index') is ready to be processed. It forwards the event to
// the RtApiAsio instance stored in the global asioCallbackInfo. The
// 'processNow' argument is not used here.
// NOTE(review): gaps in the embedded numbering (2734, 2737) indicate the
// brace lines were dropped from this listing.
2733 void bufferSwitch( long index, ASIOBool processNow )
2735 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2736 object->callbackEvent( index );
\r
// Snapshot DeviceInfo for every ASIO device into devices_. Called from
// probeDeviceOpen() before a stream is opened, because getDeviceInfo()
// cannot probe other drivers while a stream is open (see the comment at
// original lines 2770-2773: ASIO does not allow multiple devices to run
// at the same time).
2739 void RtApiAsio :: saveDeviceInfo( void )
2743 unsigned int nDevices = getDeviceCount();
2744 devices_.resize( nDevices );
2745 for ( unsigned int i=0; i<nDevices; i++ )
2746 devices_[i] = getDeviceInfo( i );
\r
// Open and configure the ASIO driver for the given device/mode.
//
// Responsibilities visible in this listing: load + init the driver (once,
// for a duplex stream), validate channel count and sample rate, set the
// sample rate if it differs, detect the driver's native data format and
// byte order, negotiate the buffer size against driver min/max/granularity,
// allocate the AsioHandle, ASIOBufferInfo array and ASIO buffers, allocate
// user/device conversion buffers, fill in stream_ bookkeeping and query
// latencies. On failure paths the code removes the current driver and
// (in the trailing cleanup section) disposes buffers and frees all
// allocations.
//
// NOTE(review): this listing has dropped many interior lines (e.g. the
// 'return FAILURE;' statements after each error, closing braces, and the
// 'error:' cleanup label) — gaps in the embedded numbering mark where.
// Compare against upstream RtAudio before relying on exact control flow.
2749 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2750 unsigned int firstChannel, unsigned int sampleRate,
2751 RtAudioFormat format, unsigned int *bufferSize,
2752 RtAudio::StreamOptions *options )
2754 // For ASIO, a duplex stream MUST use the same driver.
2755 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
2756 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2760 char driverName[32];
2761 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2762 if ( result != ASE_OK ) {
2763 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2764 errorText_ = errorStream_.str();
2768 // Only load the driver once for duplex stream.
2769 if ( mode != INPUT || stream_.mode != OUTPUT ) {
2770 // The getDeviceInfo() function will not work when a stream is open
2771 // because ASIO does not allow multiple devices to run at the same
2772 // time. Thus, we'll probe the system before opening a stream and
2773 // save the results for use by getDeviceInfo().
2774 this->saveDeviceInfo();
2776 if ( !drivers.loadDriver( driverName ) ) {
2777 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2778 errorText_ = errorStream_.str();
2782 result = ASIOInit( &driverInfo );
2783 if ( result != ASE_OK ) {
2784 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2785 errorText_ = errorStream_.str();
2790 // Check the device channel count.
2791 long inputChannels, outputChannels;
2792 result = ASIOGetChannels( &inputChannels, &outputChannels );
2793 if ( result != ASE_OK ) {
2794 drivers.removeCurrentDriver();
2795 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2796 errorText_ = errorStream_.str();
2800 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2801 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2802 drivers.removeCurrentDriver();
2803 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2804 errorText_ = errorStream_.str();
2807 stream_.nDeviceChannels[mode] = channels;
2808 stream_.nUserChannels[mode] = channels;
2809 stream_.channelOffset[mode] = firstChannel;
2811 // Verify the sample rate is supported.
2812 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2813 if ( result != ASE_OK ) {
2814 drivers.removeCurrentDriver();
2815 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2816 errorText_ = errorStream_.str();
2820 // Get the current sample rate
2821 ASIOSampleRate currentRate;
// NOTE(review): '¤tRate' below is mojibake — '&currentRate' with the
// '&curren' run mis-decoded as the HTML entity '&curren;'. The call should
// read ASIOGetSampleRate( &currentRate ); confirm against upstream RtAudio.
2822 result = ASIOGetSampleRate( ¤tRate );
2823 if ( result != ASE_OK ) {
2824 drivers.removeCurrentDriver();
2825 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2826 errorText_ = errorStream_.str();
2830 // Set the sample rate only if necessary
2831 if ( currentRate != sampleRate ) {
2832 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2833 if ( result != ASE_OK ) {
2834 drivers.removeCurrentDriver();
2835 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2836 errorText_ = errorStream_.str();
2841 // Determine the driver data type.
2842 ASIOChannelInfo channelInfo;
2843 channelInfo.channel = 0;
2844 if ( mode == OUTPUT ) channelInfo.isInput = false;
2845 else channelInfo.isInput = true;
2846 result = ASIOGetChannelInfo( &channelInfo );
2847 if ( result != ASE_OK ) {
2848 drivers.removeCurrentDriver();
2849 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
2850 errorText_ = errorStream_.str();
2854 // Assuming WINDOWS host is always little-endian.
// Map the ASIO sample type to an RtAudio format; MSB variants need a
// byte swap on a little-endian host.
2855 stream_.doByteSwap[mode] = false;
2856 stream_.userFormat = format;
2857 stream_.deviceFormat[mode] = 0;
2858 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
2859 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
2860 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
2862 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
2863 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
2864 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
2866 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
2867 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2868 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
2870 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
2871 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
2872 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
2874 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
2875 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
2876 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
// No recognized sample type ⇒ fail (deviceFormat[mode] still 0).
2879 if ( stream_.deviceFormat[mode] == 0 ) {
2880 drivers.removeCurrentDriver();
2881 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
2882 errorText_ = errorStream_.str();
2886 // Set the buffer size. For a duplex stream, this will end up
2887 // setting the buffer size based on the input constraints, which
2889 long minSize, maxSize, preferSize, granularity;
2890 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
2891 if ( result != ASE_OK ) {
2892 drivers.removeCurrentDriver();
2893 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
2894 errorText_ = errorStream_.str();
// Clamp *bufferSize into [minSize, maxSize]; granularity == -1 means
// the driver requires power-of-two sizes, so snap to the nearest
// power of two in range; any other non-zero granularity rounds up to
// a multiple of it.
2898 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
2899 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
2900 else if ( granularity == -1 ) {
2901 // Make sure bufferSize is a power of two.
2902 int log2_of_min_size = 0;
2903 int log2_of_max_size = 0;
2905 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
2906 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
2907 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
2910 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
2911 int min_delta_num = log2_of_min_size;
2913 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
2914 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
2915 if (current_delta < min_delta) {
2916 min_delta = current_delta;
2917 min_delta_num = i;
2921 *bufferSize = ( (unsigned int)1 << min_delta_num );
2922 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
2923 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
2925 else if ( granularity != 0 ) {
2926 // Set to an even multiple of granularity, rounding up.
2927 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
// A duplex stream's two halves must agree on the buffer size.
2930 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
2931 drivers.removeCurrentDriver();
2932 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
2936 stream_.bufferSize = *bufferSize;
2937 stream_.nBuffers = 2;
2939 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2940 else stream_.userInterleaved = true;
2942 // ASIO always uses non-interleaved buffers.
2943 stream_.deviceInterleaved[mode] = false;
2945 // Allocate, if necessary, our AsioHandle structure for the stream.
2946 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2947 if ( handle == 0 ) {
2949 handle = new AsioHandle;
2951 catch ( std::bad_alloc& ) {
2952 //if ( handle == NULL ) {
2953 drivers.removeCurrentDriver();
2954 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
2957 handle->bufferInfos = 0;
2959 // Create a manual-reset event.
// Used by stopStream()/callbackEvent() to block until draining finishes.
2960 handle->condition = CreateEvent( NULL, // no security
2961 TRUE, // manual-reset
2962 FALSE, // non-signaled initially
2963 NULL ); // unnamed
2964 stream_.apiHandle = (void *) handle;
2967 // Create the ASIO internal buffers. Since RtAudio sets up input
2968 // and output separately, we'll have to dispose of previously
2969 // created output buffers for a duplex stream.
2970 long inputLatency, outputLatency;
2971 if ( mode == INPUT && stream_.mode == OUTPUT ) {
2972 ASIODisposeBuffers();
2973 if ( handle->bufferInfos ) free( handle->bufferInfos );
2976 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
2977 bool buffersAllocated = false;
2978 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
2979 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
2980 if ( handle->bufferInfos == NULL ) {
2981 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
2982 errorText_ = errorStream_.str();
// Output channels occupy indices [0, nDeviceChannels[0]); input channels
// follow. channelOffset shifts the driver channel numbers.
2986 ASIOBufferInfo *infos;
2987 infos = handle->bufferInfos;
2988 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
2989 infos->isInput = ASIOFalse;
2990 infos->channelNum = i + stream_.channelOffset[0];
2991 infos->buffers[0] = infos->buffers[1] = 0;
2993 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
2994 infos->isInput = ASIOTrue;
2995 infos->channelNum = i + stream_.channelOffset[1];
2996 infos->buffers[0] = infos->buffers[1] = 0;
2999 // Set up the ASIO callback structure and create the ASIO data buffers.
3000 asioCallbacks.bufferSwitch = &bufferSwitch;
3001 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3002 asioCallbacks.asioMessage = &asioMessages;
3003 asioCallbacks.bufferSwitchTimeInfo = NULL;
3004 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3005 if ( result != ASE_OK ) {
3006 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3007 errorText_ = errorStream_.str();
3010 buffersAllocated = true;
3012 // Set flags for buffer conversion.
3013 stream_.doConvertBuffer[mode] = false;
3014 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3015 stream_.doConvertBuffer[mode] = true;
3016 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3017 stream_.nUserChannels[mode] > 1 )
3018 stream_.doConvertBuffer[mode] = true;
3020 // Allocate necessary internal buffers
3021 unsigned long bufferBytes;
3022 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3023 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3024 if ( stream_.userBuffer[mode] == NULL ) {
3025 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3029 if ( stream_.doConvertBuffer[mode] ) {
// Reuse an existing (duplex output) device buffer when it is already
// large enough for this mode's per-frame byte count.
3031 bool makeBuffer = true;
3032 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3033 if ( mode == INPUT ) {
3034 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
3035 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3036 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3040 if ( makeBuffer ) {
3041 bufferBytes *= *bufferSize;
3042 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3043 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3044 if ( stream_.deviceBuffer == NULL ) {
3045 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3051 stream_.sampleRate = sampleRate;
3052 stream_.device[mode] = device;
3053 stream_.state = STREAM_STOPPED;
3054 asioCallbackInfo = &stream_.callbackInfo;
3055 stream_.callbackInfo.object = (void *) this;
3056 if ( stream_.mode == OUTPUT && mode == INPUT )
3057 // We had already set up an output stream.
3058 stream_.mode = DUPLEX;
3060 stream_.mode = mode;
3062 // Determine device latencies
3063 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3064 if ( result != ASE_OK ) {
3065 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3066 errorText_ = errorStream_.str();
3067 error( RtError::WARNING); // warn but don't fail
3070 stream_.latency[0] = outputLatency;
3071 stream_.latency[1] = inputLatency;
3074 // Setup the buffer conversion information structure. We don't use
3075 // buffers to do channel offsets, so we override that parameter
3077 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// Failure cleanup path: dispose ASIO buffers, unload the driver, and
// release the handle, event, and all heap buffers.
3082 if ( buffersAllocated )
3083 ASIODisposeBuffers();
3084 drivers.removeCurrentDriver();
3087 CloseHandle( handle->condition );
3088 if ( handle->bufferInfos )
3089 free( handle->bufferInfos );
3091 stream_.apiHandle = 0;
3094 for ( int i=0; i<2; i++ ) {
3095 if ( stream_.userBuffer[i] ) {
3096 free( stream_.userBuffer[i] );
3097 stream_.userBuffer[i] = 0;
3101 if ( stream_.deviceBuffer ) {
3102 free( stream_.deviceBuffer );
3103 stream_.deviceBuffer = 0;
\r
// Close the open stream: stop it if running, dispose the ASIO buffers,
// unload the driver, and free the AsioHandle (event + bufferInfos) and
// the user/device buffers. Warns (does not throw fatally) if no stream
// is open.
// NOTE(review): the ASIOStop() call and some brace lines appear to have
// been dropped from this listing (numbering gaps 3119-3120, 3123, 3125).
3109 void RtApiAsio :: closeStream()
3111 if ( stream_.state == STREAM_CLOSED ) {
3112 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3113 error( RtError::WARNING );
3117 if ( stream_.state == STREAM_RUNNING ) {
3118 stream_.state = STREAM_STOPPED;
3121 ASIODisposeBuffers();
3122 drivers.removeCurrentDriver();
3124 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3126 CloseHandle( handle->condition );
3127 if ( handle->bufferInfos )
3128 free( handle->bufferInfos );
3130 stream_.apiHandle = 0;
3133 for ( int i=0; i<2; i++ ) {
3134 if ( stream_.userBuffer[i] ) {
3135 free( stream_.userBuffer[i] );
3136 stream_.userBuffer[i] = 0;
3140 if ( stream_.deviceBuffer ) {
3141 free( stream_.deviceBuffer );
3142 stream_.deviceBuffer = 0;
3145 stream_.mode = UNINITIALIZED;
3146 stream_.state = STREAM_CLOSED;
\r
3149 bool stopThreadCalled = false;
\r
// Start the open stream via ASIOStart(). Resets the drain bookkeeping
// (drainCounter/internalDrain), clears the manual-reset condition event,
// marks the stream RUNNING, and raises SYSTEM_ERROR if ASIOStart failed.
3151 void RtApiAsio :: startStream()
3154 if ( stream_.state == STREAM_RUNNING ) {
3155 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3156 error( RtError::WARNING );
3160 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3161 ASIOError result = ASIOStart();
3162 if ( result != ASE_OK ) {
3163 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3164 errorText_ = errorStream_.str();
3168 handle->drainCounter = 0;
3169 handle->internalDrain = false;
3170 ResetEvent( handle->condition );
3171 stream_.state = STREAM_RUNNING;
3175 stopThreadCalled = false;
3177 if ( result == ASE_OK ) return;
3178 error( RtError::SYSTEM_ERROR );
\r
// Stop the stream. For output/duplex, first drains pending output:
// setting drainCounter = 2 makes callbackEvent() zero-fill subsequent
// output buffers, and this thread blocks on the condition event until
// the drain completes. Then calls ASIOStop() and raises SYSTEM_ERROR
// on failure.
3181 void RtApiAsio :: stopStream()
3184 if ( stream_.state == STREAM_STOPPED ) {
3185 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3186 error( RtError::WARNING );
3190 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3191 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3192 if ( handle->drainCounter == 0 ) {
3193 handle->drainCounter = 2;
3194 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3198 stream_.state = STREAM_STOPPED;
3200 ASIOError result = ASIOStop();
3201 if ( result != ASE_OK ) {
3202 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3203 errorText_ = errorStream_.str();
3206 if ( result == ASE_OK ) return;
3207 error( RtError::SYSTEM_ERROR );
\r
// Abort the stream. Per the in-code comment below, the "fast" abort path
// was disabled because some devices kept sounding unless buffers were
// zeroed, so abort deliberately behaves like stopStream() (the stopStream()
// call itself appears on a line dropped from this listing — numbering gap
// after 3215).
3210 void RtApiAsio :: abortStream()
3213 if ( stream_.state == STREAM_STOPPED ) {
3214 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3215 error( RtError::WARNING );
3219 // The following lines were commented-out because some behavior was
3220 // noted where the device buffers need to be zeroed to avoid
3221 // continuing sound, even when the device buffers are completely
3222 // disposed. So now, calling abort is the same as calling stop.
3223 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3224 // handle->drainCounter = 2;
\r
3228 // This function will be called by a spawned thread when the user
\r
3229 // callback function signals that the stream should be stopped or
\r
3230 // aborted. It is necessary to handle it this way because the
\r
3231 // callbackEvent() function must return before the ASIOStop()
\r
3232 // function will return.
\r
// Thread entry point used by callbackEvent(): runs stopStream() on a
// separate thread because ASIOStop() cannot complete until the driver's
// callback (callbackEvent) has returned. Ends its own thread via
// _endthreadex. (See the explanatory comment at original lines 3228-3232.)
3233 extern "C" unsigned __stdcall asioStopStream( void *ptr )
3235 CallbackInfo *info = (CallbackInfo *) ptr;
3236 RtApiAsio *object = (RtApiAsio *) info->object;
3238 object->stopStream();
3239 _endthreadex( 0 );
\r
// Per-buffer processing, invoked (via bufferSwitch) each time the driver
// switches buffer halves. Sequence visible here:
//   1. If draining finished (drainCounter > 3): move to STOPPING and either
//      signal the waiting stopStream() thread (external stop) or spawn
//      asioStopStream (callback-initiated stop).
//   2. Unless draining, invoke the user callback; return value 2 = abort
//      (spawn stop thread), 1 = drain then stop (internalDrain).
//   3. Output side: zero-fill while draining, else copy/convert/byte-swap
//      the user buffer into the per-channel ASIO output buffers.
//   4. Input side: gather the per-channel ASIO input buffers into the
//      user buffer, with optional byte swap and format conversion.
//   5. ASIOOutputReady() + tick the stream time.
// NOTE(review): several closing-brace/return lines are missing from this
// listing (numbering gaps, e.g. 3249-3251, 3340-3342, 3393-3394).
3243 bool RtApiAsio :: callbackEvent( long bufferIndex )
3245 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3246 if ( stream_.state == STREAM_CLOSED ) {
3247 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3248 error( RtError::WARNING );
3252 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3253 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3255 // Check if we were draining the stream and signal if finished.
3256 if ( handle->drainCounter > 3 ) {
3258 stream_.state = STREAM_STOPPING;
3259 if ( handle->internalDrain == false )
3260 SetEvent( handle->condition );
3261 else { // spawn a thread to stop the stream
3262 unsigned threadId;
3263 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3264 &stream_.callbackInfo, 0, &threadId );
3269 // Invoke user callback to get fresh output data UNLESS we are
3270 // draining stream.
3271 if ( handle->drainCounter == 0 ) {
3272 RtAudioCallback callback = (RtAudioCallback) info->callback;
3273 double streamTime = getStreamTime();
3274 RtAudioStreamStatus status = 0;
3275 if ( stream_.mode != INPUT && asioXRun == true ) {
3276 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3279 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3280 status |= RTAUDIO_INPUT_OVERFLOW;
3283 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3284 stream_.bufferSize, streamTime, status, info->userData );
3285 if ( cbReturnValue == 2 ) {
3286 stream_.state = STREAM_STOPPING;
3287 handle->drainCounter = 2;
3288 unsigned threadId;
3289 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3290 &stream_.callbackInfo, 0, &threadId );
3293 else if ( cbReturnValue == 1 ) {
3294 handle->drainCounter = 1;
3295 handle->internalDrain = true;
3299 unsigned int nChannels, bufferBytes, i, j;
3300 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3301 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3303 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3305 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3307 for ( i=0, j=0; i<nChannels; i++ ) {
3308 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3309 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3313 else if ( stream_.doConvertBuffer[0] ) {
3315 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3316 if ( stream_.doByteSwap[0] )
3317 byteSwapBuffer( stream_.deviceBuffer,
3318 stream_.bufferSize * stream_.nDeviceChannels[0],
3319 stream_.deviceFormat[0] );
3321 for ( i=0, j=0; i<nChannels; i++ ) {
3322 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3323 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3324 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
3330 if ( stream_.doByteSwap[0] )
3331 byteSwapBuffer( stream_.userBuffer[0],
3332 stream_.bufferSize * stream_.nUserChannels[0],
3333 stream_.userFormat );
3335 for ( i=0, j=0; i<nChannels; i++ ) {
3336 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3337 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3338 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
// Once draining has begun, count callbacks until drainCounter > 3 above.
3343 if ( handle->drainCounter ) {
3344 handle->drainCounter++;
3349 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3351 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3353 if (stream_.doConvertBuffer[1]) {
3355 // Always interleave ASIO input data.
3356 for ( i=0, j=0; i<nChannels; i++ ) {
3357 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3358 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3359 handle->bufferInfos[i].buffers[bufferIndex],
3363 if ( stream_.doByteSwap[1] )
3364 byteSwapBuffer( stream_.deviceBuffer,
3365 stream_.bufferSize * stream_.nDeviceChannels[1],
3366 stream_.deviceFormat[1] );
3367 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3371 for ( i=0, j=0; i<nChannels; i++ ) {
3372 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3373 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3374 handle->bufferInfos[i].buffers[bufferIndex],
3379 if ( stream_.doByteSwap[1] )
3380 byteSwapBuffer( stream_.userBuffer[1],
3381 stream_.bufferSize * stream_.nUserChannels[1],
3382 stream_.userFormat );
3387 // The following call was suggested by Malte Clasen. While the API
3388 // documentation indicates it should not be required, some device
3389 // drivers apparently do not function correctly without it.
3390 ASIOOutputReady();
3392 RtApi::tickStreamTime();
\r
// ASIO sampleRateDidChange callback: the driver reports an (external)
// sample-rate change. This implementation stops the stream (inside a
// try, reporting any RtError to stderr) and logs the new rate.
3396 void sampleRateChanged( ASIOSampleRate sRate )
3398 // The ASIO documentation says that this usually only happens during
3399 // external sync. Audio processing is not stopped by the driver,
3400 // actual sample rate might not have even changed, maybe only the
3401 // sample rate status of an AES/EBU or S/PDIF digital input at the
3404 RtApi *object = (RtApi *) asioCallbackInfo->object;
3406 object->stopStream();
3408 catch ( RtError &exception ) {
3409 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3413 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
// ASIO asioMessage callback: answers driver queries/notifications chosen
// by 'selector'. The 'message' and 'opt' parameters are unused here.
// NOTE(review): the 'ret' bookkeeping and the return-value lines for each
// case were dropped from this listing (numbering gaps, e.g. 3417-3419,
// 3431-3432, 3441-3442); per-case results cannot be confirmed from this
// view.
3416 long asioMessages( long selector, long value, void* message, double* opt )
3420 switch( selector ) {
3421 case kAsioSelectorSupported:
3422 if ( value == kAsioResetRequest
3423 || value == kAsioEngineVersion
3424 || value == kAsioResyncRequest
3425 || value == kAsioLatenciesChanged
3426 // The following three were added for ASIO 2.0, you don't
3427 // necessarily have to support them.
3428 || value == kAsioSupportsTimeInfo
3429 || value == kAsioSupportsTimeCode
3430 || value == kAsioSupportsInputMonitor)
3433 case kAsioResetRequest:
3434 // Defer the task and perform the reset of the driver during the
3435 // next "safe" situation. You cannot reset the driver right now,
3436 // as this code is called from the driver. Reset the driver is
3437 // done by completely destruct is. I.e. ASIOStop(),
3438 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3440 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3443 case kAsioResyncRequest:
3444 // This informs the application that the driver encountered some
3445 // non-fatal data loss. It is used for synchronization purposes
3446 // of different media. Added mainly to work around the Win16Mutex
3447 // problems in Windows 95/98 with the Windows Multimedia system,
3448 // which could lose data because the Mutex was held too long by
3449 // another thread. However a driver can issue it in other
3450 // situations, too.
3451 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3455 case kAsioLatenciesChanged:
3456 // This will inform the host application that the drivers were
3457 // latencies changed. Beware, it this does not mean that the
3458 // buffer sizes have changed! You might need to update internal
3460 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3463 case kAsioEngineVersion:
3464 // Return the supported ASIO version of the host application. If
3465 // a host application does not implement this selector, ASIO 1.0
3466 // is assumed by the driver.
3469 case kAsioSupportsTimeInfo:
3470 // Informs the driver whether the
3471 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3472 // For compatibility with ASIO 1.0 drivers the host application
3473 // should always support the "old" bufferSwitch method, too.
3476 case kAsioSupportsTimeCode:
3477 // Informs the driver whether application is interested in time
3478 // code info. If an application does not need to know about time
3479 // code, the driver has less work to do.
\r
// Map an ASIOError code to a static human-readable string; unknown codes
// yield "Unknown error.". The returned pointer is to a string literal —
// callers must not free it.
// NOTE(review): the 'struct Messages { ... }' definition lines (original
// 3487-3493 except 3491) were dropped from this listing; only the
// 'message' member line survives below.
3486 static const char* getAsioErrorString( ASIOError result )
3491 const char*message;
3494 static Messages m[] =
3496 { ASE_NotPresent, "Hardware input or output is not present or available." },
3497 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3498 { ASE_InvalidParameter, "Invalid input parameter." },
3499 { ASE_InvalidMode, "Invalid mode." },
3500 { ASE_SPNotAdvancing, "Sample position not advancing." },
3501 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3502 { ASE_NoMemory, "Not enough memory to complete the request." }
3505 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3506 if ( m[i].value == result ) return m[i].message;
3508 return "Unknown error.";
\r
3510 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3514 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
3516 // Modified by Robin Davies, October 2005
\r
3517 // - Improvements to DirectX pointer chasing.
\r
3518 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
3519 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
3520 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
3521 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
3523 #include <dsound.h>
\r
3524 #include <assert.h>
\r
3525 #include <algorithm>
\r
3527 #if defined(__MINGW32__)
\r
3528 // missing from latest mingw winapi
\r
3529 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
3530 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
3531 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
3532 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
3535 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
3537 #ifdef _MSC_VER // if Microsoft Visual C++
\r
3538 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
// Return nonzero when 'pointer' lies in the half-open circular interval
// [earlierPointer, laterPointer) on a ring buffer of 'bufferSize' bytes.
// All three pointers are byte offsets; wrap-around is handled by adding
// bufferSize to the later/candidate offsets before the linear comparison.
// (Return type is DWORD but the value is the boolean comparison result.)
3541 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
3543 if ( pointer > bufferSize ) pointer -= bufferSize;
3544 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
3545 if ( pointer < earlierPointer ) pointer += bufferSize;
3546 return pointer >= earlierPointer && pointer < laterPointer;
\r
3549 // A structure to hold various information related to the DirectSound
\r
3550 // API implementation.
\r
3552 unsigned int drainCounter; // Tracks callback counts when draining
\r
3553 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
3557 UINT bufferPointer[2];
\r
3558 DWORD dsBufferSize[2];
\r
3559 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
3563 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
3566 // Declarations for utility functions, callbacks, and structures
\r
3567 // specific to the DirectSound implementation.
\r
3568 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
3569 LPCTSTR description,
\r
3571 LPVOID lpContext );
\r
3573 static const char* getErrorString( int code );
\r
3575 extern "C" unsigned __stdcall callbackHandler( void *ptr );
\r
// NOTE(review): the 'struct DsDevice {' header and its members (name,
// found, validId[2], id[2]) are missing from this excerpt; only the
// default constructor's initializer survives here.
3584 : found(false) { validId[0] = false; validId[1] = false; }
\r
// Module-level cache of enumerated devices; refreshed by getDeviceCount()
// and indexed by the public device IDs used throughout this backend.
3587 std::vector< DsDevice > dsDevices;
\r
3589 RtApiDs :: RtApiDs()
\r
3591 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
3592 // accept whatever the mainline chose for a threading model.
\r
3593 coInitialized_ = false;
\r
3594 HRESULT hr = CoInitialize( NULL );
\r
3595 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
3598 RtApiDs :: ~RtApiDs()
\r
3600 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
3601 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
3604 // The DirectSound default output is always the first device.
\r
3605 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
3610 // The DirectSound default input is always the first input device,
\r
3611 // which is the first capture device enumerated.
\r
3612 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
3617 unsigned int RtApiDs :: getDeviceCount( void )
\r
3619 // Set query flag for previously found devices to false, so that we
\r
3620 // can check for any devices that have disappeared.
\r
3621 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3622 dsDevices[i].found = false;
\r
3624 // Query DirectSound devices.
\r
3625 bool isInput = false;
\r
3626 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );
\r
3627 if ( FAILED( result ) ) {
\r
3628 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
3629 errorText_ = errorStream_.str();
\r
3630 error( RtError::WARNING );
\r
3633 // Query DirectSoundCapture devices.
\r
3635 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );
\r
3636 if ( FAILED( result ) ) {
\r
3637 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
3638 errorText_ = errorStream_.str();
\r
3639 error( RtError::WARNING );
\r
3642 // Clean out any devices that may have disappeared.
\r
3643 std::vector< int > indices;
\r
3644 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3645 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
3646 unsigned int nErased = 0;
\r
3647 for ( unsigned int i=0; i<indices.size(); i++ )
\r
3648 dsDevices.erase( dsDevices.begin()-nErased++ );
\r
3650 return dsDevices.size();
\r
// Probe and return capability information (channel counts, supported
// sample rates, native formats) for the device at index 'device' in the
// dsDevices cache.  info.probed remains false on any failure path.
// NOTE(review): this excerpt has lost several structural lines (closing
// braces, 'return info;' statements, the 'probeInput:' label, and the
// declarations of 'result', 'outCaps' and 'inCaps'); the comments below
// annotate only the surviving statements.
3653 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
3655 RtAudio::DeviceInfo info;
\r
3656 info.probed = false;
\r
// Lazily (re)enumerate when the cache is empty; fail if still empty.
3658 if ( dsDevices.size() == 0 ) {
\r
3659 // Force a query of all devices
\r
3661 if ( dsDevices.size() == 0 ) {
\r
3662 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
3663 error( RtError::INVALID_USE );
\r
3667 if ( device >= dsDevices.size() ) {
\r
3668 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
3669 error( RtError::INVALID_USE );
\r
// --- Output (render) probe: skipped when the device has no render GUID.
3673 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
3675 LPDIRECTSOUND output;
\r
3677 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
3678 if ( FAILED( result ) ) {
\r
3679 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
3680 errorText_ = errorStream_.str();
\r
3681 error( RtError::WARNING );
\r
3685 outCaps.dwSize = sizeof( outCaps );
\r
3686 result = output->GetCaps( &outCaps );
\r
3687 if ( FAILED( result ) ) {
\r
3688 output->Release();
\r
3689 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
3690 errorText_ = errorStream_.str();
\r
3691 error( RtError::WARNING );
\r
3695 // Get output channel information.
\r
3696 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
3698 // Get sample rate information.
\r
// Keep every candidate rate that lies inside the device's secondary-buffer range.
3699 info.sampleRates.clear();
\r
3700 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
3701 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
3702 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
\r
3703 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
3706 // Get format information.
\r
3707 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3708 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3710 output->Release();
\r
3712 if ( getDefaultOutputDevice() == device )
\r
3713 info.isDefaultOutput = true;
\r
// Output-only device: finished once name/probed are set.
3715 if ( dsDevices[ device ].validId[1] == false ) {
\r
3716 info.name = dsDevices[ device ].name;
\r
3717 info.probed = true;
\r
// --- Input (capture) probe ('probeInput:' label missing from excerpt).
3723 LPDIRECTSOUNDCAPTURE input;
\r
3724 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
3725 if ( FAILED( result ) ) {
\r
3726 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
3727 errorText_ = errorStream_.str();
\r
3728 error( RtError::WARNING );
\r
3733 inCaps.dwSize = sizeof( inCaps );
\r
3734 result = input->GetCaps( &inCaps );
\r
3735 if ( FAILED( result ) ) {
\r
3737 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
3738 errorText_ = errorStream_.str();
\r
3739 error( RtError::WARNING );
\r
3743 // Get input channel information.
\r
3744 info.inputChannels = inCaps.dwChannels;
\r
3746 // Get sample rate and format information.
\r
// dwFormats is a bitmask of fixed WAVE_FORMAT_* combinations; derive the
// supported formats first, then the rates for the chosen format.
3747 std::vector<unsigned int> rates;
\r
3748 if ( inCaps.dwChannels >= 2 ) {
\r
3749 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3750 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3751 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3752 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3753 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3754 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3755 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3756 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
// 16-bit support takes precedence when collecting rates.
3758 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
3759 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
3760 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
3761 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
3762 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
3764 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
3765 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
3766 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
3767 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
3768 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
// Mono-only device: same derivation using the WAVE_FORMAT_*M* flags.
3771 else if ( inCaps.dwChannels == 1 ) {
\r
3772 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3773 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3774 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3775 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3776 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3777 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3778 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3779 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3781 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
3782 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
3783 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
3784 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
3785 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
3787 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
3788 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
3789 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
3790 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
3791 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
3794 else info.inputChannels = 0; // technically, this would be an error
\r
3798 if ( info.inputChannels == 0 ) return info;
\r
3800 // Copy the supported rates to the info structure but avoid duplication.
\r
3802 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
3804 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
3805 if ( rates[i] == info.sampleRates[j] ) {
\r
3810 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
3812 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
3814 // If device opens for both playback and capture, we determine the channels.
\r
3815 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
3816 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
3818 if ( device == 0 ) info.isDefaultInput = true;
\r
3820 // Copy name and return.
\r
3821 info.name = dsDevices[ device ].name;
\r
3822 info.probed = true;
\r
// Open the given device for OUTPUT or INPUT mode: validate the request,
// create/configure the DirectSound (or DirectSoundCapture) object and its
// secondary buffer, allocate user/device conversion buffers, fill in the
// shared DsHandle, and start the callback thread on the first open.
// Returns true (SUCCESS) on success; error paths set errorText_ and jump
// to a cleanup label.
// NOTE(review): this excerpt has lost many structural lines (braces,
// 'return FAILURE;' statements, HRESULT/DSCAPS/DSCBCAPS declarations,
// the 'error:' cleanup label, and some statements); comments below
// annotate only the surviving lines.
3826 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
3827 unsigned int firstChannel, unsigned int sampleRate,
\r
3828 RtAudioFormat format, unsigned int *bufferSize,
\r
3829 RtAudio::StreamOptions *options )
\r
// DirectSound buffers are limited to mono/stereo per device.
3831 if ( channels + firstChannel > 2 ) {
\r
3832 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
3836 unsigned int nDevices = dsDevices.size();
\r
3837 if ( nDevices == 0 ) {
\r
3838 // This should not happen because a check is made before this function is called.
\r
3839 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
3843 if ( device >= nDevices ) {
\r
3844 // This should not happen because a check is made before this function is called.
\r
3845 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
// The device must expose a GUID for the requested direction.
3849 if ( mode == OUTPUT ) {
\r
3850 if ( dsDevices[ device ].validId[0] == false ) {
\r
3851 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
3852 errorText_ = errorStream_.str();
\r
3856 else { // mode == INPUT
\r
3857 if ( dsDevices[ device ].validId[1] == false ) {
\r
3858 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
3859 errorText_ = errorStream_.str();
\r
3864 // According to a note in PortAudio, using GetDesktopWindow()
\r
3865 // instead of GetForegroundWindow() is supposed to avoid problems
\r
3866 // that occur when the application's window is not the foreground
\r
3867 // window. Also, if the application window closes before the
\r
3868 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
3869 // problems when using GetDesktopWindow() but it seems fine now
\r
3870 // (January 2010). I'll leave it commented here.
\r
3871 // HWND hWnd = GetForegroundWindow();
\r
3872 HWND hWnd = GetDesktopWindow();
\r
3874 // Check the numberOfBuffers parameter and limit the lowest value to
\r
3875 // two. This is a judgement call and a value of two is probably too
\r
3876 // low for capture, but it should work for playback.
\r
// NOTE(review): the declaration of 'nBuffers' is missing from this excerpt.
3878 if ( options ) nBuffers = options->numberOfBuffers;
\r
3879 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
3880 if ( nBuffers < 2 ) nBuffers = 3;
\r
3882 // Check the lower range of the user-specified buffer size and set
\r
3883 // (arbitrarily) to a lower bound of 32.
\r
3884 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
3886 // Create the wave format structure. The data format setting will
\r
3887 // be determined later.
\r
3888 WAVEFORMATEX waveFormat;
\r
3889 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
3890 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
3891 waveFormat.nChannels = channels + firstChannel;
\r
3892 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
3894 // Determine the device buffer size. By default, we'll use the value
\r
3895 // defined above (32K), but we will grow it to make allowances for
\r
3896 // very large software buffer sizes.
\r
3897 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
3898 DWORD dsPointerLeadTime = 0;
\r
// Opaque handles for the created DS object and its buffer; stored in the
// DsHandle at the end of this function.
3900 void *ohandle = 0, *bhandle = 0;
\r
// ===================== OUTPUT (render) setup =====================
3902 if ( mode == OUTPUT ) {
\r
3904 LPDIRECTSOUND output;
\r
3905 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
3906 if ( FAILED( result ) ) {
\r
3907 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
3908 errorText_ = errorStream_.str();
\r
3913 outCaps.dwSize = sizeof( outCaps );
\r
3914 result = output->GetCaps( &outCaps );
\r
3915 if ( FAILED( result ) ) {
\r
3916 output->Release();
\r
3917 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
3918 errorText_ = errorStream_.str();
\r
3922 // Check channel information.
\r
3923 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
3924 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
3925 errorText_ = errorStream_.str();
\r
3929 // Check format information. Use 16-bit format unless not
\r
3930 // supported or user requests 8-bit.
\r
3931 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
3932 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
3933 waveFormat.wBitsPerSample = 16;
\r
3934 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
3937 waveFormat.wBitsPerSample = 8;
\r
3938 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
3940 stream_.userFormat = format;
\r
3942 // Update wave format structure and buffer information.
\r
3943 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
3944 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
3945 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
3947 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
3948 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
3949 dsBufferSize *= 2;
\r
3951 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
3952 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
3953 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
3954 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
3955 if ( FAILED( result ) ) {
\r
3956 output->Release();
\r
3957 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
3958 errorText_ = errorStream_.str();
\r
3962 // Even though we will write to the secondary buffer, we need to
\r
3963 // access the primary buffer to set the correct output format
\r
3964 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
3965 // buffer description.
\r
3966 DSBUFFERDESC bufferDescription;
\r
3967 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
3968 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
3969 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
3971 // Obtain the primary buffer
\r
3972 LPDIRECTSOUNDBUFFER buffer;
\r
3973 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
3974 if ( FAILED( result ) ) {
\r
3975 output->Release();
\r
3976 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
3977 errorText_ = errorStream_.str();
\r
3981 // Set the primary DS buffer sound format.
\r
3982 result = buffer->SetFormat( &waveFormat );
\r
3983 if ( FAILED( result ) ) {
\r
3984 output->Release();
\r
3985 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
3986 errorText_ = errorStream_.str();
\r
3990 // Setup the secondary DS buffer description.
\r
3991 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
3992 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
3993 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
3994 DSBCAPS_GLOBALFOCUS |
\r
3995 DSBCAPS_GETCURRENTPOSITION2 |
\r
3996 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
3997 bufferDescription.dwBufferBytes = dsBufferSize;
\r
3998 bufferDescription.lpwfxFormat = &waveFormat;
\r
4000 // Try to create the secondary DS buffer. If that doesn't work,
\r
4001 // try to use software mixing. Otherwise, there's a problem.
\r
4002 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
4003 if ( FAILED( result ) ) {
\r
4004 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
4005 DSBCAPS_GLOBALFOCUS |
\r
4006 DSBCAPS_GETCURRENTPOSITION2 |
\r
4007 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
4008 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
4009 if ( FAILED( result ) ) {
\r
4010 output->Release();
\r
4011 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
4012 errorText_ = errorStream_.str();
\r
4017 // Get the buffer size ... might be different from what we specified.
\r
// NOTE(review): the 'DSBCAPS dsbcaps;' declaration is missing from this excerpt.
4019 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
4020 result = buffer->GetCaps( &dsbcaps );
\r
4021 if ( FAILED( result ) ) {
\r
4022 output->Release();
\r
4023 buffer->Release();
\r
4024 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
4025 errorText_ = errorStream_.str();
\r
4029 dsBufferSize = dsbcaps.dwBufferBytes;
\r
4031 // Lock the DS buffer
\r
// NOTE(review): 'LPVOID audioPtr; DWORD dataLen;' declarations are missing here.
4034 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4035 if ( FAILED( result ) ) {
\r
4036 output->Release();
\r
4037 buffer->Release();
\r
4038 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
4039 errorText_ = errorStream_.str();
\r
4043 // Zero the DS buffer
\r
4044 ZeroMemory( audioPtr, dataLen );
\r
4046 // Unlock the DS buffer
\r
4047 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4048 if ( FAILED( result ) ) {
\r
4049 output->Release();
\r
4050 buffer->Release();
\r
4051 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
4052 errorText_ = errorStream_.str();
\r
4056 ohandle = (void *) output;
\r
4057 bhandle = (void *) buffer;
\r
// ===================== INPUT (capture) setup =====================
4060 if ( mode == INPUT ) {
\r
4062 LPDIRECTSOUNDCAPTURE input;
\r
4063 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
4064 if ( FAILED( result ) ) {
\r
4065 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
4066 errorText_ = errorStream_.str();
\r
4071 inCaps.dwSize = sizeof( inCaps );
\r
4072 result = input->GetCaps( &inCaps );
\r
4073 if ( FAILED( result ) ) {
\r
4075 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
4076 errorText_ = errorStream_.str();
\r
4080 // Check channel information.
\r
4081 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
4082 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
4086 // Check format information. Use 16-bit format unless user
\r
4087 // requests 8-bit.
\r
4088 DWORD deviceFormats;
\r
4089 if ( channels + firstChannel == 2 ) {
\r
4090 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
4091 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
4092 waveFormat.wBitsPerSample = 8;
\r
4093 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
4095 else { // assume 16-bit is supported
\r
4096 waveFormat.wBitsPerSample = 16;
\r
4097 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
4100 else { // channel == 1
\r
4101 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
4102 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
4103 waveFormat.wBitsPerSample = 8;
\r
4104 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
4106 else { // assume 16-bit is supported
\r
4107 waveFormat.wBitsPerSample = 16;
\r
4108 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
4111 stream_.userFormat = format;
\r
4113 // Update wave format structure and buffer information.
\r
4114 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
4115 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
4116 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
4118 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
4119 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
4120 dsBufferSize *= 2;
\r
4122 // Setup the secondary DS buffer description.
\r
4123 DSCBUFFERDESC bufferDescription;
\r
4124 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
4125 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
4126 bufferDescription.dwFlags = 0;
\r
4127 bufferDescription.dwReserved = 0;
\r
4128 bufferDescription.dwBufferBytes = dsBufferSize;
\r
4129 bufferDescription.lpwfxFormat = &waveFormat;
\r
4131 // Create the capture buffer.
\r
4132 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
4133 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
4134 if ( FAILED( result ) ) {
\r
4136 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
4137 errorText_ = errorStream_.str();
\r
4141 // Get the buffer size ... might be different from what we specified.
\r
4142 DSCBCAPS dscbcaps;
\r
4143 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
4144 result = buffer->GetCaps( &dscbcaps );
\r
4145 if ( FAILED( result ) ) {
\r
4147 buffer->Release();
\r
4148 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
4149 errorText_ = errorStream_.str();
\r
4153 dsBufferSize = dscbcaps.dwBufferBytes;
\r
4155 // NOTE: We could have a problem here if this is a duplex stream
\r
4156 // and the play and capture hardware buffer sizes are different
\r
4157 // (I'm actually not sure if that is a problem or not).
\r
4158 // Currently, we are not verifying that.
\r
4160 // Lock the capture buffer
\r
4163 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4164 if ( FAILED( result ) ) {
\r
4166 buffer->Release();
\r
4167 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
4168 errorText_ = errorStream_.str();
\r
4172 // Zero the buffer
\r
4173 ZeroMemory( audioPtr, dataLen );
\r
4175 // Unlock the buffer
\r
4176 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4177 if ( FAILED( result ) ) {
\r
4179 buffer->Release();
\r
4180 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
4181 errorText_ = errorStream_.str();
\r
4185 ohandle = (void *) input;
\r
4186 bhandle = (void *) buffer;
\r
// ============== Common stream bookkeeping for either mode ==============
4189 // Set various stream parameters
\r
4190 DsHandle *handle = 0;
\r
4191 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
4192 stream_.nUserChannels[mode] = channels;
\r
4193 stream_.bufferSize = *bufferSize;
\r
4194 stream_.channelOffset[mode] = firstChannel;
\r
4195 stream_.deviceInterleaved[mode] = true;
\r
4196 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
4197 else stream_.userInterleaved = true;
\r
4199 // Set flag for buffer conversion
\r
// Conversion is required on any channel-count, format, or interleaving mismatch.
4200 stream_.doConvertBuffer[mode] = false;
\r
4201 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
4202 stream_.doConvertBuffer[mode] = true;
\r
4203 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
4204 stream_.doConvertBuffer[mode] = true;
\r
4205 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4206 stream_.nUserChannels[mode] > 1 )
\r
4207 stream_.doConvertBuffer[mode] = true;
\r
4209 // Allocate necessary internal buffers
\r
4210 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
4211 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
4212 if ( stream_.userBuffer[mode] == NULL ) {
\r
4213 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
4217 if ( stream_.doConvertBuffer[mode] ) {
\r
// Reuse an existing device buffer for duplex streams when it is big enough.
4219 bool makeBuffer = true;
\r
4220 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
4221 if ( mode == INPUT ) {
\r
4222 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
4223 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
4224 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
4228 if ( makeBuffer ) {
\r
4229 bufferBytes *= *bufferSize;
\r
4230 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
4231 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
4232 if ( stream_.deviceBuffer == NULL ) {
\r
4233 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
4239 // Allocate our DsHandle structures for the stream.
\r
// Only the first probeDeviceOpen call (per stream) builds the DsHandle.
4240 if ( stream_.apiHandle == 0 ) {
\r
4242 handle = new DsHandle;
\r
4244 catch ( std::bad_alloc& ) {
\r
// NOTE(review): message says "AsioHandle" but this is the DsHandle
// allocation — likely a copy/paste slip from the ASIO backend.
4245 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
4249 // Create a manual-reset event.
\r
4250 handle->condition = CreateEvent( NULL, // no security
\r
4251 TRUE, // manual-reset
\r
4252 FALSE, // non-signaled initially
\r
4253 NULL ); // unnamed
\r
4254 stream_.apiHandle = (void *) handle;
\r
4257 handle = (DsHandle *) stream_.apiHandle;
\r
4258 handle->id[mode] = ohandle;
\r
4259 handle->buffer[mode] = bhandle;
\r
4260 handle->dsBufferSize[mode] = dsBufferSize;
\r
4261 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
4263 stream_.device[mode] = device;
\r
4264 stream_.state = STREAM_STOPPED;
\r
4265 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
4266 // We had already set up an output stream.
\r
4267 stream_.mode = DUPLEX;
\r
4269 stream_.mode = mode;
\r
4270 stream_.nBuffers = nBuffers;
\r
4271 stream_.sampleRate = sampleRate;
\r
4273 // Setup the buffer conversion information structure.
\r
4274 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
4276 // Setup the callback thread.
\r
4277 if ( stream_.callbackInfo.isRunning == false ) {
\r
4278 unsigned threadId;
\r
4279 stream_.callbackInfo.isRunning = true;
\r
4280 stream_.callbackInfo.object = (void *) this;
\r
4281 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
4282 &stream_.callbackInfo, 0, &threadId );
\r
4283 if ( stream_.callbackInfo.thread == 0 ) {
\r
4284 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
4288 // Boost DS thread priority
\r
4289 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
// ======= Error/cleanup path (the 'error:' label itself is missing from
// this excerpt): release any DS objects/buffers created above, destroy
// the condition event, and free the user/device buffers. =======
4295 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
4296 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
4297 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4298 if ( buffer ) buffer->Release();
\r
4299 object->Release();
\r
4301 if ( handle->buffer[1] ) {
\r
4302 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
4303 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4304 if ( buffer ) buffer->Release();
\r
4305 object->Release();
\r
4307 CloseHandle( handle->condition );
\r
4309 stream_.apiHandle = 0;
\r
4312 for ( int i=0; i<2; i++ ) {
\r
4313 if ( stream_.userBuffer[i] ) {
\r
4314 free( stream_.userBuffer[i] );
\r
4315 stream_.userBuffer[i] = 0;
\r
4319 if ( stream_.deviceBuffer ) {
\r
4320 free( stream_.deviceBuffer );
\r
4321 stream_.deviceBuffer = 0;
\r
// Close the open stream: stop the callback thread, release the
// DirectSound objects/buffers, destroy the condition event, free the
// user/device buffers and reset the stream bookkeeping.
// NOTE(review): structural lines are missing from this excerpt — most
// notably the braces and (presumably) a stop of still-playing buffers
// before Release(); confirm against the full file.
4327 void RtApiDs :: closeStream()
\r
4329 if ( stream_.state == STREAM_CLOSED ) {
\r
4330 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
4331 error( RtError::WARNING );
\r
4335 // Stop the callback thread.
\r
// The callback thread polls isRunning; clearing it lets the thread exit,
// then we join and destroy the thread handle.
4336 stream_.callbackInfo.isRunning = false;
\r
4337 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
4338 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
4340 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
// Release the render-side buffer and DirectSound object (index 0).
4342 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
4343 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
4344 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4347 buffer->Release();
\r
4349 object->Release();
\r
// Release the capture-side buffer and DirectSoundCapture object (index 1).
4351 if ( handle->buffer[1] ) {
\r
4352 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
4353 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4356 buffer->Release();
\r
4358 object->Release();
\r
// Destroy the manual-reset event created in probeDeviceOpen().
4360 CloseHandle( handle->condition );
\r
4362 stream_.apiHandle = 0;
\r
4365 for ( int i=0; i<2; i++ ) {
\r
4366 if ( stream_.userBuffer[i] ) {
\r
4367 free( stream_.userBuffer[i] );
\r
4368 stream_.userBuffer[i] = 0;
\r
4372 if ( stream_.deviceBuffer ) {
\r
4373 free( stream_.deviceBuffer );
\r
4374 stream_.deviceBuffer = 0;
\r
4377 stream_.mode = UNINITIALIZED;
\r
4378 stream_.state = STREAM_CLOSED;
\r
// Start the stream: kick the DirectSound render buffer into looping
// playback and/or start the capture buffer, reset the drain state and the
// condition event, and mark the stream RUNNING.  On any Play()/Start()
// failure, 'result' stays failed and a SYSTEM_ERROR is raised at the end.
// NOTE(review): some structural lines (braces, a likely 'goto unlock'
// on error) are missing from this excerpt.
4381 void RtApiDs :: startStream()
\r
4384 if ( stream_.state == STREAM_RUNNING ) {
\r
4385 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
4386 error( RtError::WARNING );
\r
4390 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4392 // Increase scheduler frequency on lesser windows (a side-effect of
\r
4393 // increasing timer accuracy). On greater windows (Win2K or later),
\r
4394 // this is already in effect.
\r
4395 timeBeginPeriod( 1 );
\r
4397 buffersRolling = false;
\r
4398 duplexPrerollBytes = 0;
\r
4400 if ( stream_.mode == DUPLEX ) {
\r
4401 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
4402 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
4405 HRESULT result = 0;
\r
// Start looping playback on the render buffer.
4406 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4408 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4409 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
4410 if ( FAILED( result ) ) {
\r
4411 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
4412 errorText_ = errorStream_.str();
\r
// Start looping capture on the capture buffer.
4417 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4419 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4420 result = buffer->Start( DSCBSTART_LOOPING );
\r
4421 if ( FAILED( result ) ) {
\r
4422 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
4423 errorText_ = errorStream_.str();
\r
// Reset drain bookkeeping and the stop-signal event, then go RUNNING.
4428 handle->drainCounter = 0;
\r
4429 handle->internalDrain = false;
\r
4430 ResetEvent( handle->condition );
\r
4431 stream_.state = STREAM_RUNNING;
\r
4434 if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
4437 void RtApiDs :: stopStream()
\r
4440 if ( stream_.state == STREAM_STOPPED ) {
\r
4441 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
4442 error( RtError::WARNING );
\r
4446 HRESULT result = 0;
\r
4449 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4450 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4451 if ( handle->drainCounter == 0 ) {
\r
4452 handle->drainCounter = 2;
\r
4453 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
4456 stream_.state = STREAM_STOPPED;
\r
4458 // Stop the buffer and clear memory
\r
4459 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4460 result = buffer->Stop();
\r
4461 if ( FAILED( result ) ) {
\r
4462 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
4463 errorText_ = errorStream_.str();
\r
4467 // Lock the buffer and clear it so that if we start to play again,
\r
4468 // we won't have old data playing.
\r
4469 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4470 if ( FAILED( result ) ) {
\r
4471 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
4472 errorText_ = errorStream_.str();
\r
4476 // Zero the DS buffer
\r
4477 ZeroMemory( audioPtr, dataLen );
\r
4479 // Unlock the DS buffer
\r
4480 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4481 if ( FAILED( result ) ) {
\r
4482 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
4483 errorText_ = errorStream_.str();
\r
4487 // If we start playing again, we must begin at beginning of buffer.
\r
4488 handle->bufferPointer[0] = 0;
\r
4491 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4492 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4496 stream_.state = STREAM_STOPPED;
\r
4498 result = buffer->Stop();
\r
4499 if ( FAILED( result ) ) {
\r
4500 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
4501 errorText_ = errorStream_.str();
\r
4505 // Lock the buffer and clear it so that if we start to play again,
\r
4506 // we won't have old data playing.
\r
4507 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4508 if ( FAILED( result ) ) {
\r
4509 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
4510 errorText_ = errorStream_.str();
\r
4514 // Zero the DS buffer
\r
4515 ZeroMemory( audioPtr, dataLen );
\r
4517 // Unlock the DS buffer
\r
4518 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4519 if ( FAILED( result ) ) {
\r
4520 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
4521 errorText_ = errorStream_.str();
\r
4525 // If we start recording again, we must begin at beginning of buffer.
\r
4526 handle->bufferPointer[1] = 0;
\r
4530 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
4531 if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
4534 void RtApiDs :: abortStream()
\r
4537 if ( stream_.state == STREAM_STOPPED ) {
\r
4538 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
4539 error( RtError::WARNING );
\r
4543 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4544 handle->drainCounter = 2;
\r
4549 void RtApiDs :: callbackEvent()
\r
4551 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
4552 Sleep( 50 ); // sleep 50 milliseconds
\r
4556 if ( stream_.state == STREAM_CLOSED ) {
\r
4557 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
4558 error( RtError::WARNING );
\r
4562 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
4563 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4565 // Check if we were draining the stream and signal is finished.
\r
4566 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
4568 stream_.state = STREAM_STOPPING;
\r
4569 if ( handle->internalDrain == false )
\r
4570 SetEvent( handle->condition );
\r
4576 // Invoke user callback to get fresh output data UNLESS we are
\r
4577 // draining stream.
\r
4578 if ( handle->drainCounter == 0 ) {
\r
4579 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
4580 double streamTime = getStreamTime();
\r
4581 RtAudioStreamStatus status = 0;
\r
4582 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
4583 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
4584 handle->xrun[0] = false;
\r
4586 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
4587 status |= RTAUDIO_INPUT_OVERFLOW;
\r
4588 handle->xrun[1] = false;
\r
4590 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
4591 stream_.bufferSize, streamTime, status, info->userData );
\r
4592 if ( cbReturnValue == 2 ) {
\r
4593 stream_.state = STREAM_STOPPING;
\r
4594 handle->drainCounter = 2;
\r
4598 else if ( cbReturnValue == 1 ) {
\r
4599 handle->drainCounter = 1;
\r
4600 handle->internalDrain = true;
\r
4605 DWORD currentWritePointer, safeWritePointer;
\r
4606 DWORD currentReadPointer, safeReadPointer;
\r
4607 UINT nextWritePointer;
\r
4609 LPVOID buffer1 = NULL;
\r
4610 LPVOID buffer2 = NULL;
\r
4611 DWORD bufferSize1 = 0;
\r
4612 DWORD bufferSize2 = 0;
\r
4617 if ( buffersRolling == false ) {
\r
4618 if ( stream_.mode == DUPLEX ) {
\r
4619 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4621 // It takes a while for the devices to get rolling. As a result,
\r
4622 // there's no guarantee that the capture and write device pointers
\r
4623 // will move in lockstep. Wait here for both devices to start
\r
4624 // rolling, and then set our buffer pointers accordingly.
\r
4625 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
4626 // bytes later than the write buffer.
\r
4628 // Stub: a serious risk of having a pre-emptive scheduling round
\r
4629 // take place between the two GetCurrentPosition calls... but I'm
\r
4630 // really not sure how to solve the problem. Temporarily boost to
\r
4631 // Realtime priority, maybe; but I'm not sure what priority the
\r
4632 // DirectSound service threads run at. We *should* be roughly
\r
4633 // within a ms or so of correct.
\r
4635 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4636 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4638 DWORD startSafeWritePointer, startSafeReadPointer;
\r
4640 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
4641 if ( FAILED( result ) ) {
\r
4642 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4643 errorText_ = errorStream_.str();
\r
4644 error( RtError::SYSTEM_ERROR );
\r
4646 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
4647 if ( FAILED( result ) ) {
\r
4648 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4649 errorText_ = errorStream_.str();
\r
4650 error( RtError::SYSTEM_ERROR );
\r
4653 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
4654 if ( FAILED( result ) ) {
\r
4655 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4656 errorText_ = errorStream_.str();
\r
4657 error( RtError::SYSTEM_ERROR );
\r
4659 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
4660 if ( FAILED( result ) ) {
\r
4661 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4662 errorText_ = errorStream_.str();
\r
4663 error( RtError::SYSTEM_ERROR );
\r
4665 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
4669 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4671 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4672 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4673 handle->bufferPointer[1] = safeReadPointer;
\r
4675 else if ( stream_.mode == OUTPUT ) {
\r
4677 // Set the proper nextWritePosition after initial startup.
\r
4678 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4679 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4680 if ( FAILED( result ) ) {
\r
4681 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4682 errorText_ = errorStream_.str();
\r
4683 error( RtError::SYSTEM_ERROR );
\r
4685 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4686 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4689 buffersRolling = true;
\r
4692 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4694 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4696 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
4697 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4698 bufferBytes *= formatBytes( stream_.userFormat );
\r
4699 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
4702 // Setup parameters and do buffer conversion if necessary.
\r
4703 if ( stream_.doConvertBuffer[0] ) {
\r
4704 buffer = stream_.deviceBuffer;
\r
4705 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
4706 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
4707 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
4710 buffer = stream_.userBuffer[0];
\r
4711 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4712 bufferBytes *= formatBytes( stream_.userFormat );
\r
4715 // No byte swapping necessary in DirectSound implementation.
\r
4717 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
4718 // unsigned. So, we need to convert our signed 8-bit data here to
\r
4720 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
4721 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
4723 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
4724 nextWritePointer = handle->bufferPointer[0];
\r
4726 DWORD endWrite, leadPointer;
\r
4728 // Find out where the read and "safe write" pointers are.
\r
4729 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4730 if ( FAILED( result ) ) {
\r
4731 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4732 errorText_ = errorStream_.str();
\r
4733 error( RtError::SYSTEM_ERROR );
\r
4736 // We will copy our output buffer into the region between
\r
4737 // safeWritePointer and leadPointer. If leadPointer is not
\r
4738 // beyond the next endWrite position, wait until it is.
\r
4739 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4740 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
4741 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
4742 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
4743 endWrite = nextWritePointer + bufferBytes;
\r
4745 // Check whether the entire write region is behind the play pointer.
\r
4746 if ( leadPointer >= endWrite ) break;
\r
4748 // If we are here, then we must wait until the leadPointer advances
\r
4749 // beyond the end of our next write region. We use the
\r
4750 // Sleep() function to suspend operation until that happens.
\r
4751 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
4752 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
4753 if ( millis < 1.0 ) millis = 1.0;
\r
4754 Sleep( (DWORD) millis );
\r
4757 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
4758 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
4759 // We've strayed into the forbidden zone ... resync the read pointer.
\r
4760 handle->xrun[0] = true;
\r
4761 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
4762 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
4763 handle->bufferPointer[0] = nextWritePointer;
\r
4764 endWrite = nextWritePointer + bufferBytes;
\r
4767 // Lock free space in the buffer
\r
4768 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
4769 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4770 if ( FAILED( result ) ) {
\r
4771 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
4772 errorText_ = errorStream_.str();
\r
4773 error( RtError::SYSTEM_ERROR );
\r
4776 // Copy our buffer into the DS buffer
\r
4777 CopyMemory( buffer1, buffer, bufferSize1 );
\r
4778 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
4780 // Update our buffer offset and unlock sound buffer
\r
4781 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4782 if ( FAILED( result ) ) {
\r
4783 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
4784 errorText_ = errorStream_.str();
\r
4785 error( RtError::SYSTEM_ERROR );
\r
4787 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4788 handle->bufferPointer[0] = nextWritePointer;
\r
4790 if ( handle->drainCounter ) {
\r
4791 handle->drainCounter++;
\r
4796 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4798 // Setup parameters.
\r
4799 if ( stream_.doConvertBuffer[1] ) {
\r
4800 buffer = stream_.deviceBuffer;
\r
4801 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
4802 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
4805 buffer = stream_.userBuffer[1];
\r
4806 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
4807 bufferBytes *= formatBytes( stream_.userFormat );
\r
4810 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4811 long nextReadPointer = handle->bufferPointer[1];
\r
4812 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
4814 // Find out where the write and "safe read" pointers are.
\r
4815 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4816 if ( FAILED( result ) ) {
\r
4817 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4818 errorText_ = errorStream_.str();
\r
4819 error( RtError::SYSTEM_ERROR );
\r
4822 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4823 DWORD endRead = nextReadPointer + bufferBytes;
\r
4825 // Handling depends on whether we are INPUT or DUPLEX.
\r
4826 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
4827 // then a wait here will drag the write pointers into the forbidden zone.
\r
4829 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
4830 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
4831 // practical way to sync up the read and write pointers reliably, given the
\r
4832 // the very complex relationship between phase and increment of the read and write
\r
4835 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
4836 // provide a pre-roll period of 0.5 seconds in which we return
\r
4837 // zeros from the read buffer while the pointers sync up.
\r
4839 if ( stream_.mode == DUPLEX ) {
\r
4840 if ( safeReadPointer < endRead ) {
\r
4841 if ( duplexPrerollBytes <= 0 ) {
\r
4842 // Pre-roll time over. Be more agressive.
\r
4843 int adjustment = endRead-safeReadPointer;
\r
4845 handle->xrun[1] = true;
\r
4847 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
4848 // and perform fine adjustments later.
\r
4849 // - small adjustments: back off by twice as much.
\r
4850 if ( adjustment >= 2*bufferBytes )
\r
4851 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
4853 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
4855 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4859 // In pre=roll time. Just do it.
\r
4860 nextReadPointer = safeReadPointer - bufferBytes;
\r
4861 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4863 endRead = nextReadPointer + bufferBytes;
\r
4866 else { // mode == INPUT
\r
4867 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
4868 // See comments for playback.
\r
4869 double millis = (endRead - safeReadPointer) * 1000.0;
\r
4870 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
4871 if ( millis < 1.0 ) millis = 1.0;
\r
4872 Sleep( (DWORD) millis );
\r
4874 // Wake up and find out where we are now.
\r
4875 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4876 if ( FAILED( result ) ) {
\r
4877 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4878 errorText_ = errorStream_.str();
\r
4879 error( RtError::SYSTEM_ERROR );
\r
4882 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4886 // Lock free space in the buffer
\r
4887 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
4888 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4889 if ( FAILED( result ) ) {
\r
4890 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
4891 errorText_ = errorStream_.str();
\r
4892 error( RtError::SYSTEM_ERROR );
\r
4895 if ( duplexPrerollBytes <= 0 ) {
\r
4896 // Copy our buffer into the DS buffer
\r
4897 CopyMemory( buffer, buffer1, bufferSize1 );
\r
4898 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
4901 memset( buffer, 0, bufferSize1 );
\r
4902 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
4903 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
4906 // Update our buffer offset and unlock sound buffer
\r
4907 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4908 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4909 if ( FAILED( result ) ) {
\r
4910 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
4911 errorText_ = errorStream_.str();
\r
4912 error( RtError::SYSTEM_ERROR );
\r
4914 handle->bufferPointer[1] = nextReadPointer;
\r
4916 // No byte swapping necessary in DirectSound implementation.
\r
4918 // If necessary, convert 8-bit data from unsigned to signed.
\r
4919 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
4920 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
4922 // Do buffer conversion if necessary.
\r
4923 if ( stream_.doConvertBuffer[1] )
\r
4924 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
4928 RtApi::tickStreamTime();
\r
4931 // Definitions for utility functions and callbacks
\r
4932 // specific to the DirectSound implementation.
\r
4934 extern "C" unsigned __stdcall callbackHandler( void *ptr )
\r
4936 CallbackInfo *info = (CallbackInfo *) ptr;
\r
4937 RtApiDs *object = (RtApiDs *) info->object;
\r
4938 bool* isRunning = &info->isRunning;
\r
4940 while ( *isRunning == true ) {
\r
4941 object->callbackEvent();
\r
4944 _endthreadex( 0 );
\r
4948 #include "tchar.h"
\r
4950 std::string convertTChar( LPCTSTR name )
\r
4952 #if defined( UNICODE ) || defined( _UNICODE )
\r
4953 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
4954 std::string s( length, 0 );
\r
4955 length = WideCharToMultiByte(CP_UTF8, 0, name, wcslen(name), &s[0], length, NULL, NULL);
\r
4957 std::string s( name );
\r
4963 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
4964 LPCTSTR description,
\r
4966 LPVOID lpContext )
\r
4968 bool *isInput = (bool *) lpContext;
\r
4971 bool validDevice = false;
\r
4972 if ( *isInput == true ) {
\r
4974 LPDIRECTSOUNDCAPTURE object;
\r
4976 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
4977 if ( hr != DS_OK ) return TRUE;
\r
4979 caps.dwSize = sizeof(caps);
\r
4980 hr = object->GetCaps( &caps );
\r
4981 if ( hr == DS_OK ) {
\r
4982 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
4983 validDevice = true;
\r
4985 object->Release();
\r
4989 LPDIRECTSOUND object;
\r
4990 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
4991 if ( hr != DS_OK ) return TRUE;
\r
4993 caps.dwSize = sizeof(caps);
\r
4994 hr = object->GetCaps( &caps );
\r
4995 if ( hr == DS_OK ) {
\r
4996 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
4997 validDevice = true;
\r
4999 object->Release();
\r
5002 // If good device, then save its name and guid.
\r
5003 std::string name = convertTChar( description );
\r
5004 if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
5005 name = "Default Device";
\r
5006 if ( validDevice ) {
\r
5007 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
5008 if ( dsDevices[i].name == name ) {
\r
5009 dsDevices[i].found = true;
\r
5011 dsDevices[i].id[1] = lpguid;
\r
5012 dsDevices[i].validId[1] = true;
\r
5015 dsDevices[i].id[0] = lpguid;
\r
5016 dsDevices[i].validId[0] = true;
\r
5023 device.name = name;
\r
5024 device.found = true;
\r
5026 device.id[1] = lpguid;
\r
5027 device.validId[1] = true;
\r
5030 device.id[0] = lpguid;
\r
5031 device.validId[0] = true;
\r
5033 dsDevices.push_back( device );
\r
5039 static const char* getErrorString( int code )
\r
5043 case DSERR_ALLOCATED:
\r
5044 return "Already allocated";
\r
5046 case DSERR_CONTROLUNAVAIL:
\r
5047 return "Control unavailable";
\r
5049 case DSERR_INVALIDPARAM:
\r
5050 return "Invalid parameter";
\r
5052 case DSERR_INVALIDCALL:
\r
5053 return "Invalid call";
\r
5055 case DSERR_GENERIC:
\r
5056 return "Generic error";
\r
5058 case DSERR_PRIOLEVELNEEDED:
\r
5059 return "Priority level needed";
\r
5061 case DSERR_OUTOFMEMORY:
\r
5062 return "Out of memory";
\r
5064 case DSERR_BADFORMAT:
\r
5065 return "The sample rate or the channel format is not supported";
\r
5067 case DSERR_UNSUPPORTED:
\r
5068 return "Not supported";
\r
5070 case DSERR_NODRIVER:
\r
5071 return "No driver";
\r
5073 case DSERR_ALREADYINITIALIZED:
\r
5074 return "Already initialized";
\r
5076 case DSERR_NOAGGREGATION:
\r
5077 return "No aggregation";
\r
5079 case DSERR_BUFFERLOST:
\r
5080 return "Buffer lost";
\r
5082 case DSERR_OTHERAPPHASPRIO:
\r
5083 return "Another application already has priority";
\r
5085 case DSERR_UNINITIALIZED:
\r
5086 return "Uninitialized";
\r
5089 return "DirectSound unknown error";
\r
5092 //******************** End of __WINDOWS_DS__ *********************//
\r
5096 #if defined(__LINUX_ALSA__)
\r
5098 #include <alsa/asoundlib.h>
\r
5099 #include <unistd.h>
\r
5101 // A structure to hold various information related to the ALSA API
\r
5102 // implementation.
\r
5103 struct AlsaHandle {
\r
5104 snd_pcm_t *handles[2];
\r
5105 bool synchronized;
\r
5107 pthread_cond_t runnable_cv;
\r
5111 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
5114 extern "C" void *alsaCallbackHandler( void * ptr );
\r
5116 RtApiAlsa :: RtApiAlsa()
\r
5118 // Nothing to do here.
\r
5121 RtApiAlsa :: ~RtApiAlsa()
\r
5123 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5126 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
5128 unsigned nDevices = 0;
\r
5129 int result, subdevice, card;
\r
5131 snd_ctl_t *handle;
\r
5133 // Count cards and devices
\r
5135 snd_card_next( &card );
\r
5136 while ( card >= 0 ) {
\r
5137 sprintf( name, "hw:%d", card );
\r
5138 result = snd_ctl_open( &handle, name, 0 );
\r
5139 if ( result < 0 ) {
\r
5140 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5141 errorText_ = errorStream_.str();
\r
5142 error( RtError::WARNING );
\r
5147 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
5148 if ( result < 0 ) {
\r
5149 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5150 errorText_ = errorStream_.str();
\r
5151 error( RtError::WARNING );
\r
5154 if ( subdevice < 0 )
\r
5159 snd_ctl_close( handle );
\r
5160 snd_card_next( &card );
\r
5166 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
5168 RtAudio::DeviceInfo info;
\r
5169 info.probed = false;
\r
5171 unsigned nDevices = 0;
\r
5172 int result, subdevice, card;
\r
5174 snd_ctl_t *chandle;
\r
5176 // Count cards and devices
\r
5178 snd_card_next( &card );
\r
5179 while ( card >= 0 ) {
\r
5180 sprintf( name, "hw:%d", card );
\r
5181 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5182 if ( result < 0 ) {
\r
5183 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5184 errorText_ = errorStream_.str();
\r
5185 error( RtError::WARNING );
\r
5190 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5191 if ( result < 0 ) {
\r
5192 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5193 errorText_ = errorStream_.str();
\r
5194 error( RtError::WARNING );
\r
5197 if ( subdevice < 0 ) break;
\r
5198 if ( nDevices == device ) {
\r
5199 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5205 snd_ctl_close( chandle );
\r
5206 snd_card_next( &card );
\r
5209 if ( nDevices == 0 ) {
\r
5210 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
5211 error( RtError::INVALID_USE );
\r
5214 if ( device >= nDevices ) {
\r
5215 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
5216 error( RtError::INVALID_USE );
\r
5221 // If a stream is already open, we cannot probe the stream devices.
\r
5222 // Thus, use the saved results.
\r
5223 if ( stream_.state != STREAM_CLOSED &&
\r
5224 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
5225 snd_ctl_close( chandle );
\r
5226 if ( device >= devices_.size() ) {
\r
5227 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
5228 error( RtError::WARNING );
\r
5231 return devices_[ device ];
\r
5234 int openMode = SND_PCM_ASYNC;
\r
5235 snd_pcm_stream_t stream;
\r
5236 snd_pcm_info_t *pcminfo;
\r
5237 snd_pcm_info_alloca( &pcminfo );
\r
5238 snd_pcm_t *phandle;
\r
5239 snd_pcm_hw_params_t *params;
\r
5240 snd_pcm_hw_params_alloca( ¶ms );
\r
5242 // First try for playback
\r
5243 stream = SND_PCM_STREAM_PLAYBACK;
\r
5244 snd_pcm_info_set_device( pcminfo, subdevice );
\r
5245 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
5246 snd_pcm_info_set_stream( pcminfo, stream );
\r
5248 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5249 if ( result < 0 ) {
\r
5250 // Device probably doesn't support playback.
\r
5251 goto captureProbe;
\r
5254 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
5255 if ( result < 0 ) {
\r
5256 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5257 errorText_ = errorStream_.str();
\r
5258 error( RtError::WARNING );
\r
5259 goto captureProbe;
\r
5262 // The device is open ... fill the parameter structure.
\r
5263 result = snd_pcm_hw_params_any( phandle, params );
\r
5264 if ( result < 0 ) {
\r
5265 snd_pcm_close( phandle );
\r
5266 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5267 errorText_ = errorStream_.str();
\r
5268 error( RtError::WARNING );
\r
5269 goto captureProbe;
\r
5272 // Get output channel information.
\r
5273 unsigned int value;
\r
5274 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5275 if ( result < 0 ) {
\r
5276 snd_pcm_close( phandle );
\r
5277 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
5278 errorText_ = errorStream_.str();
\r
5279 error( RtError::WARNING );
\r
5280 goto captureProbe;
\r
5282 info.outputChannels = value;
\r
5283 snd_pcm_close( phandle );
\r
5286 // Now try for capture
\r
5287 stream = SND_PCM_STREAM_CAPTURE;
\r
5288 snd_pcm_info_set_stream( pcminfo, stream );
\r
5290 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5291 snd_ctl_close( chandle );
\r
5292 if ( result < 0 ) {
\r
5293 // Device probably doesn't support capture.
\r
5294 if ( info.outputChannels == 0 ) return info;
\r
5295 goto probeParameters;
\r
5298 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5299 if ( result < 0 ) {
\r
5300 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5301 errorText_ = errorStream_.str();
\r
5302 error( RtError::WARNING );
\r
5303 if ( info.outputChannels == 0 ) return info;
\r
5304 goto probeParameters;
\r
5307 // The device is open ... fill the parameter structure.
\r
5308 result = snd_pcm_hw_params_any( phandle, params );
\r
5309 if ( result < 0 ) {
\r
5310 snd_pcm_close( phandle );
\r
5311 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5312 errorText_ = errorStream_.str();
\r
5313 error( RtError::WARNING );
\r
5314 if ( info.outputChannels == 0 ) return info;
\r
5315 goto probeParameters;
\r
5318 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5319 if ( result < 0 ) {
\r
5320 snd_pcm_close( phandle );
\r
5321 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
5322 errorText_ = errorStream_.str();
\r
5323 error( RtError::WARNING );
\r
5324 if ( info.outputChannels == 0 ) return info;
\r
5325 goto probeParameters;
\r
5327 info.inputChannels = value;
\r
5328 snd_pcm_close( phandle );
\r
5330 // If device opens for both playback and capture, we determine the channels.
\r
5331 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5332 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5334 // ALSA doesn't provide default devices so we'll use the first available one.
\r
5335 if ( device == 0 && info.outputChannels > 0 )
\r
5336 info.isDefaultOutput = true;
\r
5337 if ( device == 0 && info.inputChannels > 0 )
\r
5338 info.isDefaultInput = true;
\r
5341 // At this point, we just need to figure out the supported data
\r
5342 // formats and sample rates. We'll proceed by opening the device in
\r
5343 // the direction with the maximum number of channels, or playback if
\r
5344 // they are equal. This might limit our sample rate options, but so
\r
5347 if ( info.outputChannels >= info.inputChannels )
\r
5348 stream = SND_PCM_STREAM_PLAYBACK;
\r
5350 stream = SND_PCM_STREAM_CAPTURE;
\r
5351 snd_pcm_info_set_stream( pcminfo, stream );
\r
5353 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5354 if ( result < 0 ) {
\r
5355 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5356 errorText_ = errorStream_.str();
\r
5357 error( RtError::WARNING );
\r
5361 // The device is open ... fill the parameter structure.
\r
5362 result = snd_pcm_hw_params_any( phandle, params );
\r
5363 if ( result < 0 ) {
\r
5364 snd_pcm_close( phandle );
\r
5365 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5366 errorText_ = errorStream_.str();
\r
5367 error( RtError::WARNING );
\r
5371 // Test our discrete set of sample rate values.
\r
5372 info.sampleRates.clear();
\r
5373 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
5374 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
5375 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
5377 if ( info.sampleRates.size() == 0 ) {
\r
5378 snd_pcm_close( phandle );
\r
5379 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
5380 errorText_ = errorStream_.str();
\r
5381 error( RtError::WARNING );
\r
5385 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
5386 snd_pcm_format_t format;
\r
5387 info.nativeFormats = 0;
\r
5388 format = SND_PCM_FORMAT_S8;
\r
5389 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5390 info.nativeFormats |= RTAUDIO_SINT8;
\r
5391 format = SND_PCM_FORMAT_S16;
\r
5392 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5393 info.nativeFormats |= RTAUDIO_SINT16;
\r
5394 format = SND_PCM_FORMAT_S24;
\r
5395 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5396 info.nativeFormats |= RTAUDIO_SINT24;
\r
5397 format = SND_PCM_FORMAT_S32;
\r
5398 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5399 info.nativeFormats |= RTAUDIO_SINT32;
\r
5400 format = SND_PCM_FORMAT_FLOAT;
\r
5401 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5402 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
5403 format = SND_PCM_FORMAT_FLOAT64;
\r
5404 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5405 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
5407 // Check that we have at least one supported format
\r
5408 if ( info.nativeFormats == 0 ) {
\r
5409 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
5410 errorText_ = errorStream_.str();
\r
5411 error( RtError::WARNING );
\r
5415 // Get the device name
\r
5417 result = snd_card_get_name( card, &cardname );
\r
5418 if ( result >= 0 )
\r
5419 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
5422 // That's all ... close the device and return
\r
5423 snd_pcm_close( phandle );
\r
5424 info.probed = true;
\r
5428 void RtApiAlsa :: saveDeviceInfo( void )
\r
5432 unsigned int nDevices = getDeviceCount();
\r
5433 devices_.resize( nDevices );
\r
5434 for ( unsigned int i=0; i<nDevices; i++ )
\r
5435 devices_[i] = getDeviceInfo( i );
\r
5438 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5439 unsigned int firstChannel, unsigned int sampleRate,
\r
5440 RtAudioFormat format, unsigned int *bufferSize,
\r
5441 RtAudio::StreamOptions *options )
\r
5444 #if defined(__RTAUDIO_DEBUG__)
\r
5445 snd_output_t *out;
\r
5446 snd_output_stdio_attach(&out, stderr, 0);
\r
5449 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
5451 unsigned nDevices = 0;
\r
5452 int result, subdevice, card;
\r
5454 snd_ctl_t *chandle;
\r
5456 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
5457 snprintf(name, sizeof(name), "%s", "default");
\r
5459 // Count cards and devices
\r
5461 snd_card_next( &card );
\r
5462 while ( card >= 0 ) {
\r
5463 sprintf( name, "hw:%d", card );
\r
5464 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5465 if ( result < 0 ) {
\r
5466 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5467 errorText_ = errorStream_.str();
\r
5472 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5473 if ( result < 0 ) break;
\r
5474 if ( subdevice < 0 ) break;
\r
5475 if ( nDevices == device ) {
\r
5476 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5477 snd_ctl_close( chandle );
\r
5482 snd_ctl_close( chandle );
\r
5483 snd_card_next( &card );
\r
5486 if ( nDevices == 0 ) {
\r
5487 // This should not happen because a check is made before this function is called.
\r
5488 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
5492 if ( device >= nDevices ) {
\r
5493 // This should not happen because a check is made before this function is called.
\r
5494 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
5501 // The getDeviceInfo() function will not work for a device that is
\r
5502 // already open. Thus, we'll probe the system before opening a
\r
5503 // stream and save the results for use by getDeviceInfo().
\r
5504 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
5505 this->saveDeviceInfo();
\r
5507 snd_pcm_stream_t stream;
\r
5508 if ( mode == OUTPUT )
\r
5509 stream = SND_PCM_STREAM_PLAYBACK;
\r
5511 stream = SND_PCM_STREAM_CAPTURE;
\r
5513 snd_pcm_t *phandle;
\r
5514 int openMode = SND_PCM_ASYNC;
\r
5515 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
5516 if ( result < 0 ) {
\r
5517 if ( mode == OUTPUT )
\r
5518 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
5520 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
5521 errorText_ = errorStream_.str();
\r
5525 // Fill the parameter structure.
\r
5526 snd_pcm_hw_params_t *hw_params;
\r
5527 snd_pcm_hw_params_alloca( &hw_params );
\r
5528 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
5529 if ( result < 0 ) {
\r
5530 snd_pcm_close( phandle );
\r
5531 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
5532 errorText_ = errorStream_.str();
\r
5536 #if defined(__RTAUDIO_DEBUG__)
\r
5537 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
5538 snd_pcm_hw_params_dump( hw_params, out );
\r
5541 // Set access ... check user preference.
\r
5542 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
5543 stream_.userInterleaved = false;
\r
5544 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5545 if ( result < 0 ) {
\r
5546 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5547 stream_.deviceInterleaved[mode] = true;
\r
5550 stream_.deviceInterleaved[mode] = false;
\r
5553 stream_.userInterleaved = true;
\r
5554 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5555 if ( result < 0 ) {
\r
5556 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5557 stream_.deviceInterleaved[mode] = false;
\r
5560 stream_.deviceInterleaved[mode] = true;
\r
5563 if ( result < 0 ) {
\r
5564 snd_pcm_close( phandle );
\r
5565 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
5566 errorText_ = errorStream_.str();
\r
5570 // Determine how to set the device format.
\r
5571 stream_.userFormat = format;
\r
5572 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
5574 if ( format == RTAUDIO_SINT8 )
\r
5575 deviceFormat = SND_PCM_FORMAT_S8;
\r
5576 else if ( format == RTAUDIO_SINT16 )
\r
5577 deviceFormat = SND_PCM_FORMAT_S16;
\r
5578 else if ( format == RTAUDIO_SINT24 )
\r
5579 deviceFormat = SND_PCM_FORMAT_S24;
\r
5580 else if ( format == RTAUDIO_SINT32 )
\r
5581 deviceFormat = SND_PCM_FORMAT_S32;
\r
5582 else if ( format == RTAUDIO_FLOAT32 )
\r
5583 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5584 else if ( format == RTAUDIO_FLOAT64 )
\r
5585 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5587 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
5588 stream_.deviceFormat[mode] = format;
\r
5592 // The user requested format is not natively supported by the device.
\r
5593 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5594 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
5595 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
5599 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5600 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5601 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
5605 deviceFormat = SND_PCM_FORMAT_S32;
\r
5606 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5607 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
5611 deviceFormat = SND_PCM_FORMAT_S24;
\r
5612 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5613 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
5617 deviceFormat = SND_PCM_FORMAT_S16;
\r
5618 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5619 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5623 deviceFormat = SND_PCM_FORMAT_S8;
\r
5624 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5625 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5629 // If we get here, no supported format was found.
\r
5630 snd_pcm_close( phandle );
\r
5631 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
5632 errorText_ = errorStream_.str();
\r
5636 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
5637 if ( result < 0 ) {
\r
5638 snd_pcm_close( phandle );
\r
5639 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
5640 errorText_ = errorStream_.str();
\r
5644 // Determine whether byte-swaping is necessary.
\r
5645 stream_.doByteSwap[mode] = false;
\r
5646 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
5647 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
5648 if ( result == 0 )
\r
5649 stream_.doByteSwap[mode] = true;
\r
5650 else if (result < 0) {
\r
5651 snd_pcm_close( phandle );
\r
5652 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
5653 errorText_ = errorStream_.str();
\r
5658 // Set the sample rate.
\r
5659 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
5660 if ( result < 0 ) {
\r
5661 snd_pcm_close( phandle );
\r
5662 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5663 errorText_ = errorStream_.str();
\r
5667 // Determine the number of channels for this device. We support a possible
\r
5668 // minimum device channel number > than the value requested by the user.
\r
5669 stream_.nUserChannels[mode] = channels;
\r
5670 unsigned int value;
\r
5671 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
5672 unsigned int deviceChannels = value;
\r
5673 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
5674 snd_pcm_close( phandle );
\r
5675 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
5676 errorText_ = errorStream_.str();
\r
5680 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
5681 if ( result < 0 ) {
\r
5682 snd_pcm_close( phandle );
\r
5683 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5684 errorText_ = errorStream_.str();
\r
5687 deviceChannels = value;
\r
5688 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
5689 stream_.nDeviceChannels[mode] = deviceChannels;
\r
5691 // Set the device channels.
\r
5692 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
5693 if ( result < 0 ) {
\r
5694 snd_pcm_close( phandle );
\r
5695 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5696 errorText_ = errorStream_.str();
\r
5700 // Set the buffer (or period) size.
\r
5702 snd_pcm_uframes_t periodSize = *bufferSize;
\r
5703 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
5704 if ( result < 0 ) {
\r
5705 snd_pcm_close( phandle );
\r
5706 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5707 errorText_ = errorStream_.str();
\r
5710 *bufferSize = periodSize;
\r
5712 // Set the buffer number, which in ALSA is referred to as the "period".
\r
5713 unsigned int periods = 0;
\r
5714 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
5715 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
5716 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
5717 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
5718 if ( result < 0 ) {
\r
5719 snd_pcm_close( phandle );
\r
5720 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5721 errorText_ = errorStream_.str();
\r
5725 // If attempting to setup a duplex stream, the bufferSize parameter
\r
5726 // MUST be the same in both directions!
\r
5727 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
5728 snd_pcm_close( phandle );
\r
5729 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
5730 errorText_ = errorStream_.str();
\r
5734 stream_.bufferSize = *bufferSize;
\r
5736 // Install the hardware configuration
\r
5737 result = snd_pcm_hw_params( phandle, hw_params );
\r
5738 if ( result < 0 ) {
\r
5739 snd_pcm_close( phandle );
\r
5740 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5741 errorText_ = errorStream_.str();
\r
5745 #if defined(__RTAUDIO_DEBUG__)
\r
5746 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
5747 snd_pcm_hw_params_dump( hw_params, out );
\r
5750 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
5751 snd_pcm_sw_params_t *sw_params = NULL;
\r
5752 snd_pcm_sw_params_alloca( &sw_params );
\r
5753 snd_pcm_sw_params_current( phandle, sw_params );
\r
5754 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
5755 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
5756 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
5758 // The following two settings were suggested by Theo Veenker
\r
5759 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
5760 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
5762 // here are two options for a fix
\r
5763 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
5764 snd_pcm_uframes_t val;
\r
5765 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
5766 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
5768 result = snd_pcm_sw_params( phandle, sw_params );
\r
5769 if ( result < 0 ) {
\r
5770 snd_pcm_close( phandle );
\r
5771 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5772 errorText_ = errorStream_.str();
\r
5776 #if defined(__RTAUDIO_DEBUG__)
\r
5777 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
5778 snd_pcm_sw_params_dump( sw_params, out );
\r
5781 // Set flags for buffer conversion
\r
5782 stream_.doConvertBuffer[mode] = false;
\r
5783 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
5784 stream_.doConvertBuffer[mode] = true;
\r
5785 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
5786 stream_.doConvertBuffer[mode] = true;
\r
5787 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5788 stream_.nUserChannels[mode] > 1 )
\r
5789 stream_.doConvertBuffer[mode] = true;
\r
5791 // Allocate the ApiHandle if necessary and then save.
\r
5792 AlsaHandle *apiInfo = 0;
\r
5793 if ( stream_.apiHandle == 0 ) {
\r
5795 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
5797 catch ( std::bad_alloc& ) {
\r
5798 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
5802 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
5803 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
5807 stream_.apiHandle = (void *) apiInfo;
\r
5808 apiInfo->handles[0] = 0;
\r
5809 apiInfo->handles[1] = 0;
\r
5812 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
5814 apiInfo->handles[mode] = phandle;
\r
5817 // Allocate necessary internal buffers.
\r
5818 unsigned long bufferBytes;
\r
5819 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5820 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5821 if ( stream_.userBuffer[mode] == NULL ) {
\r
5822 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
5826 if ( stream_.doConvertBuffer[mode] ) {
\r
5828 bool makeBuffer = true;
\r
5829 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5830 if ( mode == INPUT ) {
\r
5831 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5832 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5833 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
5837 if ( makeBuffer ) {
\r
5838 bufferBytes *= *bufferSize;
\r
5839 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5840 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5841 if ( stream_.deviceBuffer == NULL ) {
\r
5842 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
5848 stream_.sampleRate = sampleRate;
\r
5849 stream_.nBuffers = periods;
\r
5850 stream_.device[mode] = device;
\r
5851 stream_.state = STREAM_STOPPED;
\r
5853 // Setup the buffer conversion information structure.
\r
5854 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5856 // Setup thread if necessary.
\r
5857 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
5858 // We had already set up an output stream.
\r
5859 stream_.mode = DUPLEX;
\r
5860 // Link the streams if possible.
\r
5861 apiInfo->synchronized = false;
\r
5862 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
5863 apiInfo->synchronized = true;
\r
5865 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
5866 error( RtError::WARNING );
\r
5870 stream_.mode = mode;
\r
5872 // Setup callback thread.
\r
5873 stream_.callbackInfo.object = (void *) this;
\r
5875 // Set the thread attributes for joinable and realtime scheduling
\r
5876 // priority (optional). The higher priority will only take affect
\r
5877 // if the program is run as root or suid. Note, under Linux
\r
5878 // processes with CAP_SYS_NICE privilege, a user can change
\r
5879 // scheduling policy and priority (thus need not be root). See
\r
5880 // POSIX "capabilities".
\r
5881 pthread_attr_t attr;
\r
5882 pthread_attr_init( &attr );
\r
5883 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
5885 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
5886 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
5887 // We previously attempted to increase the audio callback priority
\r
5888 // to SCHED_RR here via the attributes. However, while no errors
\r
5889 // were reported in doing so, it did not work. So, now this is
\r
5890 // done in the alsaCallbackHandler function.
\r
5891 stream_.callbackInfo.doRealtime = true;
\r
5892 int priority = options->priority;
\r
5893 int min = sched_get_priority_min( SCHED_RR );
\r
5894 int max = sched_get_priority_max( SCHED_RR );
\r
5895 if ( priority < min ) priority = min;
\r
5896 else if ( priority > max ) priority = max;
\r
5897 stream_.callbackInfo.priority = priority;
\r
5901 stream_.callbackInfo.isRunning = true;
\r
5902 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
5903 pthread_attr_destroy( &attr );
\r
5905 stream_.callbackInfo.isRunning = false;
\r
5906 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
5915 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
5916 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
5917 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
5919 stream_.apiHandle = 0;
\r
5922 if ( phandle) snd_pcm_close( phandle );
\r
5924 for ( int i=0; i<2; i++ ) {
\r
5925 if ( stream_.userBuffer[i] ) {
\r
5926 free( stream_.userBuffer[i] );
\r
5927 stream_.userBuffer[i] = 0;
\r
5931 if ( stream_.deviceBuffer ) {
\r
5932 free( stream_.deviceBuffer );
\r
5933 stream_.deviceBuffer = 0;
\r
5939 void RtApiAlsa :: closeStream()
\r
5941 if ( stream_.state == STREAM_CLOSED ) {
\r
5942 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
5943 error( RtError::WARNING );
\r
5947 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
5948 stream_.callbackInfo.isRunning = false;
\r
5949 MUTEX_LOCK( &stream_.mutex );
\r
5950 if ( stream_.state == STREAM_STOPPED ) {
\r
5951 apiInfo->runnable = true;
\r
5952 pthread_cond_signal( &apiInfo->runnable_cv );
\r
5954 MUTEX_UNLOCK( &stream_.mutex );
\r
5955 pthread_join( stream_.callbackInfo.thread, NULL );
\r
5957 if ( stream_.state == STREAM_RUNNING ) {
\r
5958 stream_.state = STREAM_STOPPED;
\r
5959 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
5960 snd_pcm_drop( apiInfo->handles[0] );
\r
5961 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
5962 snd_pcm_drop( apiInfo->handles[1] );
\r
5966 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
5967 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
5968 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
5970 stream_.apiHandle = 0;
\r
5973 for ( int i=0; i<2; i++ ) {
\r
5974 if ( stream_.userBuffer[i] ) {
\r
5975 free( stream_.userBuffer[i] );
\r
5976 stream_.userBuffer[i] = 0;
\r
5980 if ( stream_.deviceBuffer ) {
\r
5981 free( stream_.deviceBuffer );
\r
5982 stream_.deviceBuffer = 0;
\r
5985 stream_.mode = UNINITIALIZED;
\r
5986 stream_.state = STREAM_CLOSED;
\r
5989 void RtApiAlsa :: startStream()
\r
5991 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
5994 if ( stream_.state == STREAM_RUNNING ) {
\r
5995 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
5996 error( RtError::WARNING );
\r
6000 MUTEX_LOCK( &stream_.mutex );
\r
6003 snd_pcm_state_t state;
\r
6004 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6005 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6006 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6007 state = snd_pcm_state( handle[0] );
\r
6008 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6009 result = snd_pcm_prepare( handle[0] );
\r
6010 if ( result < 0 ) {
\r
6011 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
6012 errorText_ = errorStream_.str();
\r
6018 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6019 state = snd_pcm_state( handle[1] );
\r
6020 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6021 result = snd_pcm_prepare( handle[1] );
\r
6022 if ( result < 0 ) {
\r
6023 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
6024 errorText_ = errorStream_.str();
\r
6030 stream_.state = STREAM_RUNNING;
\r
6033 apiInfo->runnable = true;
\r
6034 pthread_cond_signal( &apiInfo->runnable_cv );
\r
6035 MUTEX_UNLOCK( &stream_.mutex );
\r
6037 if ( result >= 0 ) return;
\r
6038 error( RtError::SYSTEM_ERROR );
\r
6041 void RtApiAlsa :: stopStream()
\r
6044 if ( stream_.state == STREAM_STOPPED ) {
\r
6045 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
6046 error( RtError::WARNING );
\r
6050 stream_.state = STREAM_STOPPED;
\r
6051 MUTEX_LOCK( &stream_.mutex );
\r
6054 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6055 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6056 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6057 if ( apiInfo->synchronized )
\r
6058 result = snd_pcm_drop( handle[0] );
\r
6060 result = snd_pcm_drain( handle[0] );
\r
6061 if ( result < 0 ) {
\r
6062 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
6063 errorText_ = errorStream_.str();
\r
6068 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6069 result = snd_pcm_drop( handle[1] );
\r
6070 if ( result < 0 ) {
\r
6071 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
6072 errorText_ = errorStream_.str();
\r
6078 MUTEX_UNLOCK( &stream_.mutex );
\r
6080 if ( result >= 0 ) return;
\r
6081 error( RtError::SYSTEM_ERROR );
\r
6084 void RtApiAlsa :: abortStream()
\r
6087 if ( stream_.state == STREAM_STOPPED ) {
\r
6088 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
6089 error( RtError::WARNING );
\r
6093 stream_.state = STREAM_STOPPED;
\r
6094 MUTEX_LOCK( &stream_.mutex );
\r
6097 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6098 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6099 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6100 result = snd_pcm_drop( handle[0] );
\r
6101 if ( result < 0 ) {
\r
6102 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
6103 errorText_ = errorStream_.str();
\r
6108 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6109 result = snd_pcm_drop( handle[1] );
\r
6110 if ( result < 0 ) {
\r
6111 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
6112 errorText_ = errorStream_.str();
\r
6118 MUTEX_UNLOCK( &stream_.mutex );
\r
6120 if ( result >= 0 ) return;
\r
6121 error( RtError::SYSTEM_ERROR );
\r
6124 void RtApiAlsa :: callbackEvent()
\r
6126 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6127 if ( stream_.state == STREAM_STOPPED ) {
\r
6128 MUTEX_LOCK( &stream_.mutex );
\r
6129 while ( !apiInfo->runnable )
\r
6130 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
6132 if ( stream_.state != STREAM_RUNNING ) {
\r
6133 MUTEX_UNLOCK( &stream_.mutex );
\r
6136 MUTEX_UNLOCK( &stream_.mutex );
\r
6139 if ( stream_.state == STREAM_CLOSED ) {
\r
6140 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6141 error( RtError::WARNING );
\r
6145 int doStopStream = 0;
\r
6146 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
6147 double streamTime = getStreamTime();
\r
6148 RtAudioStreamStatus status = 0;
\r
6149 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
6150 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6151 apiInfo->xrun[0] = false;
\r
6153 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
6154 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6155 apiInfo->xrun[1] = false;
\r
6157 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6158 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
6160 if ( doStopStream == 2 ) {
\r
6165 MUTEX_LOCK( &stream_.mutex );
\r
6167 // The state might change while waiting on a mutex.
\r
6168 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
6173 snd_pcm_t **handle;
\r
6174 snd_pcm_sframes_t frames;
\r
6175 RtAudioFormat format;
\r
6176 handle = (snd_pcm_t **) apiInfo->handles;
\r
6178 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6180 // Setup parameters.
\r
6181 if ( stream_.doConvertBuffer[1] ) {
\r
6182 buffer = stream_.deviceBuffer;
\r
6183 channels = stream_.nDeviceChannels[1];
\r
6184 format = stream_.deviceFormat[1];
\r
6187 buffer = stream_.userBuffer[1];
\r
6188 channels = stream_.nUserChannels[1];
\r
6189 format = stream_.userFormat;
\r
6192 // Read samples from device in interleaved/non-interleaved format.
\r
6193 if ( stream_.deviceInterleaved[1] )
\r
6194 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
6196 void *bufs[channels];
\r
6197 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6198 for ( int i=0; i<channels; i++ )
\r
6199 bufs[i] = (void *) (buffer + (i * offset));
\r
6200 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
6203 if ( result < (int) stream_.bufferSize ) {
\r
6204 // Either an error or overrun occured.
\r
6205 if ( result == -EPIPE ) {
\r
6206 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
6207 if ( state == SND_PCM_STATE_XRUN ) {
\r
6208 apiInfo->xrun[1] = true;
\r
6209 result = snd_pcm_prepare( handle[1] );
\r
6210 if ( result < 0 ) {
\r
6211 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
6212 errorText_ = errorStream_.str();
\r
6216 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6217 errorText_ = errorStream_.str();
\r
6221 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
6222 errorText_ = errorStream_.str();
\r
6224 error( RtError::WARNING );
\r
6228 // Do byte swapping if necessary.
\r
6229 if ( stream_.doByteSwap[1] )
\r
6230 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
6232 // Do buffer conversion if necessary.
\r
6233 if ( stream_.doConvertBuffer[1] )
\r
6234 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6236 // Check stream latency
\r
6237 result = snd_pcm_delay( handle[1], &frames );
\r
6238 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
6243 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6245 // Setup parameters and do buffer conversion if necessary.
\r
6246 if ( stream_.doConvertBuffer[0] ) {
\r
6247 buffer = stream_.deviceBuffer;
\r
6248 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6249 channels = stream_.nDeviceChannels[0];
\r
6250 format = stream_.deviceFormat[0];
\r
6253 buffer = stream_.userBuffer[0];
\r
6254 channels = stream_.nUserChannels[0];
\r
6255 format = stream_.userFormat;
\r
6258 // Do byte swapping if necessary.
\r
6259 if ( stream_.doByteSwap[0] )
\r
6260 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
6262 // Write samples to device in interleaved/non-interleaved format.
\r
6263 if ( stream_.deviceInterleaved[0] )
\r
6264 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
6266 void *bufs[channels];
\r
6267 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6268 for ( int i=0; i<channels; i++ )
\r
6269 bufs[i] = (void *) (buffer + (i * offset));
\r
6270 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
6273 if ( result < (int) stream_.bufferSize ) {
\r
6274 // Either an error or underrun occured.
\r
6275 if ( result == -EPIPE ) {
\r
6276 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
6277 if ( state == SND_PCM_STATE_XRUN ) {
\r
6278 apiInfo->xrun[0] = true;
\r
6279 result = snd_pcm_prepare( handle[0] );
\r
6280 if ( result < 0 ) {
\r
6281 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
6282 errorText_ = errorStream_.str();
\r
6286 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6287 errorText_ = errorStream_.str();
\r
6291 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
6292 errorText_ = errorStream_.str();
\r
6294 error( RtError::WARNING );
\r
6298 // Check stream latency
\r
6299 result = snd_pcm_delay( handle[0], &frames );
\r
6300 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
6304 MUTEX_UNLOCK( &stream_.mutex );
\r
6306 RtApi::tickStreamTime();
\r
6307 if ( doStopStream == 1 ) this->stopStream();
\r
6310 extern "C" void *alsaCallbackHandler( void *ptr )
\r
6312 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6313 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
6314 bool *isRunning = &info->isRunning;
\r
6316 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
6317 if ( &info->doRealtime ) {
\r
6318 pthread_t tID = pthread_self(); // ID of this thread
\r
6319 sched_param prio = { info->priority }; // scheduling priority of thread
\r
6320 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
6324 while ( *isRunning == true ) {
\r
6325 pthread_testcancel();
\r
6326 object->callbackEvent();
\r
6329 pthread_exit( NULL );
\r
6332 //******************** End of __LINUX_ALSA__ *********************//
\r
6335 #if defined(__LINUX_PULSE__)
\r
6337 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
6338 // and Tristan Matthews.
\r
6340 #include <pulse/error.h>
\r
6341 #include <pulse/simple.h>
\r
namespace {

// Zero-terminated list of sample rates the PulseAudio backend accepts.
// NOTE(review): the stray '}' on the original extracted line closed an
// anonymous namespace whose opening brace was lost in extraction; restored.
const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                               44100, 48000, 96000, 0 };

}
\r
6348 struct rtaudio_pa_format_mapping_t {
\r
6349 RtAudioFormat rtaudio_format;
\r
6350 pa_sample_format_t pa_format;
\r
6353 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
6354 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
6355 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
6356 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
6357 {0, PA_SAMPLE_INVALID}};
\r
6359 struct PulseAudioHandle {
\r
6360 pa_simple *s_play;
\r
6363 pthread_cond_t runnable_cv;
\r
6365 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
6368 RtApiPulse::~RtApiPulse()
\r
6370 if ( stream_.state != STREAM_CLOSED )
\r
6374 unsigned int RtApiPulse::getDeviceCount( void )
\r
6379 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int device )
\r
6381 RtAudio::DeviceInfo info;
\r
6382 info.probed = true;
\r
6383 info.name = "PulseAudio";
\r
6384 info.outputChannels = 2;
\r
6385 info.inputChannels = 2;
\r
6386 info.duplexChannels = 2;
\r
6387 info.isDefaultOutput = true;
\r
6388 info.isDefaultInput = true;
\r
6390 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
6391 info.sampleRates.push_back( *sr );
\r
6393 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
6398 extern "C" void *pulseaudio_callback( void * user )
\r
6400 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
6401 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
6402 volatile bool *isRunning = &cbi->isRunning;
\r
6404 while ( *isRunning ) {
\r
6405 pthread_testcancel();
\r
6406 context->callbackEvent();
\r
6409 pthread_exit( NULL );
\r
6412 void RtApiPulse::closeStream( void )
\r
6414 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6416 stream_.callbackInfo.isRunning = false;
\r
6418 MUTEX_LOCK( &stream_.mutex );
\r
6419 if ( stream_.state == STREAM_STOPPED ) {
\r
6420 pah->runnable = true;
\r
6421 pthread_cond_signal( &pah->runnable_cv );
\r
6423 MUTEX_UNLOCK( &stream_.mutex );
\r
6425 pthread_join( pah->thread, 0 );
\r
6426 if ( pah->s_play ) {
\r
6427 pa_simple_flush( pah->s_play, NULL );
\r
6428 pa_simple_free( pah->s_play );
\r
6431 pa_simple_free( pah->s_rec );
\r
6433 pthread_cond_destroy( &pah->runnable_cv );
\r
6435 stream_.apiHandle = 0;
\r
6438 if ( stream_.userBuffer[0] ) {
\r
6439 free( stream_.userBuffer[0] );
\r
6440 stream_.userBuffer[0] = 0;
\r
6442 if ( stream_.userBuffer[1] ) {
\r
6443 free( stream_.userBuffer[1] );
\r
6444 stream_.userBuffer[1] = 0;
\r
6447 stream_.state = STREAM_CLOSED;
\r
6448 stream_.mode = UNINITIALIZED;
\r
6451 void RtApiPulse::callbackEvent( void )
\r
6453 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6455 if ( stream_.state == STREAM_STOPPED ) {
\r
6456 MUTEX_LOCK( &stream_.mutex );
\r
6457 while ( !pah->runnable )
\r
6458 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
6460 if ( stream_.state != STREAM_RUNNING ) {
\r
6461 MUTEX_UNLOCK( &stream_.mutex );
\r
6464 MUTEX_UNLOCK( &stream_.mutex );
\r
6467 if ( stream_.state == STREAM_CLOSED ) {
\r
6468 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
6469 "this shouldn't happen!";
\r
6470 error( RtError::WARNING );
\r
6474 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
6475 double streamTime = getStreamTime();
\r
6476 RtAudioStreamStatus status = 0;
\r
6477 int doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6478 stream_.bufferSize, streamTime, status,
\r
6479 stream_.callbackInfo.userData );
\r
6481 if ( doStopStream == 2 ) {
\r
6486 MUTEX_LOCK( &stream_.mutex );
\r
6488 if ( stream_.state != STREAM_RUNNING )
\r
6493 switch ( stream_.mode ) {
\r
6495 bytes = stream_.nUserChannels[1] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
6496 if ( pa_simple_read( pah->s_rec, stream_.userBuffer[1], bytes, &pa_error ) < 0 ) {
\r
6497 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
6498 pa_strerror( pa_error ) << ".";
\r
6499 errorText_ = errorStream_.str();
\r
6500 error( RtError::WARNING );
\r
6504 bytes = stream_.nUserChannels[0] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
6505 if ( pa_simple_write( pah->s_play, stream_.userBuffer[0], bytes, &pa_error ) < 0 ) {
\r
6506 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
6507 pa_strerror( pa_error ) << ".";
\r
6508 errorText_ = errorStream_.str();
\r
6509 error( RtError::WARNING );
\r
6513 bytes = stream_.nUserChannels[1] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
6514 if ( pa_simple_read( pah->s_rec, stream_.userBuffer[1], bytes, &pa_error ) < 0 ) {
\r
6515 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
6516 pa_strerror( pa_error ) << ".";
\r
6517 errorText_ = errorStream_.str();
\r
6518 error( RtError::WARNING );
\r
6520 bytes = stream_.nUserChannels[0] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
6521 if ( pa_simple_write( pah->s_play, stream_.userBuffer[0], bytes, &pa_error ) < 0) {
\r
6522 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
6523 pa_strerror( pa_error ) << ".";
\r
6524 errorText_ = errorStream_.str();
\r
6525 error( RtError::WARNING );
\r
6534 MUTEX_UNLOCK( &stream_.mutex );
\r
6535 RtApi::tickStreamTime();
\r
6537 if ( doStopStream == 1 )
\r
6541 void RtApiPulse::startStream( void )
\r
6543 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6545 if ( stream_.state == STREAM_CLOSED ) {
\r
6546 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
6547 error( RtError::INVALID_USE );
\r
6550 if ( stream_.state == STREAM_RUNNING ) {
\r
6551 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
6552 error( RtError::WARNING );
\r
6556 MUTEX_LOCK( &stream_.mutex );
\r
6558 stream_.state = STREAM_RUNNING;
\r
6560 pah->runnable = true;
\r
6561 pthread_cond_signal( &pah->runnable_cv );
\r
6562 MUTEX_UNLOCK( &stream_.mutex );
\r
6565 void RtApiPulse::stopStream( void )
\r
6567 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6569 if ( stream_.state == STREAM_CLOSED ) {
\r
6570 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
6571 error( RtError::INVALID_USE );
\r
6574 if ( stream_.state == STREAM_STOPPED ) {
\r
6575 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
6576 error( RtError::WARNING );
\r
6580 stream_.state = STREAM_STOPPED;
\r
6581 MUTEX_LOCK( &stream_.mutex );
\r
6583 if ( pah && pah->s_play ) {
\r
6585 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
6586 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
6587 pa_strerror( pa_error ) << ".";
\r
6588 errorText_ = errorStream_.str();
\r
6589 MUTEX_UNLOCK( &stream_.mutex );
\r
6590 error( RtError::SYSTEM_ERROR );
\r
6594 stream_.state = STREAM_STOPPED;
\r
6595 MUTEX_UNLOCK( &stream_.mutex );
\r
6598 void RtApiPulse::abortStream( void )
\r
6600 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
6602 if ( stream_.state == STREAM_CLOSED ) {
\r
6603 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
6604 error( RtError::INVALID_USE );
\r
6607 if ( stream_.state == STREAM_STOPPED ) {
\r
6608 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
6609 error( RtError::WARNING );
\r
6613 stream_.state = STREAM_STOPPED;
\r
6614 MUTEX_LOCK( &stream_.mutex );
\r
6616 if ( pah && pah->s_play ) {
\r
6618 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
6619 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
6620 pa_strerror( pa_error ) << ".";
\r
6621 errorText_ = errorStream_.str();
\r
6622 MUTEX_UNLOCK( &stream_.mutex );
\r
6623 error( RtError::SYSTEM_ERROR );
\r
6627 stream_.state = STREAM_STOPPED;
\r
6628 MUTEX_UNLOCK( &stream_.mutex );
\r
6631 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
6632 unsigned int channels, unsigned int firstChannel,
\r
6633 unsigned int sampleRate, RtAudioFormat format,
\r
6634 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
6636 PulseAudioHandle *pah = 0;
\r
6637 unsigned long bufferBytes = 0;
\r
6638 pa_sample_spec ss;
\r
6640 if ( device != 0 ) return false;
\r
6641 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
6642 if ( channels != 1 && channels != 2 ) {
\r
6643 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
6646 ss.channels = channels;
\r
6648 if ( firstChannel != 0 ) return false;
\r
6650 bool sr_found = false;
\r
6651 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
6652 if ( sampleRate == *sr ) {
\r
6654 stream_.sampleRate = sampleRate;
\r
6655 ss.rate = sampleRate;
\r
6659 if ( !sr_found ) {
\r
6660 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
6664 bool sf_found = 0;
\r
6665 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
6666 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
6667 if ( format == sf->rtaudio_format ) {
\r
6669 stream_.userFormat = sf->rtaudio_format;
\r
6670 ss.format = sf->pa_format;
\r
6674 if ( !sf_found ) {
\r
6675 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample format.";
\r
6679 if ( options && ( options->flags & RTAUDIO_NONINTERLEAVED ) ) {
\r
6680 errorText_ = "RtApiPulse::probeDeviceOpen: only interleaved audio data supported.";
\r
6684 stream_.userInterleaved = true;
\r
6685 stream_.nBuffers = 1;
\r
6687 stream_.deviceInterleaved[mode] = true;
\r
6688 stream_.doByteSwap[mode] = false;
\r
6689 stream_.doConvertBuffer[mode] = false;
\r
6690 stream_.deviceFormat[mode] = stream_.userFormat;
\r
6691 stream_.nUserChannels[mode] = channels;
\r
6692 stream_.nDeviceChannels[mode] = channels;
\r
6693 stream_.channelOffset[mode] = 0;
\r
6695 // Allocate necessary internal buffers.
\r
6696 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
6697 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
6698 if ( stream_.userBuffer[mode] == NULL ) {
\r
6699 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
6702 stream_.bufferSize = *bufferSize;
\r
6704 if ( !stream_.apiHandle ) {
\r
6705 PulseAudioHandle *pah = new PulseAudioHandle;
\r
6707 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
6711 stream_.apiHandle = pah;
\r
6712 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
6713 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
6717 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
6722 pah->s_rec = pa_simple_new( NULL, "RtAudio", PA_STREAM_RECORD, NULL, "Record", &ss, NULL, NULL, &error );
\r
6723 if ( !pah->s_rec ) {
\r
6724 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
6729 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
6730 if ( !pah->s_play ) {
\r
6731 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
6739 if ( stream_.mode == UNINITIALIZED )
\r
6740 stream_.mode = mode;
\r
6741 else if ( stream_.mode == mode )
\r
6744 stream_.mode = DUPLEX;
\r
6746 stream_.state = STREAM_STOPPED;
\r
6748 if ( !stream_.callbackInfo.isRunning ) {
\r
6749 stream_.callbackInfo.object = this;
\r
6750 stream_.callbackInfo.isRunning = true;
\r
6751 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
6752 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
6763 //******************** End of __LINUX_PULSE__ *********************//
\r
6766 #if defined(__LINUX_OSS__)
\r
6768 #include <unistd.h>
\r
6769 #include <sys/ioctl.h>
\r
6770 #include <unistd.h>
\r
6771 #include <fcntl.h>
\r
6772 #include "soundcard.h"
\r
6773 #include <errno.h>
\r
6776 extern "C" void *ossCallbackHandler(void * ptr);
\r
// A structure to hold various information related to the OSS API
// implementation.
// NOTE(review): the xrun/triggered member lines were dropped by extraction;
// restored -- the visible constructor initializer references both.
struct OssHandle {
  int id[2];              // device ids
  bool xrun[2];           // over/underrun flags per direction
  bool triggered;         // device trigger state
  pthread_cond_t runnable;

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
6790 RtApiOss :: RtApiOss()
\r
6792 // Nothing to do here.
\r
6795 RtApiOss :: ~RtApiOss()
\r
6797 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6800 unsigned int RtApiOss :: getDeviceCount( void )
\r
6802 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6803 if ( mixerfd == -1 ) {
\r
6804 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
6805 error( RtError::WARNING );
\r
6809 oss_sysinfo sysinfo;
\r
6810 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
6812 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6813 error( RtError::WARNING );
\r
6818 return sysinfo.numaudios;
\r
6821 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
6823 RtAudio::DeviceInfo info;
\r
6824 info.probed = false;
\r
6826 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6827 if ( mixerfd == -1 ) {
\r
6828 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
6829 error( RtError::WARNING );
\r
6833 oss_sysinfo sysinfo;
\r
6834 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6835 if ( result == -1 ) {
\r
6837 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6838 error( RtError::WARNING );
\r
6842 unsigned nDevices = sysinfo.numaudios;
\r
6843 if ( nDevices == 0 ) {
\r
6845 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
6846 error( RtError::INVALID_USE );
\r
6849 if ( device >= nDevices ) {
\r
6851 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
6852 error( RtError::INVALID_USE );
\r
6855 oss_audioinfo ainfo;
\r
6856 ainfo.dev = device;
\r
6857 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6859 if ( result == -1 ) {
\r
6860 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6861 errorText_ = errorStream_.str();
\r
6862 error( RtError::WARNING );
\r
6867 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
6868 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
6869 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
6870 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
6871 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6874 // Probe data formats ... do for input
\r
6875 unsigned long mask = ainfo.iformats;
\r
6876 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
6877 info.nativeFormats |= RTAUDIO_SINT16;
\r
6878 if ( mask & AFMT_S8 )
\r
6879 info.nativeFormats |= RTAUDIO_SINT8;
\r
6880 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
6881 info.nativeFormats |= RTAUDIO_SINT32;
\r
6882 if ( mask & AFMT_FLOAT )
\r
6883 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
6884 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
6885 info.nativeFormats |= RTAUDIO_SINT24;
\r
6887 // Check that we have at least one supported format
\r
6888 if ( info.nativeFormats == 0 ) {
\r
6889 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
6890 errorText_ = errorStream_.str();
\r
6891 error( RtError::WARNING );
\r
6895 // Probe the supported sample rates.
\r
6896 info.sampleRates.clear();
\r
6897 if ( ainfo.nrates ) {
\r
6898 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
6899 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
6900 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
6901 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
6908 // Check min and max rate values;
\r
6909 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
6910 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
6911 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
6915 if ( info.sampleRates.size() == 0 ) {
\r
6916 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
6917 errorText_ = errorStream_.str();
\r
6918 error( RtError::WARNING );
\r
6921 info.probed = true;
\r
6922 info.name = ainfo.name;
\r
6929 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
6930 unsigned int firstChannel, unsigned int sampleRate,
\r
6931 RtAudioFormat format, unsigned int *bufferSize,
\r
6932 RtAudio::StreamOptions *options )
\r
6934 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6935 if ( mixerfd == -1 ) {
\r
6936 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
6940 oss_sysinfo sysinfo;
\r
6941 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6942 if ( result == -1 ) {
\r
6944 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6948 unsigned nDevices = sysinfo.numaudios;
\r
6949 if ( nDevices == 0 ) {
\r
6950 // This should not happen because a check is made before this function is called.
\r
6952 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
6956 if ( device >= nDevices ) {
\r
6957 // This should not happen because a check is made before this function is called.
\r
6959 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
6963 oss_audioinfo ainfo;
\r
6964 ainfo.dev = device;
\r
6965 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6967 if ( result == -1 ) {
\r
6968 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6969 errorText_ = errorStream_.str();
\r
6973 // Check if device supports input or output
\r
6974 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
6975 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
6976 if ( mode == OUTPUT )
\r
6977 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
6979 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
6980 errorText_ = errorStream_.str();
\r
6985 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
6986 if ( mode == OUTPUT )
\r
6987 flags |= O_WRONLY;
\r
6988 else { // mode == INPUT
\r
6989 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
6990 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
6991 close( handle->id[0] );
\r
6992 handle->id[0] = 0;
\r
6993 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
6994 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
6995 errorText_ = errorStream_.str();
\r
6998 // Check that the number previously set channels is the same.
\r
6999 if ( stream_.nUserChannels[0] != channels ) {
\r
7000 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
7001 errorText_ = errorStream_.str();
\r
7007 flags |= O_RDONLY;
\r
7010 // Set exclusive access if specified.
\r
7011 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
7013 // Try to open the device.
\r
7015 fd = open( ainfo.devnode, flags, 0 );
\r
7017 if ( errno == EBUSY )
\r
7018 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
7020 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
7021 errorText_ = errorStream_.str();
\r
7025 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
7027 if ( flags | O_RDWR ) {
\r
7028 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
7029 if ( result == -1) {
\r
7030 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
7031 errorText_ = errorStream_.str();
\r
7037 // Check the device channel support.
\r
7038 stream_.nUserChannels[mode] = channels;
\r
7039 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
7041 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
7042 errorText_ = errorStream_.str();
\r
7046 // Set the number of channels.
\r
7047 int deviceChannels = channels + firstChannel;
\r
7048 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
7049 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
7051 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
7052 errorText_ = errorStream_.str();
\r
7055 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7057 // Get the data format mask
\r
7059 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
7060 if ( result == -1 ) {
\r
7062 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
7063 errorText_ = errorStream_.str();
\r
7067 // Determine how to set the device format.
\r
7068 stream_.userFormat = format;
\r
7069 int deviceFormat = -1;
\r
7070 stream_.doByteSwap[mode] = false;
\r
7071 if ( format == RTAUDIO_SINT8 ) {
\r
7072 if ( mask & AFMT_S8 ) {
\r
7073 deviceFormat = AFMT_S8;
\r
7074 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7077 else if ( format == RTAUDIO_SINT16 ) {
\r
7078 if ( mask & AFMT_S16_NE ) {
\r
7079 deviceFormat = AFMT_S16_NE;
\r
7080 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7082 else if ( mask & AFMT_S16_OE ) {
\r
7083 deviceFormat = AFMT_S16_OE;
\r
7084 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7085 stream_.doByteSwap[mode] = true;
\r
7088 else if ( format == RTAUDIO_SINT24 ) {
\r
7089 if ( mask & AFMT_S24_NE ) {
\r
7090 deviceFormat = AFMT_S24_NE;
\r
7091 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7093 else if ( mask & AFMT_S24_OE ) {
\r
7094 deviceFormat = AFMT_S24_OE;
\r
7095 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7096 stream_.doByteSwap[mode] = true;
\r
7099 else if ( format == RTAUDIO_SINT32 ) {
\r
7100 if ( mask & AFMT_S32_NE ) {
\r
7101 deviceFormat = AFMT_S32_NE;
\r
7102 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7104 else if ( mask & AFMT_S32_OE ) {
\r
7105 deviceFormat = AFMT_S32_OE;
\r
7106 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7107 stream_.doByteSwap[mode] = true;
\r
7111 if ( deviceFormat == -1 ) {
\r
7112 // The user requested format is not natively supported by the device.
\r
7113 if ( mask & AFMT_S16_NE ) {
\r
7114 deviceFormat = AFMT_S16_NE;
\r
7115 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7117 else if ( mask & AFMT_S32_NE ) {
\r
7118 deviceFormat = AFMT_S32_NE;
\r
7119 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7121 else if ( mask & AFMT_S24_NE ) {
\r
7122 deviceFormat = AFMT_S24_NE;
\r
7123 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7125 else if ( mask & AFMT_S16_OE ) {
\r
7126 deviceFormat = AFMT_S16_OE;
\r
7127 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7128 stream_.doByteSwap[mode] = true;
\r
7130 else if ( mask & AFMT_S32_OE ) {
\r
7131 deviceFormat = AFMT_S32_OE;
\r
7132 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7133 stream_.doByteSwap[mode] = true;
\r
7135 else if ( mask & AFMT_S24_OE ) {
\r
7136 deviceFormat = AFMT_S24_OE;
\r
7137 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7138 stream_.doByteSwap[mode] = true;
\r
7140 else if ( mask & AFMT_S8) {
\r
7141 deviceFormat = AFMT_S8;
\r
7142 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7146 if ( stream_.deviceFormat[mode] == 0 ) {
\r
7147 // This really shouldn't happen ...
\r
7149 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
7150 errorText_ = errorStream_.str();
\r
7154 // Set the data format.
\r
7155 int temp = deviceFormat;
\r
7156 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
7157 if ( result == -1 || deviceFormat != temp ) {
\r
7159 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
7160 errorText_ = errorStream_.str();
\r
7164 // Attempt to set the buffer size. According to OSS, the minimum
\r
7165 // number of buffers is two. The supposed minimum buffer size is 16
\r
7166 // bytes, so that will be our lower bound. The argument to this
\r
7167 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
7168 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
7169 // We'll check the actual value used near the end of the setup
\r
7171 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
7172 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
7174 if ( options ) buffers = options->numberOfBuffers;
\r
7175 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
7176 if ( buffers < 2 ) buffers = 3;
\r
7177 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
7178 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
7179 if ( result == -1 ) {
\r
7181 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
7182 errorText_ = errorStream_.str();
\r
7185 stream_.nBuffers = buffers;
\r
7187 // Save buffer size (in sample frames).
\r
7188 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
7189 stream_.bufferSize = *bufferSize;
\r
7191 // Set the sample rate.
\r
7192 int srate = sampleRate;
\r
7193 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
7194 if ( result == -1 ) {
\r
7196 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
7197 errorText_ = errorStream_.str();
\r
7201 // Verify the sample rate setup worked.
\r
7202 if ( abs( srate - sampleRate ) > 100 ) {
\r
7204 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
7205 errorText_ = errorStream_.str();
\r
7208 stream_.sampleRate = sampleRate;
\r
7210 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
7211 // We're doing duplex setup here.
\r
7212 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
7213 stream_.nDeviceChannels[0] = deviceChannels;
\r
7216 // Set interleaving parameters.
\r
7217 stream_.userInterleaved = true;
\r
7218 stream_.deviceInterleaved[mode] = true;
\r
7219 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
7220 stream_.userInterleaved = false;
\r
7222 // Set flags for buffer conversion
\r
7223 stream_.doConvertBuffer[mode] = false;
\r
7224 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7225 stream_.doConvertBuffer[mode] = true;
\r
7226 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7227 stream_.doConvertBuffer[mode] = true;
\r
7228 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7229 stream_.nUserChannels[mode] > 1 )
\r
7230 stream_.doConvertBuffer[mode] = true;
\r
7232 // Allocate the stream handles if necessary and then save.
\r
7233 if ( stream_.apiHandle == 0 ) {
\r
7235 handle = new OssHandle;
\r
7237 catch ( std::bad_alloc& ) {
\r
7238 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
7242 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
7243 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
7247 stream_.apiHandle = (void *) handle;
\r
7250 handle = (OssHandle *) stream_.apiHandle;
\r
7252 handle->id[mode] = fd;
\r
7254 // Allocate necessary internal buffers.
\r
7255 unsigned long bufferBytes;
\r
7256 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7257 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7258 if ( stream_.userBuffer[mode] == NULL ) {
\r
7259 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
7263 if ( stream_.doConvertBuffer[mode] ) {
\r
7265 bool makeBuffer = true;
\r
7266 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7267 if ( mode == INPUT ) {
\r
7268 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7269 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7270 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7274 if ( makeBuffer ) {
\r
7275 bufferBytes *= *bufferSize;
\r
7276 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7277 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7278 if ( stream_.deviceBuffer == NULL ) {
\r
7279 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
7285 stream_.device[mode] = device;
\r
7286 stream_.state = STREAM_STOPPED;
\r
7288 // Setup the buffer conversion information structure.
\r
7289 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7291 // Setup thread if necessary.
\r
7292 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7293 // We had already set up an output stream.
\r
7294 stream_.mode = DUPLEX;
\r
7295 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
7298 stream_.mode = mode;
\r
7300 // Setup callback thread.
\r
7301 stream_.callbackInfo.object = (void *) this;
\r
7303 // Set the thread attributes for joinable and realtime scheduling
\r
7304 // priority. The higher priority will only take affect if the
\r
7305 // program is run as root or suid.
\r
7306 pthread_attr_t attr;
\r
7307 pthread_attr_init( &attr );
\r
7308 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7309 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7310 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7311 struct sched_param param;
\r
7312 int priority = options->priority;
\r
7313 int min = sched_get_priority_min( SCHED_RR );
\r
7314 int max = sched_get_priority_max( SCHED_RR );
\r
7315 if ( priority < min ) priority = min;
\r
7316 else if ( priority > max ) priority = max;
\r
7317 param.sched_priority = priority;
\r
7318 pthread_attr_setschedparam( &attr, ¶m );
\r
7319 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
7322 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
7324 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
7327 stream_.callbackInfo.isRunning = true;
\r
7328 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
7329 pthread_attr_destroy( &attr );
\r
7331 stream_.callbackInfo.isRunning = false;
\r
7332 errorText_ = "RtApiOss::error creating callback thread!";
\r
7341 pthread_cond_destroy( &handle->runnable );
\r
7342 if ( handle->id[0] ) close( handle->id[0] );
\r
7343 if ( handle->id[1] ) close( handle->id[1] );
\r
7345 stream_.apiHandle = 0;
\r
7348 for ( int i=0; i<2; i++ ) {
\r
7349 if ( stream_.userBuffer[i] ) {
\r
7350 free( stream_.userBuffer[i] );
\r
7351 stream_.userBuffer[i] = 0;
\r
7355 if ( stream_.deviceBuffer ) {
\r
7356 free( stream_.deviceBuffer );
\r
7357 stream_.deviceBuffer = 0;
\r
7363 void RtApiOss :: closeStream()
\r
7365 if ( stream_.state == STREAM_CLOSED ) {
\r
7366 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
7367 error( RtError::WARNING );
\r
7371 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7372 stream_.callbackInfo.isRunning = false;
\r
7373 MUTEX_LOCK( &stream_.mutex );
\r
7374 if ( stream_.state == STREAM_STOPPED )
\r
7375 pthread_cond_signal( &handle->runnable );
\r
7376 MUTEX_UNLOCK( &stream_.mutex );
\r
7377 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7379 if ( stream_.state == STREAM_RUNNING ) {
\r
7380 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7381 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7383 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7384 stream_.state = STREAM_STOPPED;
\r
7388 pthread_cond_destroy( &handle->runnable );
\r
7389 if ( handle->id[0] ) close( handle->id[0] );
\r
7390 if ( handle->id[1] ) close( handle->id[1] );
\r
7392 stream_.apiHandle = 0;
\r
7395 for ( int i=0; i<2; i++ ) {
\r
7396 if ( stream_.userBuffer[i] ) {
\r
7397 free( stream_.userBuffer[i] );
\r
7398 stream_.userBuffer[i] = 0;
\r
7402 if ( stream_.deviceBuffer ) {
\r
7403 free( stream_.deviceBuffer );
\r
7404 stream_.deviceBuffer = 0;
\r
7407 stream_.mode = UNINITIALIZED;
\r
7408 stream_.state = STREAM_CLOSED;
\r
7411 void RtApiOss :: startStream()
\r
7414 if ( stream_.state == STREAM_RUNNING ) {
\r
7415 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
7416 error( RtError::WARNING );
\r
7420 MUTEX_LOCK( &stream_.mutex );
\r
7422 stream_.state = STREAM_RUNNING;
\r
7424 // No need to do anything else here ... OSS automatically starts
\r
7425 // when fed samples.
\r
7427 MUTEX_UNLOCK( &stream_.mutex );
\r
7429 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7430 pthread_cond_signal( &handle->runnable );
\r
7433 void RtApiOss :: stopStream()
\r
7436 if ( stream_.state == STREAM_STOPPED ) {
\r
7437 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
7438 error( RtError::WARNING );
\r
7442 MUTEX_LOCK( &stream_.mutex );
\r
7444 // The state might change while waiting on a mutex.
\r
7445 if ( stream_.state == STREAM_STOPPED ) {
\r
7446 MUTEX_UNLOCK( &stream_.mutex );
\r
7451 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7452 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7454 // Flush the output with zeros a few times.
\r
7457 RtAudioFormat format;
\r
7459 if ( stream_.doConvertBuffer[0] ) {
\r
7460 buffer = stream_.deviceBuffer;
\r
7461 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7462 format = stream_.deviceFormat[0];
\r
7465 buffer = stream_.userBuffer[0];
\r
7466 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7467 format = stream_.userFormat;
\r
7470 memset( buffer, 0, samples * formatBytes(format) );
\r
7471 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
7472 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7473 if ( result == -1 ) {
\r
7474 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
7475 error( RtError::WARNING );
\r
7479 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7480 if ( result == -1 ) {
\r
7481 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7482 errorText_ = errorStream_.str();
\r
7485 handle->triggered = false;
\r
7488 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7489 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7490 if ( result == -1 ) {
\r
7491 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7492 errorText_ = errorStream_.str();
\r
7498 stream_.state = STREAM_STOPPED;
\r
7499 MUTEX_UNLOCK( &stream_.mutex );
\r
7501 if ( result != -1 ) return;
\r
7502 error( RtError::SYSTEM_ERROR );
\r
7505 void RtApiOss :: abortStream()
\r
7508 if ( stream_.state == STREAM_STOPPED ) {
\r
7509 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
7510 error( RtError::WARNING );
\r
7514 MUTEX_LOCK( &stream_.mutex );
\r
7516 // The state might change while waiting on a mutex.
\r
7517 if ( stream_.state == STREAM_STOPPED ) {
\r
7518 MUTEX_UNLOCK( &stream_.mutex );
\r
7523 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7524 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7525 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7526 if ( result == -1 ) {
\r
7527 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7528 errorText_ = errorStream_.str();
\r
7531 handle->triggered = false;
\r
7534 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7535 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7536 if ( result == -1 ) {
\r
7537 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7538 errorText_ = errorStream_.str();
\r
7544 stream_.state = STREAM_STOPPED;
\r
7545 MUTEX_UNLOCK( &stream_.mutex );
\r
7547 if ( result != -1 ) return;
\r
7548 error( RtError::SYSTEM_ERROR );
\r
7551 void RtApiOss :: callbackEvent()
\r
7553 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7554 if ( stream_.state == STREAM_STOPPED ) {
\r
7555 MUTEX_LOCK( &stream_.mutex );
\r
7556 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
7557 if ( stream_.state != STREAM_RUNNING ) {
\r
7558 MUTEX_UNLOCK( &stream_.mutex );
\r
7561 MUTEX_UNLOCK( &stream_.mutex );
\r
7564 if ( stream_.state == STREAM_CLOSED ) {
\r
7565 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7566 error( RtError::WARNING );
\r
7570 // Invoke user callback to get fresh output data.
\r
7571 int doStopStream = 0;
\r
7572 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7573 double streamTime = getStreamTime();
\r
7574 RtAudioStreamStatus status = 0;
\r
7575 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
7576 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7577 handle->xrun[0] = false;
\r
7579 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
7580 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7581 handle->xrun[1] = false;
\r
7583 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7584 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7585 if ( doStopStream == 2 ) {
\r
7586 this->abortStream();
\r
7590 MUTEX_LOCK( &stream_.mutex );
\r
7592 // The state might change while waiting on a mutex.
\r
7593 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7598 RtAudioFormat format;
\r
7600 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7602 // Setup parameters and do buffer conversion if necessary.
\r
7603 if ( stream_.doConvertBuffer[0] ) {
\r
7604 buffer = stream_.deviceBuffer;
\r
7605 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7606 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7607 format = stream_.deviceFormat[0];
\r
7610 buffer = stream_.userBuffer[0];
\r
7611 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7612 format = stream_.userFormat;
\r
7615 // Do byte swapping if necessary.
\r
7616 if ( stream_.doByteSwap[0] )
\r
7617 byteSwapBuffer( buffer, samples, format );
\r
7619 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
7621 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7622 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7623 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
7624 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7625 handle->triggered = true;
\r
7628 // Write samples to device.
\r
7629 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7631 if ( result == -1 ) {
\r
7632 // We'll assume this is an underrun, though there isn't a
\r
7633 // specific means for determining that.
\r
7634 handle->xrun[0] = true;
\r
7635 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
7636 error( RtError::WARNING );
\r
7637 // Continue on to input section.
\r
7641 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7643 // Setup parameters.
\r
7644 if ( stream_.doConvertBuffer[1] ) {
\r
7645 buffer = stream_.deviceBuffer;
\r
7646 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
7647 format = stream_.deviceFormat[1];
\r
7650 buffer = stream_.userBuffer[1];
\r
7651 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
7652 format = stream_.userFormat;
\r
7655 // Read samples from device.
\r
7656 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
7658 if ( result == -1 ) {
\r
7659 // We'll assume this is an overrun, though there isn't a
\r
7660 // specific means for determining that.
\r
7661 handle->xrun[1] = true;
\r
7662 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
7663 error( RtError::WARNING );
\r
7667 // Do byte swapping if necessary.
\r
7668 if ( stream_.doByteSwap[1] )
\r
7669 byteSwapBuffer( buffer, samples, format );
\r
7671 // Do buffer conversion if necessary.
\r
7672 if ( stream_.doConvertBuffer[1] )
\r
7673 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7677 MUTEX_UNLOCK( &stream_.mutex );
\r
7679 RtApi::tickStreamTime();
\r
7680 if ( doStopStream == 1 ) this->stopStream();
\r
7683 extern "C" void *ossCallbackHandler( void *ptr )
\r
7685 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7686 RtApiOss *object = (RtApiOss *) info->object;
\r
7687 bool *isRunning = &info->isRunning;
\r
7689 while ( *isRunning == true ) {
\r
7690 pthread_testcancel();
\r
7691 object->callbackEvent();
\r
7694 pthread_exit( NULL );
\r
7697 //******************** End of __LINUX_OSS__ *********************//
\r
7701 // *************************************************** //
\r
7703 // Protected common (OS-independent) RtAudio methods.
\r
7705 // *************************************************** //
\r
7707 // This method can be modified to control the behavior of error
\r
7708 // message printing.
\r
7709 void RtApi :: error( RtError::Type type )
\r
7711 errorStream_.str(""); // clear the ostringstream
\r
7712 if ( type == RtError::WARNING && showWarnings_ == true )
\r
7713 std::cerr << '\n' << errorText_ << "\n\n";
\r
7714 else if ( type != RtError::WARNING )
\r
7715 throw( RtError( errorText_, type ) );
\r
7718 void RtApi :: verifyStream()
\r
7720 if ( stream_.state == STREAM_CLOSED ) {
\r
7721 errorText_ = "RtApi:: a stream is not open!";
\r
7722 error( RtError::INVALID_USE );
\r
7726 void RtApi :: clearStreamInfo()
\r
7728 stream_.mode = UNINITIALIZED;
\r
7729 stream_.state = STREAM_CLOSED;
\r
7730 stream_.sampleRate = 0;
\r
7731 stream_.bufferSize = 0;
\r
7732 stream_.nBuffers = 0;
\r
7733 stream_.userFormat = 0;
\r
7734 stream_.userInterleaved = true;
\r
7735 stream_.streamTime = 0.0;
\r
7736 stream_.apiHandle = 0;
\r
7737 stream_.deviceBuffer = 0;
\r
7738 stream_.callbackInfo.callback = 0;
\r
7739 stream_.callbackInfo.userData = 0;
\r
7740 stream_.callbackInfo.isRunning = false;
\r
7741 for ( int i=0; i<2; i++ ) {
\r
7742 stream_.device[i] = 11111;
\r
7743 stream_.doConvertBuffer[i] = false;
\r
7744 stream_.deviceInterleaved[i] = true;
\r
7745 stream_.doByteSwap[i] = false;
\r
7746 stream_.nUserChannels[i] = 0;
\r
7747 stream_.nDeviceChannels[i] = 0;
\r
7748 stream_.channelOffset[i] = 0;
\r
7749 stream_.deviceFormat[i] = 0;
\r
7750 stream_.latency[i] = 0;
\r
7751 stream_.userBuffer[i] = 0;
\r
7752 stream_.convertInfo[i].channels = 0;
\r
7753 stream_.convertInfo[i].inJump = 0;
\r
7754 stream_.convertInfo[i].outJump = 0;
\r
7755 stream_.convertInfo[i].inFormat = 0;
\r
7756 stream_.convertInfo[i].outFormat = 0;
\r
7757 stream_.convertInfo[i].inOffset.clear();
\r
7758 stream_.convertInfo[i].outOffset.clear();
\r
7762 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
7764 if ( format == RTAUDIO_SINT16 )
\r
7766 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
7768 else if ( format == RTAUDIO_FLOAT64 )
\r
7770 else if ( format == RTAUDIO_SINT24 )
\r
7772 else if ( format == RTAUDIO_SINT8 )
\r
7775 errorText_ = "RtApi::formatBytes: undefined format.";
\r
7776 error( RtError::WARNING );
\r
7781 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
7783 if ( mode == INPUT ) { // convert device to user buffer
\r
7784 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
7785 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
7786 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
7787 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
7789 else { // convert user to device buffer
\r
7790 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
7791 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
7792 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
7793 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
7796 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
7797 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
7799 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
7801 // Set up the interleave/deinterleave offsets.
\r
7802 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
7803 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
7804 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
7805 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7806 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7807 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7808 stream_.convertInfo[mode].inJump = 1;
\r
7812 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7813 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7814 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7815 stream_.convertInfo[mode].outJump = 1;
\r
7819 else { // no (de)interleaving
\r
7820 if ( stream_.userInterleaved ) {
\r
7821 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7822 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7823 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7827 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7828 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7829 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7830 stream_.convertInfo[mode].inJump = 1;
\r
7831 stream_.convertInfo[mode].outJump = 1;
\r
7836 // Add channel offset.
\r
7837 if ( firstChannel > 0 ) {
\r
7838 if ( stream_.deviceInterleaved[mode] ) {
\r
7839 if ( mode == OUTPUT ) {
\r
7840 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7841 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
7844 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7845 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
7849 if ( mode == OUTPUT ) {
\r
7850 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7851 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
7854 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7855 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
7861 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
7863 // This function does format conversion, input/output channel compensation, and
\r
7864 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
7865 // the lower three bytes of a 32-bit integer.
\r
7867 // Clear our device buffer when in/out duplex device channels are different
\r
7868 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
7869 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
7870 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
7873 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
7875 Float64 *out = (Float64 *)outBuffer;
\r
7877 if (info.inFormat == RTAUDIO_SINT8) {
\r
7878 signed char *in = (signed char *)inBuffer;
\r
7879 scale = 1.0 / 127.5;
\r
7880 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7881 for (j=0; j<info.channels; j++) {
\r
7882 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7883 out[info.outOffset[j]] += 0.5;
\r
7884 out[info.outOffset[j]] *= scale;
\r
7886 in += info.inJump;
\r
7887 out += info.outJump;
\r
7890 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7891 Int16 *in = (Int16 *)inBuffer;
\r
7892 scale = 1.0 / 32767.5;
\r
7893 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7894 for (j=0; j<info.channels; j++) {
\r
7895 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7896 out[info.outOffset[j]] += 0.5;
\r
7897 out[info.outOffset[j]] *= scale;
\r
7899 in += info.inJump;
\r
7900 out += info.outJump;
\r
7903 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7904 Int24 *in = (Int24 *)inBuffer;
\r
7905 scale = 1.0 / 8388607.5;
\r
7906 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7907 for (j=0; j<info.channels; j++) {
\r
7908 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
7909 out[info.outOffset[j]] += 0.5;
\r
7910 out[info.outOffset[j]] *= scale;
\r
7912 in += info.inJump;
\r
7913 out += info.outJump;
\r
7916 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7917 Int32 *in = (Int32 *)inBuffer;
\r
7918 scale = 1.0 / 2147483647.5;
\r
7919 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7920 for (j=0; j<info.channels; j++) {
\r
7921 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7922 out[info.outOffset[j]] += 0.5;
\r
7923 out[info.outOffset[j]] *= scale;
\r
7925 in += info.inJump;
\r
7926 out += info.outJump;
\r
7929 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7930 Float32 *in = (Float32 *)inBuffer;
\r
7931 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7932 for (j=0; j<info.channels; j++) {
\r
7933 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7935 in += info.inJump;
\r
7936 out += info.outJump;
\r
7939 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7940 // Channel compensation and/or (de)interleaving only.
\r
7941 Float64 *in = (Float64 *)inBuffer;
\r
7942 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7943 for (j=0; j<info.channels; j++) {
\r
7944 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7946 in += info.inJump;
\r
7947 out += info.outJump;
\r
7951 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
7953 Float32 *out = (Float32 *)outBuffer;
\r
7955 if (info.inFormat == RTAUDIO_SINT8) {
\r
7956 signed char *in = (signed char *)inBuffer;
\r
7957 scale = (Float32) ( 1.0 / 127.5 );
\r
7958 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7959 for (j=0; j<info.channels; j++) {
\r
7960 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7961 out[info.outOffset[j]] += 0.5;
\r
7962 out[info.outOffset[j]] *= scale;
\r
7964 in += info.inJump;
\r
7965 out += info.outJump;
\r
7968 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7969 Int16 *in = (Int16 *)inBuffer;
\r
7970 scale = (Float32) ( 1.0 / 32767.5 );
\r
7971 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7972 for (j=0; j<info.channels; j++) {
\r
7973 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7974 out[info.outOffset[j]] += 0.5;
\r
7975 out[info.outOffset[j]] *= scale;
\r
7977 in += info.inJump;
\r
7978 out += info.outJump;
\r
7981 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7982 Int24 *in = (Int24 *)inBuffer;
\r
7983 scale = (Float32) ( 1.0 / 8388607.5 );
\r
7984 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7985 for (j=0; j<info.channels; j++) {
\r
7986 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
7987 out[info.outOffset[j]] += 0.5;
\r
7988 out[info.outOffset[j]] *= scale;
\r
7990 in += info.inJump;
\r
7991 out += info.outJump;
\r
7994 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7995 Int32 *in = (Int32 *)inBuffer;
\r
7996 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
7997 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7998 for (j=0; j<info.channels; j++) {
\r
7999 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8000 out[info.outOffset[j]] += 0.5;
\r
8001 out[info.outOffset[j]] *= scale;
\r
8003 in += info.inJump;
\r
8004 out += info.outJump;
\r
8007 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8008 // Channel compensation and/or (de)interleaving only.
\r
8009 Float32 *in = (Float32 *)inBuffer;
\r
8010 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8011 for (j=0; j<info.channels; j++) {
\r
8012 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8014 in += info.inJump;
\r
8015 out += info.outJump;
\r
8018 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8019 Float64 *in = (Float64 *)inBuffer;
\r
8020 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8021 for (j=0; j<info.channels; j++) {
\r
8022 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
8024 in += info.inJump;
\r
8025 out += info.outJump;
\r
8029 else if (info.outFormat == RTAUDIO_SINT32) {
\r
8030 Int32 *out = (Int32 *)outBuffer;
\r
8031 if (info.inFormat == RTAUDIO_SINT8) {
\r
8032 signed char *in = (signed char *)inBuffer;
\r
8033 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8034 for (j=0; j<info.channels; j++) {
\r
8035 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
8036 out[info.outOffset[j]] <<= 24;
\r
8038 in += info.inJump;
\r
8039 out += info.outJump;
\r
8042 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8043 Int16 *in = (Int16 *)inBuffer;
\r
8044 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8045 for (j=0; j<info.channels; j++) {
\r
8046 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
8047 out[info.outOffset[j]] <<= 16;
\r
8049 in += info.inJump;
\r
8050 out += info.outJump;
\r
8053 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8054 Int24 *in = (Int24 *)inBuffer;
\r
8055 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8056 for (j=0; j<info.channels; j++) {
\r
8057 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
8058 out[info.outOffset[j]] <<= 8;
\r
8060 in += info.inJump;
\r
8061 out += info.outJump;
\r
8064 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8065 // Channel compensation and/or (de)interleaving only.
\r
8066 Int32 *in = (Int32 *)inBuffer;
\r
8067 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8068 for (j=0; j<info.channels; j++) {
\r
8069 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8071 in += info.inJump;
\r
8072 out += info.outJump;
\r
8075 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8076 Float32 *in = (Float32 *)inBuffer;
\r
8077 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8078 for (j=0; j<info.channels; j++) {
\r
8079 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
8081 in += info.inJump;
\r
8082 out += info.outJump;
\r
8085 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8086 Float64 *in = (Float64 *)inBuffer;
\r
8087 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8088 for (j=0; j<info.channels; j++) {
\r
8089 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
8091 in += info.inJump;
\r
8092 out += info.outJump;
\r
8096 else if (info.outFormat == RTAUDIO_SINT24) {
\r
8097 Int24 *out = (Int24 *)outBuffer;
\r
8098 if (info.inFormat == RTAUDIO_SINT8) {
\r
8099 signed char *in = (signed char *)inBuffer;
\r
8100 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8101 for (j=0; j<info.channels; j++) {
\r
8102 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
8103 //out[info.outOffset[j]] <<= 16;
\r
8105 in += info.inJump;
\r
8106 out += info.outJump;
\r
8109 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8110 Int16 *in = (Int16 *)inBuffer;
\r
8111 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8112 for (j=0; j<info.channels; j++) {
\r
8113 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
8114 //out[info.outOffset[j]] <<= 8;
\r
8116 in += info.inJump;
\r
8117 out += info.outJump;
\r
8120 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8121 // Channel compensation and/or (de)interleaving only.
\r
8122 Int24 *in = (Int24 *)inBuffer;
\r
8123 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8124 for (j=0; j<info.channels; j++) {
\r
8125 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8127 in += info.inJump;
\r
8128 out += info.outJump;
\r
8131 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8132 Int32 *in = (Int32 *)inBuffer;
\r
8133 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8134 for (j=0; j<info.channels; j++) {
\r
8135 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
8136 //out[info.outOffset[j]] >>= 8;
\r
8138 in += info.inJump;
\r
8139 out += info.outJump;
\r
8142 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8143 Float32 *in = (Float32 *)inBuffer;
\r
8144 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8145 for (j=0; j<info.channels; j++) {
\r
8146 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
8148 in += info.inJump;
\r
8149 out += info.outJump;
\r
8152 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8153 Float64 *in = (Float64 *)inBuffer;
\r
8154 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8155 for (j=0; j<info.channels; j++) {
\r
8156 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
8158 in += info.inJump;
\r
8159 out += info.outJump;
\r
8163 else if (info.outFormat == RTAUDIO_SINT16) {
\r
8164 Int16 *out = (Int16 *)outBuffer;
\r
8165 if (info.inFormat == RTAUDIO_SINT8) {
\r
8166 signed char *in = (signed char *)inBuffer;
\r
8167 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8168 for (j=0; j<info.channels; j++) {
\r
8169 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
8170 out[info.outOffset[j]] <<= 8;
\r
8172 in += info.inJump;
\r
8173 out += info.outJump;
\r
8176 else if (info.inFormat == RTAUDIO_SINT16) {
\r
8177 // Channel compensation and/or (de)interleaving only.
\r
8178 Int16 *in = (Int16 *)inBuffer;
\r
8179 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8180 for (j=0; j<info.channels; j++) {
\r
8181 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8183 in += info.inJump;
\r
8184 out += info.outJump;
\r
8187 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8188 Int24 *in = (Int24 *)inBuffer;
\r
8189 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8190 for (j=0; j<info.channels; j++) {
\r
8191 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
8193 in += info.inJump;
\r
8194 out += info.outJump;
\r
8197 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8198 Int32 *in = (Int32 *)inBuffer;
\r
8199 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8200 for (j=0; j<info.channels; j++) {
\r
8201 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
8203 in += info.inJump;
\r
8204 out += info.outJump;
\r
8207 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8208 Float32 *in = (Float32 *)inBuffer;
\r
8209 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8210 for (j=0; j<info.channels; j++) {
\r
8211 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
8213 in += info.inJump;
\r
8214 out += info.outJump;
\r
8217 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8218 Float64 *in = (Float64 *)inBuffer;
\r
8219 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8220 for (j=0; j<info.channels; j++) {
\r
8221 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
8223 in += info.inJump;
\r
8224 out += info.outJump;
\r
8228 else if (info.outFormat == RTAUDIO_SINT8) {
\r
8229 signed char *out = (signed char *)outBuffer;
\r
8230 if (info.inFormat == RTAUDIO_SINT8) {
\r
8231 // Channel compensation and/or (de)interleaving only.
\r
8232 signed char *in = (signed char *)inBuffer;
\r
8233 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8234 for (j=0; j<info.channels; j++) {
\r
8235 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
8237 in += info.inJump;
\r
8238 out += info.outJump;
\r
8241 if (info.inFormat == RTAUDIO_SINT16) {
\r
8242 Int16 *in = (Int16 *)inBuffer;
\r
8243 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8244 for (j=0; j<info.channels; j++) {
\r
8245 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
8247 in += info.inJump;
\r
8248 out += info.outJump;
\r
8251 else if (info.inFormat == RTAUDIO_SINT24) {
\r
8252 Int24 *in = (Int24 *)inBuffer;
\r
8253 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8254 for (j=0; j<info.channels; j++) {
\r
8255 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
8257 in += info.inJump;
\r
8258 out += info.outJump;
\r
8261 else if (info.inFormat == RTAUDIO_SINT32) {
\r
8262 Int32 *in = (Int32 *)inBuffer;
\r
8263 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8264 for (j=0; j<info.channels; j++) {
\r
8265 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
8267 in += info.inJump;
\r
8268 out += info.outJump;
\r
8271 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
8272 Float32 *in = (Float32 *)inBuffer;
\r
8273 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8274 for (j=0; j<info.channels; j++) {
\r
8275 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
8277 in += info.inJump;
\r
8278 out += info.outJump;
\r
8281 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
8282 Float64 *in = (Float64 *)inBuffer;
\r
8283 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
8284 for (j=0; j<info.channels; j++) {
\r
8285 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
8287 in += info.inJump;
\r
8288 out += info.outJump;
\r
8294 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
8295 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
8296 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
8298 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
8300 register char val;
\r
8301 register char *ptr;
\r
8304 if ( format == RTAUDIO_SINT16 ) {
\r
8305 for ( unsigned int i=0; i<samples; i++ ) {
\r
8306 // Swap 1st and 2nd bytes.
\r
8308 *(ptr) = *(ptr+1);
\r
8311 // Increment 2 bytes.
\r
8315 else if ( format == RTAUDIO_SINT32 ||
\r
8316 format == RTAUDIO_FLOAT32 ) {
\r
8317 for ( unsigned int i=0; i<samples; i++ ) {
\r
8318 // Swap 1st and 4th bytes.
\r
8320 *(ptr) = *(ptr+3);
\r
8323 // Swap 2nd and 3rd bytes.
\r
8326 *(ptr) = *(ptr+1);
\r
8329 // Increment 3 more bytes.
\r
8333 else if ( format == RTAUDIO_SINT24 ) {
\r
8334 for ( unsigned int i=0; i<samples; i++ ) {
\r
8335 // Swap 1st and 3rd bytes.
\r
8337 *(ptr) = *(ptr+2);
\r
8340 // Increment 2 more bytes.
\r
8344 else if ( format == RTAUDIO_FLOAT64 ) {
\r
8345 for ( unsigned int i=0; i<samples; i++ ) {
\r
8346 // Swap 1st and 8th bytes
\r
8348 *(ptr) = *(ptr+7);
\r
8351 // Swap 2nd and 7th bytes
\r
8354 *(ptr) = *(ptr+5);
\r
8357 // Swap 3rd and 6th bytes
\r
8360 *(ptr) = *(ptr+3);
\r
8363 // Swap 4th and 5th bytes
\r
8366 *(ptr) = *(ptr+1);
\r
8369 // Increment 5 more bytes.
\r
8375 // Indentation settings for Vim and Emacs
\r
8377 // Local Variables:
\r
8378 // c-basic-offset: 2
\r
8379 // indent-tabs-mode: nil
\r
8382 // vim: et sts=2 sw=2
\r