/************************************************************************/
/*! \class RtAudio
    \brief Realtime audio i/o C++ classes.

    RtAudio provides a common API (Application Programming Interface)
    for realtime audio input/output across Linux (native ALSA, Jack,
    and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
    (DirectSound and ASIO) operating systems.

    RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/

    RtAudio: realtime audio i/o C++ classes
    Copyright (c) 2001-2012 Gary P. Scavone

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation files
    (the "Software"), to deal in the Software without restriction,
    including without limitation the rights to use, copy, modify, merge,
    publish, distribute, sublicense, and/or sell copies of the Software,
    and to permit persons to whom the Software is furnished to do so,
    subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    Any person wishing to distribute modifications to the Software is
    asked to send the modifications to the original developer so that
    they can be incorporated into the canonical version.  This is,
    however, not a binding provision of this license.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
    ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
    CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/************************************************************************/
\r
41 // RtAudio: Version 4.0.11
\r
#include "RtAudio.h"
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-neutral mutex wrappers: Windows critical sections, POSIX
// pthread mutexes, or no-op dummies when no real-time API is compiled in.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
\r
// *************************************************** //
//
// RtAudio definitions.
//
// *************************************************** //
\r
78 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
82 // The order here will control the order of RtAudio's API search in
\r
84 #if defined(__UNIX_JACK__)
\r
85 apis.push_back( UNIX_JACK );
\r
87 #if defined(__LINUX_ALSA__)
\r
88 apis.push_back( LINUX_ALSA );
\r
90 #if defined(__LINUX_OSS__)
\r
91 apis.push_back( LINUX_OSS );
\r
93 #if defined(__WINDOWS_ASIO__)
\r
94 apis.push_back( WINDOWS_ASIO );
\r
96 #if defined(__WINDOWS_DS__)
\r
97 apis.push_back( WINDOWS_DS );
\r
99 #if defined(__MACOSX_CORE__)
\r
100 apis.push_back( MACOSX_CORE );
\r
102 #if defined(__RTAUDIO_DUMMY__)
\r
103 apis.push_back( RTAUDIO_DUMMY );
\r
107 void RtAudio :: openRtApi( RtAudio::Api api )
\r
113 #if defined(__UNIX_JACK__)
\r
114 if ( api == UNIX_JACK )
\r
115 rtapi_ = new RtApiJack();
\r
117 #if defined(__LINUX_ALSA__)
\r
118 if ( api == LINUX_ALSA )
\r
119 rtapi_ = new RtApiAlsa();
\r
121 #if defined(__LINUX_OSS__)
\r
122 if ( api == LINUX_OSS )
\r
123 rtapi_ = new RtApiOss();
\r
125 #if defined(__WINDOWS_ASIO__)
\r
126 if ( api == WINDOWS_ASIO )
\r
127 rtapi_ = new RtApiAsio();
\r
129 #if defined(__WINDOWS_DS__)
\r
130 if ( api == WINDOWS_DS )
\r
131 rtapi_ = new RtApiDs();
\r
133 #if defined(__MACOSX_CORE__)
\r
134 if ( api == MACOSX_CORE )
\r
135 rtapi_ = new RtApiCore();
\r
137 #if defined(__RTAUDIO_DUMMY__)
\r
138 if ( api == RTAUDIO_DUMMY )
\r
139 rtapi_ = new RtApiDummy();
\r
143 RtAudio :: RtAudio( RtAudio::Api api ) throw()
\r
147 if ( api != UNSPECIFIED ) {
\r
148 // Attempt to open the specified API.
\r
150 if ( rtapi_ ) return;
\r
152 // No compiled support for specified API value. Issue a debug
\r
153 // warning and continue as if no API was specified.
\r
154 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
157 // Iterate through the compiled APIs and return as soon as we find
\r
158 // one with at least one device or we reach the end of the list.
\r
159 std::vector< RtAudio::Api > apis;
\r
160 getCompiledApi( apis );
\r
161 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
162 openRtApi( apis[i] );
\r
163 if ( rtapi_->getDeviceCount() ) break;
\r
166 if ( rtapi_ ) return;
\r
168 // It should not be possible to get here because the preprocessor
\r
169 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
170 // API-specific definitions are passed to the compiler. But just in
\r
171 // case something weird happens, we'll print out an error message.
\r
172 std::cerr << "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
175 RtAudio :: ~RtAudio() throw()
\r
180 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
181 RtAudio::StreamParameters *inputParameters,
\r
182 RtAudioFormat format, unsigned int sampleRate,
\r
183 unsigned int *bufferFrames,
\r
184 RtAudioCallback callback, void *userData,
\r
185 RtAudio::StreamOptions *options )
\r
187 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
188 sampleRate, bufferFrames, callback,
\r
189 userData, options );
\r
// *************************************************** //
//
// Public RtApi definitions (see end of file for
// private or protected utility functions).
//
// *************************************************** //
\r
201 stream_.state = STREAM_CLOSED;
\r
202 stream_.mode = UNINITIALIZED;
\r
203 stream_.apiHandle = 0;
\r
204 stream_.userBuffer[0] = 0;
\r
205 stream_.userBuffer[1] = 0;
\r
206 MUTEX_INITIALIZE( &stream_.mutex );
\r
207 showWarnings_ = true;
\r
212 MUTEX_DESTROY( &stream_.mutex );
\r
215 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
216 RtAudio::StreamParameters *iParams,
\r
217 RtAudioFormat format, unsigned int sampleRate,
\r
218 unsigned int *bufferFrames,
\r
219 RtAudioCallback callback, void *userData,
\r
220 RtAudio::StreamOptions *options )
\r
222 if ( stream_.state != STREAM_CLOSED ) {
\r
223 errorText_ = "RtApi::openStream: a stream is already open!";
\r
224 error( RtError::INVALID_USE );
\r
227 if ( oParams && oParams->nChannels < 1 ) {
\r
228 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
229 error( RtError::INVALID_USE );
\r
232 if ( iParams && iParams->nChannels < 1 ) {
\r
233 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
234 error( RtError::INVALID_USE );
\r
237 if ( oParams == NULL && iParams == NULL ) {
\r
238 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
239 error( RtError::INVALID_USE );
\r
242 if ( formatBytes(format) == 0 ) {
\r
243 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
244 error( RtError::INVALID_USE );
\r
247 unsigned int nDevices = getDeviceCount();
\r
248 unsigned int oChannels = 0;
\r
250 oChannels = oParams->nChannels;
\r
251 if ( oParams->deviceId >= nDevices ) {
\r
252 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
253 error( RtError::INVALID_USE );
\r
257 unsigned int iChannels = 0;
\r
259 iChannels = iParams->nChannels;
\r
260 if ( iParams->deviceId >= nDevices ) {
\r
261 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
262 error( RtError::INVALID_USE );
\r
269 if ( oChannels > 0 ) {
\r
271 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
272 sampleRate, format, bufferFrames, options );
\r
273 if ( result == false ) error( RtError::SYSTEM_ERROR );
\r
276 if ( iChannels > 0 ) {
\r
278 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
279 sampleRate, format, bufferFrames, options );
\r
280 if ( result == false ) {
\r
281 if ( oChannels > 0 ) closeStream();
\r
282 error( RtError::SYSTEM_ERROR );
\r
286 stream_.callbackInfo.callback = (void *) callback;
\r
287 stream_.callbackInfo.userData = userData;
\r
289 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
290 stream_.state = STREAM_STOPPED;
\r
293 unsigned int RtApi :: getDefaultInputDevice( void )
\r
295 // Should be implemented in subclasses if possible.
\r
299 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
301 // Should be implemented in subclasses if possible.
\r
305 void RtApi :: closeStream( void )
\r
307 // MUST be implemented in subclasses!
\r
311 bool RtApi :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
312 unsigned int firstChannel, unsigned int sampleRate,
\r
313 RtAudioFormat format, unsigned int *bufferSize,
\r
314 RtAudio::StreamOptions *options )
\r
316 // MUST be implemented in subclasses!
\r
320 void RtApi :: tickStreamTime( void )
\r
322 // Subclasses that do not provide their own implementation of
\r
323 // getStreamTime should call this function once per buffer I/O to
\r
324 // provide basic stream time support.
\r
326 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
328 #if defined( HAVE_GETTIMEOFDAY )
\r
329 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
333 long RtApi :: getStreamLatency( void )
\r
337 long totalLatency = 0;
\r
338 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
339 totalLatency = stream_.latency[0];
\r
340 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
341 totalLatency += stream_.latency[1];
\r
343 return totalLatency;
\r
346 double RtApi :: getStreamTime( void )
\r
350 #if defined( HAVE_GETTIMEOFDAY )
\r
351 // Return a very accurate estimate of the stream time by
\r
352 // adding in the elapsed time since the last tick.
\r
353 struct timeval then;
\r
354 struct timeval now;
\r
356 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
357 return stream_.streamTime;
\r
359 gettimeofday( &now, NULL );
\r
360 then = stream_.lastTickTimestamp;
\r
361 return stream_.streamTime +
\r
362 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
363 (then.tv_sec + 0.000001 * then.tv_usec));
\r
365 return stream_.streamTime;
\r
369 unsigned int RtApi :: getStreamSampleRate( void )
\r
373 return stream_.sampleRate;
\r
// *************************************************** //
//
// OS/API-specific methods.
//
// *************************************************** //
\r
383 #if defined(__MACOSX_CORE__)
\r
385 // The OS X CoreAudio API is designed to use a separate callback
\r
386 // procedure for each of its audio devices. A single RtAudio duplex
\r
387 // stream using two different devices is supported here, though it
\r
388 // cannot be guaranteed to always behave correctly because we cannot
\r
389 // synchronize these two callbacks.
\r
391 // A property listener is installed for over/underrun information.
\r
392 // However, no functionality is currently provided to allow property
\r
393 // listeners to trigger user handlers because it is unclear what could
\r
394 // be done if a critical stream parameter (buffer size, sample rate,
\r
395 // device disconnect) notification arrived. The listeners entail
\r
396 // quite a bit of extra code and most likely, a user program wouldn't
\r
397 // be prepared for the result anyway. However, we do provide a flag
\r
398 // to the client callback function to inform of an over/underrun.
\r
400 // A structure to hold various information related to the CoreAudio API
\r
402 struct CoreHandle {
\r
403 AudioDeviceID id[2]; // device ids
\r
404 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
405 AudioDeviceIOProcID procId[2];
\r
407 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
408 UInt32 nStreams[2]; // number of streams to use
\r
410 char *deviceBuffer;
\r
411 pthread_cond_t condition;
\r
412 int drainCounter; // Tracks callback counts when draining
\r
413 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
416 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
419 ThreadHandle threadId;
\r
421 RtApiCore:: RtApiCore()
\r
423 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
424 // This is a largely undocumented but absolutely necessary
\r
425 // requirement starting with OS-X 10.6. If not called, queries and
\r
426 // updates to various audio device properties are not handled
\r
428 CFRunLoopRef theRunLoop = NULL;
\r
429 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
430 kAudioObjectPropertyScopeGlobal,
\r
431 kAudioObjectPropertyElementMaster };
\r
432 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
433 if ( result != noErr ) {
\r
434 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
435 error( RtError::WARNING );
\r
440 RtApiCore :: ~RtApiCore()
\r
442 // The subclass destructor gets called before the base class
\r
443 // destructor, so close an existing stream before deallocating
\r
444 // apiDeviceId memory.
\r
445 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
448 unsigned int RtApiCore :: getDeviceCount( void )
\r
450 // Find out how many audio devices there are, if any.
\r
452 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
453 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
454 if ( result != noErr ) {
\r
455 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
456 error( RtError::WARNING );
\r
460 return dataSize / sizeof( AudioDeviceID );
\r
463 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
465 unsigned int nDevices = getDeviceCount();
\r
466 if ( nDevices <= 1 ) return 0;
\r
469 UInt32 dataSize = sizeof( AudioDeviceID );
\r
470 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
471 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
472 if ( result != noErr ) {
\r
473 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
474 error( RtError::WARNING );
\r
478 dataSize *= nDevices;
\r
479 AudioDeviceID deviceList[ nDevices ];
\r
480 property.mSelector = kAudioHardwarePropertyDevices;
\r
481 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
482 if ( result != noErr ) {
\r
483 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
484 error( RtError::WARNING );
\r
488 for ( unsigned int i=0; i<nDevices; i++ )
\r
489 if ( id == deviceList[i] ) return i;
\r
491 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
492 error( RtError::WARNING );
\r
496 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
498 unsigned int nDevices = getDeviceCount();
\r
499 if ( nDevices <= 1 ) return 0;
\r
502 UInt32 dataSize = sizeof( AudioDeviceID );
\r
503 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
504 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
505 if ( result != noErr ) {
\r
506 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
507 error( RtError::WARNING );
\r
511 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
512 AudioDeviceID deviceList[ nDevices ];
\r
513 property.mSelector = kAudioHardwarePropertyDevices;
\r
514 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
515 if ( result != noErr ) {
\r
516 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
517 error( RtError::WARNING );
\r
521 for ( unsigned int i=0; i<nDevices; i++ )
\r
522 if ( id == deviceList[i] ) return i;
\r
524 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
525 error( RtError::WARNING );
\r
529 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
531 RtAudio::DeviceInfo info;
\r
532 info.probed = false;
\r
535 unsigned int nDevices = getDeviceCount();
\r
536 if ( nDevices == 0 ) {
\r
537 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
538 error( RtError::INVALID_USE );
\r
541 if ( device >= nDevices ) {
\r
542 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
543 error( RtError::INVALID_USE );
\r
546 AudioDeviceID deviceList[ nDevices ];
\r
547 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
548 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
549 kAudioObjectPropertyScopeGlobal,
\r
550 kAudioObjectPropertyElementMaster };
\r
551 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
552 0, NULL, &dataSize, (void *) &deviceList );
\r
553 if ( result != noErr ) {
\r
554 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
555 error( RtError::WARNING );
\r
559 AudioDeviceID id = deviceList[ device ];
\r
561 // Get the device name.
\r
563 CFStringRef cfname;
\r
564 dataSize = sizeof( CFStringRef );
\r
565 property.mSelector = kAudioObjectPropertyManufacturer;
\r
566 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
567 if ( result != noErr ) {
\r
568 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
569 errorText_ = errorStream_.str();
\r
570 error( RtError::WARNING );
\r
574 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
575 int length = CFStringGetLength(cfname);
\r
576 char *mname = (char *)malloc(length * 3 + 1);
\r
577 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
578 info.name.append( (const char *)mname, strlen(mname) );
\r
579 info.name.append( ": " );
\r
580 CFRelease( cfname );
\r
583 property.mSelector = kAudioObjectPropertyName;
\r
584 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
585 if ( result != noErr ) {
\r
586 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
587 errorText_ = errorStream_.str();
\r
588 error( RtError::WARNING );
\r
592 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
593 length = CFStringGetLength(cfname);
\r
594 char *name = (char *)malloc(length * 3 + 1);
\r
595 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
596 info.name.append( (const char *)name, strlen(name) );
\r
597 CFRelease( cfname );
\r
600 // Get the output stream "configuration".
\r
601 AudioBufferList *bufferList = nil;
\r
602 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
603 property.mScope = kAudioDevicePropertyScopeOutput;
\r
604 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
606 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
607 if ( result != noErr || dataSize == 0 ) {
\r
608 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
609 errorText_ = errorStream_.str();
\r
610 error( RtError::WARNING );
\r
614 // Allocate the AudioBufferList.
\r
615 bufferList = (AudioBufferList *) malloc( dataSize );
\r
616 if ( bufferList == NULL ) {
\r
617 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
618 error( RtError::WARNING );
\r
622 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
623 if ( result != noErr || dataSize == 0 ) {
\r
624 free( bufferList );
\r
625 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
626 errorText_ = errorStream_.str();
\r
627 error( RtError::WARNING );
\r
631 // Get output channel information.
\r
632 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
633 for ( i=0; i<nStreams; i++ )
\r
634 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
635 free( bufferList );
\r
637 // Get the input stream "configuration".
\r
638 property.mScope = kAudioDevicePropertyScopeInput;
\r
639 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
640 if ( result != noErr || dataSize == 0 ) {
\r
641 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
642 errorText_ = errorStream_.str();
\r
643 error( RtError::WARNING );
\r
647 // Allocate the AudioBufferList.
\r
648 bufferList = (AudioBufferList *) malloc( dataSize );
\r
649 if ( bufferList == NULL ) {
\r
650 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
651 error( RtError::WARNING );
\r
655 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
656 if (result != noErr || dataSize == 0) {
\r
657 free( bufferList );
\r
658 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
659 errorText_ = errorStream_.str();
\r
660 error( RtError::WARNING );
\r
664 // Get input channel information.
\r
665 nStreams = bufferList->mNumberBuffers;
\r
666 for ( i=0; i<nStreams; i++ )
\r
667 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
668 free( bufferList );
\r
670 // If device opens for both playback and capture, we determine the channels.
\r
671 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
672 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
674 // Probe the device sample rates.
\r
675 bool isInput = false;
\r
676 if ( info.outputChannels == 0 ) isInput = true;
\r
678 // Determine the supported sample rates.
\r
679 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
680 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
681 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
682 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
683 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
684 errorText_ = errorStream_.str();
\r
685 error( RtError::WARNING );
\r
689 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
690 AudioValueRange rangeList[ nRanges ];
\r
691 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
692 if ( result != kAudioHardwareNoError ) {
\r
693 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
694 errorText_ = errorStream_.str();
\r
695 error( RtError::WARNING );
\r
699 Float64 minimumRate = 100000000.0, maximumRate = 0.0;
\r
700 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
701 if ( rangeList[i].mMinimum < minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
702 if ( rangeList[i].mMaximum > maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
705 info.sampleRates.clear();
\r
706 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
707 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
708 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
711 if ( info.sampleRates.size() == 0 ) {
\r
712 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
713 errorText_ = errorStream_.str();
\r
714 error( RtError::WARNING );
\r
718 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
719 // Thus, any other "physical" formats supported by the device are of
\r
720 // no interest to the client.
\r
721 info.nativeFormats = RTAUDIO_FLOAT32;
\r
723 if ( info.outputChannels > 0 )
\r
724 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
725 if ( info.inputChannels > 0 )
\r
726 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
728 info.probed = true;
\r
732 OSStatus callbackHandler( AudioDeviceID inDevice,
\r
733 const AudioTimeStamp* inNow,
\r
734 const AudioBufferList* inInputData,
\r
735 const AudioTimeStamp* inInputTime,
\r
736 AudioBufferList* outOutputData,
\r
737 const AudioTimeStamp* inOutputTime,
\r
738 void* infoPointer )
\r
740 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
742 RtApiCore *object = (RtApiCore *) info->object;
\r
743 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
744 return kAudioHardwareUnspecifiedError;
\r
746 return kAudioHardwareNoError;
\r
749 OSStatus xrunListener( AudioObjectID inDevice,
\r
751 const AudioObjectPropertyAddress properties[],
\r
752 void* handlePointer )
\r
754 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
755 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
756 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
757 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
758 handle->xrun[1] = true;
\r
760 handle->xrun[0] = true;
\r
764 return kAudioHardwareNoError;
\r
767 OSStatus rateListener( AudioObjectID inDevice,
\r
769 const AudioObjectPropertyAddress properties[],
\r
770 void* ratePointer )
\r
773 Float64 *rate = (Float64 *) ratePointer;
\r
774 UInt32 dataSize = sizeof( Float64 );
\r
775 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
776 kAudioObjectPropertyScopeGlobal,
\r
777 kAudioObjectPropertyElementMaster };
\r
778 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
779 return kAudioHardwareNoError;
\r
782 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
783 unsigned int firstChannel, unsigned int sampleRate,
\r
784 RtAudioFormat format, unsigned int *bufferSize,
\r
785 RtAudio::StreamOptions *options )
\r
788 unsigned int nDevices = getDeviceCount();
\r
789 if ( nDevices == 0 ) {
\r
790 // This should not happen because a check is made before this function is called.
\r
791 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
795 if ( device >= nDevices ) {
\r
796 // This should not happen because a check is made before this function is called.
\r
797 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
801 AudioDeviceID deviceList[ nDevices ];
\r
802 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
803 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
804 kAudioObjectPropertyScopeGlobal,
\r
805 kAudioObjectPropertyElementMaster };
\r
806 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
807 0, NULL, &dataSize, (void *) &deviceList );
\r
808 if ( result != noErr ) {
\r
809 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
813 AudioDeviceID id = deviceList[ device ];
\r
815 // Setup for stream mode.
\r
816 bool isInput = false;
\r
817 if ( mode == INPUT ) {
\r
819 property.mScope = kAudioDevicePropertyScopeInput;
\r
822 property.mScope = kAudioDevicePropertyScopeOutput;
\r
824 // Get the stream "configuration".
\r
825 AudioBufferList *bufferList = nil;
\r
827 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
828 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
829 if ( result != noErr || dataSize == 0 ) {
\r
830 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
831 errorText_ = errorStream_.str();
\r
835 // Allocate the AudioBufferList.
\r
836 bufferList = (AudioBufferList *) malloc( dataSize );
\r
837 if ( bufferList == NULL ) {
\r
838 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
842 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
843 if (result != noErr || dataSize == 0) {
\r
844 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
845 errorText_ = errorStream_.str();
\r
849 // Search for one or more streams that contain the desired number of
\r
850 // channels. CoreAudio devices can have an arbitrary number of
\r
851 // streams and each stream can have an arbitrary number of channels.
\r
852 // For each stream, a single buffer of interleaved samples is
\r
853 // provided. RtAudio prefers the use of one stream of interleaved
\r
854 // data or multiple consecutive single-channel streams. However, we
\r
855 // now support multiple consecutive multi-channel streams of
\r
856 // interleaved data as well.
\r
857 UInt32 iStream, offsetCounter = firstChannel;
\r
858 UInt32 nStreams = bufferList->mNumberBuffers;
\r
859 bool monoMode = false;
\r
860 bool foundStream = false;
\r
862 // First check that the device supports the requested number of
\r
864 UInt32 deviceChannels = 0;
\r
865 for ( iStream=0; iStream<nStreams; iStream++ )
\r
866 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
868 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
869 free( bufferList );
\r
870 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
871 errorText_ = errorStream_.str();
\r
875 // Look for a single stream meeting our needs.
\r
876 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
877 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
878 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
879 if ( streamChannels >= channels + offsetCounter ) {
\r
880 firstStream = iStream;
\r
881 channelOffset = offsetCounter;
\r
882 foundStream = true;
\r
885 if ( streamChannels > offsetCounter ) break;
\r
886 offsetCounter -= streamChannels;
\r
889 // If we didn't find a single stream above, then we should be able
\r
890 // to meet the channel specification with multiple streams.
\r
891 if ( foundStream == false ) {
\r
893 offsetCounter = firstChannel;
\r
894 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
895 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
896 if ( streamChannels > offsetCounter ) break;
\r
897 offsetCounter -= streamChannels;
\r
900 firstStream = iStream;
\r
901 channelOffset = offsetCounter;
\r
902 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
904 if ( streamChannels > 1 ) monoMode = false;
\r
905 while ( channelCounter > 0 ) {
\r
906 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
907 if ( streamChannels > 1 ) monoMode = false;
\r
908 channelCounter -= streamChannels;
\r
913 free( bufferList );
\r
915 // Determine the buffer size.
\r
916 AudioValueRange bufferRange;
\r
917 dataSize = sizeof( AudioValueRange );
\r
918 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
919 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
921 if ( result != noErr ) {
\r
922 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
923 errorText_ = errorStream_.str();
\r
927 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
928 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
929 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
931 // Set the buffer size. For multiple streams, I'm assuming we only
\r
932 // need to make this setting for the master channel.
\r
933 UInt32 theSize = (UInt32) *bufferSize;
\r
934 dataSize = sizeof( UInt32 );
\r
935 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
936 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
938 if ( result != noErr ) {
\r
939 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
940 errorText_ = errorStream_.str();
\r
944 // If attempting to setup a duplex stream, the bufferSize parameter
\r
945 // MUST be the same in both directions!
\r
946 *bufferSize = theSize;
\r
947 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
948 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
949 errorText_ = errorStream_.str();
\r
953 stream_.bufferSize = *bufferSize;
\r
954 stream_.nBuffers = 1;
\r
956 // Try to set "hog" mode ... it's not clear to me this is working.
\r
957 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
959 dataSize = sizeof( hog_pid );
\r
960 property.mSelector = kAudioDevicePropertyHogMode;
\r
961 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
962 if ( result != noErr ) {
\r
963 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
964 errorText_ = errorStream_.str();
\r
968 if ( hog_pid != getpid() ) {
\r
969 hog_pid = getpid();
\r
970 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
971 if ( result != noErr ) {
\r
972 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
973 errorText_ = errorStream_.str();
\r
979 // Check and if necessary, change the sample rate for the device.
\r
980 Float64 nominalRate;
\r
981 dataSize = sizeof( Float64 );
\r
982 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
983 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
985 if ( result != noErr ) {
\r
986 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
987 errorText_ = errorStream_.str();
\r
991 // Only change the sample rate if off by more than 1 Hz.
\r
992 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
994 // Set a property listener for the sample rate change
\r
995 Float64 reportedRate = 0.0;
\r
996 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
997 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
998 if ( result != noErr ) {
\r
999 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1000 errorText_ = errorStream_.str();
\r
1004 nominalRate = (Float64) sampleRate;
\r
1005 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1007 if ( result != noErr ) {
\r
1008 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1009 errorText_ = errorStream_.str();
\r
1013 // Now wait until the reported nominal rate is what we just set.
\r
1014 UInt32 microCounter = 0;
\r
1015 while ( reportedRate != nominalRate ) {
\r
1016 microCounter += 5000;
\r
1017 if ( microCounter > 5000000 ) break;
\r
1021 // Remove the property listener.
\r
1022 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1024 if ( microCounter > 5000000 ) {
\r
1025 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1026 errorText_ = errorStream_.str();
\r
1031 // Now set the stream format for all streams. Also, check the
\r
1032 // physical format of the device and change that if necessary.
\r
1033 AudioStreamBasicDescription description;
\r
1034 dataSize = sizeof( AudioStreamBasicDescription );
\r
1035 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1036 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1037 if ( result != noErr ) {
\r
1038 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1039 errorText_ = errorStream_.str();
\r
1043 // Set the sample rate and data format id. However, only make the
\r
1044 // change if the sample rate is not within 1.0 of the desired
\r
1045 // rate and the format is not linear pcm.
\r
1046 bool updateFormat = false;
\r
1047 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1048 description.mSampleRate = (Float64) sampleRate;
\r
1049 updateFormat = true;
\r
1052 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1053 description.mFormatID = kAudioFormatLinearPCM;
\r
1054 updateFormat = true;
\r
1057 if ( updateFormat ) {
\r
1058 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1059 if ( result != noErr ) {
\r
1060 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1061 errorText_ = errorStream_.str();
\r
1066 // Now check the physical format.
\r
1067 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1068 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1069 if ( result != noErr ) {
\r
1070 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1071 errorText_ = errorStream_.str();
\r
1075 //std::cout << "Current physical stream format:" << std::endl;
\r
1076 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1077 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1078 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1079 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1081 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1082 description.mFormatID = kAudioFormatLinearPCM;
\r
1083 //description.mSampleRate = (Float64) sampleRate;
\r
1084 AudioStreamBasicDescription testDescription = description;
\r
1085 UInt32 formatFlags;
\r
1087 // We'll try higher bit rates first and then work our way down.
\r
1088 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1089 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1090 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1091 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1092 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1093 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1094 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1095 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1096 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1097 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1098 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1099 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1100 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1102 bool setPhysicalFormat = false;
\r
1103 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1104 testDescription = description;
\r
1105 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1106 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1107 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1108 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1110 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1111 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1112 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1113 if ( result == noErr ) {
\r
1114 setPhysicalFormat = true;
\r
1115 //std::cout << "Updated physical stream format:" << std::endl;
\r
1116 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1117 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1118 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1119 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1124 if ( !setPhysicalFormat ) {
\r
1125 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1126 errorText_ = errorStream_.str();
\r
1129 } // done setting virtual/physical formats.
\r
1131 // Get the stream / device latency.
\r
1133 dataSize = sizeof( UInt32 );
\r
1134 property.mSelector = kAudioDevicePropertyLatency;
\r
1135 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1136 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1137 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1139 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1140 errorText_ = errorStream_.str();
\r
1141 error( RtError::WARNING );
\r
1145 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1146 // always be presented in native-endian format, so we should never
\r
1147 // need to byte swap.
\r
1148 stream_.doByteSwap[mode] = false;
\r
1150 // From the CoreAudio documentation, PCM data must be supplied as
\r
1152 stream_.userFormat = format;
\r
1153 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1155 if ( streamCount == 1 )
\r
1156 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1157 else // multiple streams
\r
1158 stream_.nDeviceChannels[mode] = channels;
\r
1159 stream_.nUserChannels[mode] = channels;
\r
1160 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1161 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1162 else stream_.userInterleaved = true;
\r
1163 stream_.deviceInterleaved[mode] = true;
\r
1164 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1166 // Set flags for buffer conversion.
\r
1167 stream_.doConvertBuffer[mode] = false;
\r
1168 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1169 stream_.doConvertBuffer[mode] = true;
\r
1170 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1171 stream_.doConvertBuffer[mode] = true;
\r
1172 if ( streamCount == 1 ) {
\r
1173 if ( stream_.nUserChannels[mode] > 1 &&
\r
1174 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1175 stream_.doConvertBuffer[mode] = true;
\r
1177 else if ( monoMode && stream_.userInterleaved )
\r
1178 stream_.doConvertBuffer[mode] = true;
\r
1180 // Allocate our CoreHandle structure for the stream.
\r
1181 CoreHandle *handle = 0;
\r
1182 if ( stream_.apiHandle == 0 ) {
\r
1184 handle = new CoreHandle;
\r
1186 catch ( std::bad_alloc& ) {
\r
1187 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1191 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1192 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1195 stream_.apiHandle = (void *) handle;
\r
1198 handle = (CoreHandle *) stream_.apiHandle;
\r
1199 handle->iStream[mode] = firstStream;
\r
1200 handle->nStreams[mode] = streamCount;
\r
1201 handle->id[mode] = id;
\r
1203 // Allocate necessary internal buffers.
\r
1204 unsigned long bufferBytes;
\r
1205 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1206 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1207 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1208 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1209 if ( stream_.userBuffer[mode] == NULL ) {
\r
1210 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1214 // If possible, we will make use of the CoreAudio stream buffers as
\r
1215 // "device buffers". However, we can't do this if using multiple
\r
1217 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1219 bool makeBuffer = true;
\r
1220 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1221 if ( mode == INPUT ) {
\r
1222 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1223 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1224 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1228 if ( makeBuffer ) {
\r
1229 bufferBytes *= *bufferSize;
\r
1230 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1231 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1232 if ( stream_.deviceBuffer == NULL ) {
\r
1233 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1239 stream_.sampleRate = sampleRate;
\r
1240 stream_.device[mode] = device;
\r
1241 stream_.state = STREAM_STOPPED;
\r
1242 stream_.callbackInfo.object = (void *) this;
\r
1244 // Setup the buffer conversion information structure.
\r
1245 if ( stream_.doConvertBuffer[mode] ) {
\r
1246 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1247 else setConvertInfo( mode, channelOffset );
\r
1250 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1251 // Only one callback procedure per device.
\r
1252 stream_.mode = DUPLEX;
\r
1254 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1255 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1257 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1258 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1260 if ( result != noErr ) {
\r
1261 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1262 errorText_ = errorStream_.str();
\r
1265 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1266 stream_.mode = DUPLEX;
\r
1268 stream_.mode = mode;
\r
1271 // Setup the device property listener for over/underload.
\r
1272 property.mSelector = kAudioDeviceProcessorOverload;
\r
1273 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1279 pthread_cond_destroy( &handle->condition );
\r
1281 stream_.apiHandle = 0;
\r
1284 for ( int i=0; i<2; i++ ) {
\r
1285 if ( stream_.userBuffer[i] ) {
\r
1286 free( stream_.userBuffer[i] );
\r
1287 stream_.userBuffer[i] = 0;
\r
1291 if ( stream_.deviceBuffer ) {
\r
1292 free( stream_.deviceBuffer );
\r
1293 stream_.deviceBuffer = 0;
\r
// Close the currently open CoreAudio stream: stop the device(s) if still
// running, remove the registered IOProc(s), free the user/device buffers,
// destroy the handle's condition variable, and mark the stream CLOSED.
// NOTE(review): this extracted chunk is missing interleaving lines dropped
// by extraction (e.g. the early "return;", several closing braces, and the
// #else/#endif partners of the visible #if blocks) — confirm against the
// canonical RtAudio source before compiling.
1299 void RtApiCore :: closeStream( void )
\r
1301 if ( stream_.state == STREAM_CLOSED ) {
\r
1302 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1303 error( RtError::WARNING );
\r
1307 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output (or duplex) side: stop and deregister the IOProc on device id[0].
\r
1308 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1309 if ( stream_.state == STREAM_RUNNING )
\r
1310 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1311 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1312 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1314 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1315 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
// Input side: only when input runs on a second, distinct device (otherwise
// the duplex IOProc above already covers it).
1319 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1320 if ( stream_.state == STREAM_RUNNING )
\r
1321 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1322 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1323 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1325 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1326 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
// Release the per-direction user buffers (index 0 = output, 1 = input).
1330 for ( int i=0; i<2; i++ ) {
\r
1331 if ( stream_.userBuffer[i] ) {
\r
1332 free( stream_.userBuffer[i] );
\r
1333 stream_.userBuffer[i] = 0;
\r
// Release the shared internal "device" conversion buffer, if allocated.
1337 if ( stream_.deviceBuffer ) {
\r
1338 free( stream_.deviceBuffer );
\r
1339 stream_.deviceBuffer = 0;
\r
1342 // Destroy pthread condition variable.
\r
1343 pthread_cond_destroy( &handle->condition );
\r
1345 stream_.apiHandle = 0;
\r
// Reset stream bookkeeping so a subsequent open starts from a clean state.
1347 stream_.mode = UNINITIALIZED;
\r
1348 stream_.state = STREAM_CLOSED;
\r
// Start the open stream: issues AudioDeviceStart for the output device and,
// when input uses a distinct device, for the input device as well; then
// resets the drain bookkeeping and marks the stream RUNNING. On any
// CoreAudio failure, falls through to error( RtError::SYSTEM_ERROR ).
// NOTE(review): extraction has dropped lines here (early "return;", closing
// braces, and likely "goto unlock;"-style error paths) — confirm against
// the canonical RtAudio source.
1351 void RtApiCore :: startStream( void )
\r
1354 if ( stream_.state == STREAM_RUNNING ) {
\r
1355 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1356 error( RtError::WARNING );
\r
1360 //MUTEX_LOCK( &stream_.mutex );
\r
1362 OSStatus result = noErr;
\r
1363 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Kick off the output-side (or duplex) IOProc on device id[0].
\r
1364 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1366 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1367 if ( result != noErr ) {
\r
1368 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1369 errorText_ = errorStream_.str();
\r
// Input side: only needed when input runs on a second, distinct device.
1374 if ( stream_.mode == INPUT ||
\r
1375 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1377 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1378 if ( result != noErr ) {
\r
1379 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1380 errorText_ = errorStream_.str();
\r
// Reset drain state used by stopStream()/abortStream() before going live.
1385 handle->drainCounter = 0;
\r
1386 handle->internalDrain = false;
\r
1387 stream_.state = STREAM_RUNNING;
\r
1390 //MUTEX_UNLOCK( &stream_.mutex );
\r
1392 if ( result == noErr ) return;
\r
1393 error( RtError::SYSTEM_ERROR );
\r
// Stop the stream gracefully. For an output/duplex stream that is not
// already draining, requests a drain (drainCounter = 2) and blocks on the
// handle's condition variable — under stream_.mutex — until callbackEvent()
// signals that the output has been flushed; then stops the device IOProc(s)
// and marks the stream STOPPED.
// NOTE(review): extraction has dropped lines in this chunk (early
// "return;", closing braces, the final MUTEX_UNLOCK/return path) — the
// pthread_cond_wait/signal handshake is order-sensitive, so confirm the
// full sequence against the canonical RtAudio source.
1396 void RtApiCore :: stopStream( void )
\r
1399 if ( stream_.state == STREAM_STOPPED ) {
\r
1400 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1401 error( RtError::WARNING );
\r
1406 MUTEX_LOCK( &stream_.mutex );
\r
// Re-check under the lock: the state may have changed while acquiring it.
1408 if ( stream_.state == STREAM_STOPPED ) {
\r
1409 MUTEX_UNLOCK( &stream_.mutex );
\r
1414 OSStatus result = noErr;
\r
1415 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1416 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// drainCounter == 0 means no drain was requested yet: request one and wait
// for the audio callback to signal that output has drained.
1418 if ( handle->drainCounter == 0 ) {
\r
1419 handle->drainCounter = 2;
\r
1420 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1423 //MUTEX_UNLOCK( &stream_.mutex );
\r
1424 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1425 //MUTEX_LOCK( &stream_.mutex );
\r
1426 if ( result != noErr ) {
\r
1427 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1428 errorText_ = errorStream_.str();
\r
// Input side: only when input runs on a second, distinct device.
1433 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1435 //MUTEX_UNLOCK( &stream_.mutex );
\r
1436 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1437 //MUTEX_LOCK( &stream_.mutex );
\r
1438 if ( result != noErr ) {
\r
1439 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1440 errorText_ = errorStream_.str();
\r
1445 stream_.state = STREAM_STOPPED;
\r
1448 //MUTEX_UNLOCK( &stream_.mutex );
\r
1450 if ( result == noErr ) return;
\r
1451 error( RtError::SYSTEM_ERROR );
\r
// Stop the stream immediately, without waiting for output to drain:
// setting drainCounter to 2 makes callbackEvent() write zeros instead of
// invoking the user callback.
// NOTE(review): the canonical version presumably follows this with a call
// to stopStream() (and an early "return;" after the WARNING) — those lines
// are not visible in this extracted chunk; confirm against the original.
1454 void RtApiCore :: abortStream( void )
\r
1457 if ( stream_.state == STREAM_STOPPED ) {
\r
1458 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1459 error( RtError::WARNING );
\r
1463 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1464 handle->drainCounter = 2;
\r
1469 // This function will be called by a spawned thread when the user
\r
1470 // callback function signals that the stream should be stopped or
\r
1471 // aborted. It is better to handle it this way because the
\r
1472 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1473 // function is called.
\r
// Thread entry point spawned from callbackEvent() when the user callback
// requested a stop (internalDrain case): calls stopStream() from outside
// the CoreAudio render-callback context, then exits the thread.
// NOTE(review): the function's braces are among the lines dropped by
// extraction in this chunk.
1474 extern "C" void *coreStopStream( void *ptr )
\r
1476 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1477 RtApiCore *object = (RtApiCore *) info->object;
\r
1479 object->stopStream();
\r
1481 pthread_exit( NULL );
\r
// Per-buffer CoreAudio IOProc dispatch. Handles drain/stop signaling, runs
// the user callback to produce/consume one buffer of audio, and moves data
// between the user (or internal conversion) buffers and the CoreAudio
// AudioBufferList(s), covering four layouts per direction: single stream,
// multiple mono streams ("mono mode"), and multiple multi-channel
// interleaved streams (with first-stream channel offset and a possibly
// partially-used last stream).
// NOTE(review): many lines (closing braces, "return SUCCESS/FAILURE;"
// statements, "unlock:" label paths) were dropped from this extracted
// chunk — confirm control flow against the canonical RtAudio source.
1484 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1485 const AudioBufferList *inBufferList,
\r
1486 const AudioBufferList *outBufferList )
\r
1488 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1489 if ( stream_.state == STREAM_CLOSED ) {
\r
1490 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1491 error( RtError::WARNING );
\r
1495 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1496 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1498 // Check if we were draining the stream and signal is finished.
\r
1499 if ( handle->drainCounter > 3 ) {
\r
// Internal drain (callback returned 1): stop from a helper thread so this
// render callback can return promptly. External drain (stopStream() call):
// wake the waiter blocked on the condition variable.
1501 if ( handle->internalDrain == true ) {
\r
1502 stream_.state = STREAM_STOPPING;
\r
1503 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1506 else // external call to stopStream()
\r
1507 pthread_cond_signal( &handle->condition );
\r
1512 MUTEX_LOCK( &stream_.mutex );
\r
1514 // The state might change while waiting on a mutex.
\r
1515 if ( stream_.state == STREAM_STOPPED ) {
\r
1516 MUTEX_UNLOCK( &stream_.mutex );
\r
1521 AudioDeviceID outputDevice = handle->id[0];
\r
1523 // Invoke user callback to get fresh output data UNLESS we are
\r
1524 // draining stream or duplex mode AND the input/output devices are
\r
1525 // different AND this function is called for the input device.
\r
1526 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1527 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1528 double streamTime = getStreamTime();
\r
// Build the xrun status flags for the user callback, clearing them once
// reported.
1529 RtAudioStreamStatus status = 0;
\r
1530 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1531 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1532 handle->xrun[0] = false;
\r
1534 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1535 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1536 handle->xrun[1] = false;
\r
// Callback return codes: 1 = drain then stop (internal), 2 = abort now.
1539 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1540 stream_.bufferSize, streamTime, status, info->userData );
\r
1541 if ( cbReturnValue == 2 ) {
\r
1542 //MUTEX_UNLOCK( &stream_.mutex );
\r
1543 handle->drainCounter = 2;
\r
1547 else if ( cbReturnValue == 1 ) {
\r
1548 handle->drainCounter = 1;
\r
1549 handle->internalDrain = true;
\r
// ---- Output direction: user buffer -> CoreAudio stream buffer(s). ----
1553 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1555 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1557 if ( handle->nStreams[0] == 1 ) {
\r
1558 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1560 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1562 else { // fill multiple streams with zeros
\r
1563 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1564 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1566 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1570 else if ( handle->nStreams[0] == 1 ) {
\r
1571 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1572 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1573 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1575 else { // copy from user buffer
\r
1576 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1577 stream_.userBuffer[0],
\r
1578 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1581 else { // fill multiple streams
\r
1582 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1583 if ( stream_.doConvertBuffer[0] ) {
\r
1584 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1585 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
// Mono mode: each device stream gets one de-interleaved channel's worth
// of samples, taken at offsets of bufferSize samples in the source.
1588 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1589 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1590 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1591 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1592 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1595 else { // fill multiple multi-channel streams with interleaved data
\r
1596 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1597 Float32 *out, *in;
\r
1599 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1600 UInt32 inChannels = stream_.nUserChannels[0];
\r
1601 if ( stream_.doConvertBuffer[0] ) {
\r
1602 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1603 inChannels = stream_.nDeviceChannels[0];
\r
// inOffset is the per-channel sample stride in the source buffer:
// 1 when interleaved, bufferSize when channels are stored contiguously.
1606 if ( inInterleaved ) inOffset = 1;
\r
1607 else inOffset = stream_.bufferSize;
\r
1609 channelsLeft = inChannels;
\r
1610 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1612 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1613 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1616 // Account for possible channel offset in first stream
\r
1617 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1618 streamChannels -= stream_.channelOffset[0];
\r
1619 outJump = stream_.channelOffset[0];
\r
1623 // Account for possible unfilled channels at end of the last stream
\r
1624 if ( streamChannels > channelsLeft ) {
\r
1625 outJump = streamChannels - channelsLeft;
\r
1626 streamChannels = channelsLeft;
\r
1629 // Determine input buffer offsets and skips
\r
1630 if ( inInterleaved ) {
\r
1631 inJump = inChannels;
\r
1632 in += inChannels - channelsLeft;
\r
1636 in += (inChannels - channelsLeft) * inOffset;
\r
// Copy this stream's channels frame by frame from the source buffer.
1639 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1640 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1641 *out++ = in[j*inOffset];
\r
1646 channelsLeft -= streamChannels;
\r
// While draining, count callbacks so the drain-complete branch above
// (drainCounter > 3) eventually fires.
1651 if ( handle->drainCounter ) {
\r
1652 handle->drainCounter++;
\r
// ---- Input direction: CoreAudio stream buffer(s) -> user buffer. ----
1657 AudioDeviceID inputDevice;
\r
1658 inputDevice = handle->id[1];
\r
1659 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1661 if ( handle->nStreams[1] == 1 ) {
\r
1662 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1663 convertBuffer( stream_.userBuffer[1],
\r
1664 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1665 stream_.convertInfo[1] );
\r
1667 else { // copy to user buffer
\r
1668 memcpy( stream_.userBuffer[1],
\r
1669 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1670 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1673 else { // read from multiple streams
\r
1674 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1675 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1677 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1678 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1679 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1680 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1681 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1684 else { // read from multiple multi-channel streams
\r
1685 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1686 Float32 *out, *in;
\r
1688 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1689 UInt32 outChannels = stream_.nUserChannels[1];
\r
1690 if ( stream_.doConvertBuffer[1] ) {
\r
1691 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1692 outChannels = stream_.nDeviceChannels[1];
\r
// outOffset mirrors inOffset in the output branch above: per-channel
// sample stride in the destination buffer.
1695 if ( outInterleaved ) outOffset = 1;
\r
1696 else outOffset = stream_.bufferSize;
\r
1698 channelsLeft = outChannels;
\r
1699 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1701 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1702 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1705 // Account for possible channel offset in first stream
\r
1706 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1707 streamChannels -= stream_.channelOffset[1];
\r
1708 inJump = stream_.channelOffset[1];
\r
1712 // Account for possible unread channels at end of the last stream
\r
1713 if ( streamChannels > channelsLeft ) {
\r
1714 inJump = streamChannels - channelsLeft;
\r
1715 streamChannels = channelsLeft;
\r
1718 // Determine output buffer offsets and skips
\r
1719 if ( outInterleaved ) {
\r
1720 outJump = outChannels;
\r
1721 out += outChannels - channelsLeft;
\r
1725 out += (outChannels - channelsLeft) * outOffset;
\r
// Copy this stream's channels frame by frame into the destination buffer.
1728 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1729 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1730 out[j*outOffset] = *in++;
\r
1735 channelsLeft -= streamChannels;
\r
// Final format conversion from the internal "device" buffer into the
// user's buffer, when one was needed.
1739 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1740 convertBuffer( stream_.userBuffer[1],
\r
1741 stream_.deviceBuffer,
\r
1742 stream_.convertInfo[1] );
\r
1748 //MUTEX_UNLOCK( &stream_.mutex );
\r
// Advance the stream time by one buffer's worth of frames.
1750 RtApi::tickStreamTime();
\r
1754 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1758 case kAudioHardwareNotRunningError:
\r
1759 return "kAudioHardwareNotRunningError";
\r
1761 case kAudioHardwareUnspecifiedError:
\r
1762 return "kAudioHardwareUnspecifiedError";
\r
1764 case kAudioHardwareUnknownPropertyError:
\r
1765 return "kAudioHardwareUnknownPropertyError";
\r
1767 case kAudioHardwareBadPropertySizeError:
\r
1768 return "kAudioHardwareBadPropertySizeError";
\r
1770 case kAudioHardwareIllegalOperationError:
\r
1771 return "kAudioHardwareIllegalOperationError";
\r
1773 case kAudioHardwareBadObjectError:
\r
1774 return "kAudioHardwareBadObjectError";
\r
1776 case kAudioHardwareBadDeviceError:
\r
1777 return "kAudioHardwareBadDeviceError";
\r
1779 case kAudioHardwareBadStreamError:
\r
1780 return "kAudioHardwareBadStreamError";
\r
1782 case kAudioHardwareUnsupportedOperationError:
\r
1783 return "kAudioHardwareUnsupportedOperationError";
\r
1785 case kAudioDeviceUnsupportedFormatError:
\r
1786 return "kAudioDeviceUnsupportedFormatError";
\r
1788 case kAudioDevicePermissionsError:
\r
1789 return "kAudioDevicePermissionsError";
\r
1792 return "CoreAudio unknown error";
\r
1796 //******************** End of __MACOSX_CORE__ *********************//
\r
1799 #if defined(__UNIX_JACK__)
\r
1801 // JACK is a low-latency audio server, originally written for the
\r
1802 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1803 // connect a number of different applications to an audio device, as
\r
1804 // well as allowing them to share audio between themselves.
\r
1806 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1807 // have ports connected to the server. The JACK server is typically
\r
1808 // started in a terminal as follows:
\r
1810 // .jackd -d alsa -d hw:0
\r
1812 // or through an interface program such as qjackctl. Many of the
\r
1813 // parameters normally set for a stream are fixed by the JACK server
\r
1814 // and can be specified when the JACK server is started. In
\r
1817 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1819 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1820 // frames, and number of buffers = 4. Once the server is running, it
\r
1821 // is not possible to override these values. If the values are not
\r
1822 // specified in the command-line, the JACK server uses default values.
\r
1824 // The JACK server does not have to be running when an instance of
\r
1825 // RtApiJack is created, though the function getDeviceCount() will
\r
1826 // report 0 devices found until JACK has been started. When no
\r
1827 // devices are available (i.e., the JACK server is not running), a
\r
1828 // stream cannot be opened.
\r
1830 #include <jack/jack.h>
\r
1831 #include <unistd.h>
\r
1834 // A structure to hold various information related to the Jack API
\r
1835 // implementation.
\r
1836 struct JackHandle {
\r
1837 jack_client_t *client;
\r
1838 jack_port_t **ports[2];
\r
1839 std::string deviceName[2];
\r
1841 pthread_cond_t condition;
\r
1842 int drainCounter; // Tracks callback counts when draining
\r
1843 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1846 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
1849 ThreadHandle threadId;
\r
// No-op error handler used to silence JACK's internal error reporting.
void jackSilentError( const char * ) {};
\r
1852 RtApiJack :: RtApiJack()
\r
1854 // Nothing to do here.
\r
1855 #if !defined(__RTAUDIO_DEBUG__)
\r
1856 // Turn off Jack's internal error reporting.
\r
1857 jack_set_error_function( &jackSilentError );
\r
1861 RtApiJack :: ~RtApiJack()
\r
1863 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1866 unsigned int RtApiJack :: getDeviceCount( void )
\r
1868 // See if we can become a jack client.
\r
1869 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1870 jack_status_t *status = NULL;
\r
1871 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1872 if ( client == 0 ) return 0;
\r
1874 const char **ports;
\r
1875 std::string port, previousPort;
\r
1876 unsigned int nChannels = 0, nDevices = 0;
\r
1877 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1879 // Parse the port names up to the first colon (:).
\r
1880 size_t iColon = 0;
\r
1882 port = (char *) ports[ nChannels ];
\r
1883 iColon = port.find(":");
\r
1884 if ( iColon != std::string::npos ) {
\r
1885 port = port.substr( 0, iColon + 1 );
\r
1886 if ( port != previousPort ) {
\r
1888 previousPort = port;
\r
1891 } while ( ports[++nChannels] );
\r
1895 jack_client_close( client );
\r
1899 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1901 RtAudio::DeviceInfo info;
\r
1902 info.probed = false;
\r
1904 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1905 jack_status_t *status = NULL;
\r
1906 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1907 if ( client == 0 ) {
\r
1908 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1909 error( RtError::WARNING );
\r
1913 const char **ports;
\r
1914 std::string port, previousPort;
\r
1915 unsigned int nPorts = 0, nDevices = 0;
\r
1916 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1918 // Parse the port names up to the first colon (:).
\r
1919 size_t iColon = 0;
\r
1921 port = (char *) ports[ nPorts ];
\r
1922 iColon = port.find(":");
\r
1923 if ( iColon != std::string::npos ) {
\r
1924 port = port.substr( 0, iColon );
\r
1925 if ( port != previousPort ) {
\r
1926 if ( nDevices == device ) info.name = port;
\r
1928 previousPort = port;
\r
1931 } while ( ports[++nPorts] );
\r
1935 if ( device >= nDevices ) {
\r
1936 jack_client_close( client );
\r
1937 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1938 error( RtError::INVALID_USE );
\r
1941 // Get the current jack server sample rate.
\r
1942 info.sampleRates.clear();
\r
1943 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1945 // Count the available ports containing the client name as device
\r
1946 // channels. Jack "input ports" equal RtAudio output channels.
\r
1947 unsigned int nChannels = 0;
\r
1948 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1950 while ( ports[ nChannels ] ) nChannels++;
\r
1952 info.outputChannels = nChannels;
\r
1955 // Jack "output ports" equal RtAudio input channels.
\r
1957 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1959 while ( ports[ nChannels ] ) nChannels++;
\r
1961 info.inputChannels = nChannels;
\r
1964 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
1965 jack_client_close(client);
\r
1966 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
1967 error( RtError::WARNING );
\r
1971 // If device opens for both playback and capture, we determine the channels.
\r
1972 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
1973 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
1975 // Jack always uses 32-bit floats.
\r
1976 info.nativeFormats = RTAUDIO_FLOAT32;
\r
1978 // Jack doesn't provide default devices so we'll use the first available one.
\r
1979 if ( device == 0 && info.outputChannels > 0 )
\r
1980 info.isDefaultOutput = true;
\r
1981 if ( device == 0 && info.inputChannels > 0 )
\r
1982 info.isDefaultInput = true;
\r
1984 jack_client_close(client);
\r
1985 info.probed = true;
\r
1989 int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
1991 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
1993 RtApiJack *object = (RtApiJack *) info->object;
\r
1994 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
1999 // This function will be called by a spawned thread when the Jack
\r
2000 // server signals that it is shutting down. It is necessary to handle
\r
2001 // it this way because the jackShutdown() function must return before
\r
2002 // the jack_deactivate() function (in closeStream()) will return.
\r
2003 extern "C" void *jackCloseStream( void *ptr )
\r
2005 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2006 RtApiJack *object = (RtApiJack *) info->object;
\r
2008 object->closeStream();
\r
2010 pthread_exit( NULL );
\r
2012 void jackShutdown( void *infoPointer )
\r
2014 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2015 RtApiJack *object = (RtApiJack *) info->object;
\r
2017 // Check current stream state. If stopped, then we'll assume this
\r
2018 // was called as a result of a call to RtApiJack::stopStream (the
\r
2019 // deactivation of a client handle causes this function to be called).
\r
2020 // If not, we'll assume the Jack server is shutting down or some
\r
2021 // other problem occurred and we should close the stream.
\r
2022 if ( object->isStreamRunning() == false ) return;
\r
2024 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2025 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2028 int jackXrun( void *infoPointer )
\r
2030 JackHandle *handle = (JackHandle *) infoPointer;
\r
2032 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2033 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2038 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2039 unsigned int firstChannel, unsigned int sampleRate,
\r
2040 RtAudioFormat format, unsigned int *bufferSize,
\r
2041 RtAudio::StreamOptions *options )
\r
2043 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2045 // Look for jack server and try to become a client (only do once per stream).
\r
2046 jack_client_t *client = 0;
\r
2047 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2048 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2049 jack_status_t *status = NULL;
\r
2050 if ( options && !options->streamName.empty() )
\r
2051 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2053 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2054 if ( client == 0 ) {
\r
2055 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2056 error( RtError::WARNING );
\r
2061 // The handle must have been created on an earlier pass.
\r
2062 client = handle->client;
\r
2065 const char **ports;
\r
2066 std::string port, previousPort, deviceName;
\r
2067 unsigned int nPorts = 0, nDevices = 0;
\r
2068 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2070 // Parse the port names up to the first colon (:).
\r
2071 size_t iColon = 0;
\r
2073 port = (char *) ports[ nPorts ];
\r
2074 iColon = port.find(":");
\r
2075 if ( iColon != std::string::npos ) {
\r
2076 port = port.substr( 0, iColon );
\r
2077 if ( port != previousPort ) {
\r
2078 if ( nDevices == device ) deviceName = port;
\r
2080 previousPort = port;
\r
2083 } while ( ports[++nPorts] );
\r
2087 if ( device >= nDevices ) {
\r
2088 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2092 // Count the available ports containing the client name as device
\r
2093 // channels. Jack "input ports" equal RtAudio output channels.
\r
2094 unsigned int nChannels = 0;
\r
2095 unsigned long flag = JackPortIsInput;
\r
2096 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2097 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2099 while ( ports[ nChannels ] ) nChannels++;
\r
2103 // Compare the jack ports for specified client to the requested number of channels.
\r
2104 if ( nChannels < (channels + firstChannel) ) {
\r
2105 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2106 errorText_ = errorStream_.str();
\r
2110 // Check the jack server sample rate.
\r
2111 unsigned int jackRate = jack_get_sample_rate( client );
\r
2112 if ( sampleRate != jackRate ) {
\r
2113 jack_client_close( client );
\r
2114 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2115 errorText_ = errorStream_.str();
\r
2118 stream_.sampleRate = jackRate;
\r
2120 // Get the latency of the JACK port.
\r
2121 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2122 if ( ports[ firstChannel ] )
\r
2123 stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2126 // The jack server always uses 32-bit floating-point data.
\r
2127 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2128 stream_.userFormat = format;
\r
2130 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2131 else stream_.userInterleaved = true;
\r
2133 // Jack always uses non-interleaved buffers.
\r
2134 stream_.deviceInterleaved[mode] = false;
\r
2136 // Jack always provides host byte-ordered data.
\r
2137 stream_.doByteSwap[mode] = false;
\r
2139 // Get the buffer size. The buffer size and number of buffers
\r
2140 // (periods) is set when the jack server is started.
\r
2141 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2142 *bufferSize = stream_.bufferSize;
\r
2144 stream_.nDeviceChannels[mode] = channels;
\r
2145 stream_.nUserChannels[mode] = channels;
\r
2147 // Set flags for buffer conversion.
\r
2148 stream_.doConvertBuffer[mode] = false;
\r
2149 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2150 stream_.doConvertBuffer[mode] = true;
\r
2151 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2152 stream_.nUserChannels[mode] > 1 )
\r
2153 stream_.doConvertBuffer[mode] = true;
\r
2155 // Allocate our JackHandle structure for the stream.
\r
2156 if ( handle == 0 ) {
\r
2158 handle = new JackHandle;
\r
2160 catch ( std::bad_alloc& ) {
\r
2161 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2165 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2166 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2169 stream_.apiHandle = (void *) handle;
\r
2170 handle->client = client;
\r
2172 handle->deviceName[mode] = deviceName;
\r
2174 // Allocate necessary internal buffers.
\r
2175 unsigned long bufferBytes;
\r
2176 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2177 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2178 if ( stream_.userBuffer[mode] == NULL ) {
\r
2179 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2183 if ( stream_.doConvertBuffer[mode] ) {
\r
2185 bool makeBuffer = true;
\r
2186 if ( mode == OUTPUT )
\r
2187 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2188 else { // mode == INPUT
\r
2189 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2190 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2191 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2192 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2196 if ( makeBuffer ) {
\r
2197 bufferBytes *= *bufferSize;
\r
2198 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2199 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2200 if ( stream_.deviceBuffer == NULL ) {
\r
2201 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2207 // Allocate memory for the Jack ports (channels) identifiers.
\r
2208 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2209 if ( handle->ports[mode] == NULL ) {
\r
2210 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2214 stream_.device[mode] = device;
\r
2215 stream_.channelOffset[mode] = firstChannel;
\r
2216 stream_.state = STREAM_STOPPED;
\r
2217 stream_.callbackInfo.object = (void *) this;
\r
2219 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2220 // We had already set up the stream for output.
\r
2221 stream_.mode = DUPLEX;
\r
2223 stream_.mode = mode;
\r
2224 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2225 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2226 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2229 // Register our ports.
\r
2231 if ( mode == OUTPUT ) {
\r
2232 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2233 snprintf( label, 64, "outport %d", i );
\r
2234 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2235 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2239 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2240 snprintf( label, 64, "inport %d", i );
\r
2241 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2242 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2246 // Setup the buffer conversion information structure. We don't use
\r
2247 // buffers to do channel offsets, so we override that parameter
\r
2249 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2255 pthread_cond_destroy( &handle->condition );
\r
2256 jack_client_close( handle->client );
\r
2258 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2259 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2262 stream_.apiHandle = 0;
\r
2265 for ( int i=0; i<2; i++ ) {
\r
2266 if ( stream_.userBuffer[i] ) {
\r
2267 free( stream_.userBuffer[i] );
\r
2268 stream_.userBuffer[i] = 0;
\r
2272 if ( stream_.deviceBuffer ) {
\r
2273 free( stream_.deviceBuffer );
\r
2274 stream_.deviceBuffer = 0;
\r
2280 void RtApiJack :: closeStream( void )
\r
2282 if ( stream_.state == STREAM_CLOSED ) {
\r
2283 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2284 error( RtError::WARNING );
\r
2288 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2291 if ( stream_.state == STREAM_RUNNING )
\r
2292 jack_deactivate( handle->client );
\r
2294 jack_client_close( handle->client );
\r
2298 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2299 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2300 pthread_cond_destroy( &handle->condition );
\r
2302 stream_.apiHandle = 0;
\r
2305 for ( int i=0; i<2; i++ ) {
\r
2306 if ( stream_.userBuffer[i] ) {
\r
2307 free( stream_.userBuffer[i] );
\r
2308 stream_.userBuffer[i] = 0;
\r
2312 if ( stream_.deviceBuffer ) {
\r
2313 free( stream_.deviceBuffer );
\r
2314 stream_.deviceBuffer = 0;
\r
2317 stream_.mode = UNINITIALIZED;
\r
2318 stream_.state = STREAM_CLOSED;
\r
2321 void RtApiJack :: startStream( void )
\r
2324 if ( stream_.state == STREAM_RUNNING ) {
\r
2325 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2326 error( RtError::WARNING );
\r
2330 MUTEX_LOCK(&stream_.mutex);
\r
2332 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2333 int result = jack_activate( handle->client );
\r
2335 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2339 const char **ports;
\r
2341 // Get the list of available ports.
\r
2342 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2344 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2345 if ( ports == NULL) {
\r
2346 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2350 // Now make the port connections. Since RtAudio wasn't designed to
\r
2351 // allow the user to select particular channels of a device, we'll
\r
2352 // just open the first "nChannels" ports with offset.
\r
2353 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2355 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2356 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2359 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2366 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2368 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2369 if ( ports == NULL) {
\r
2370 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2374 // Now make the port connections. See note above.
\r
2375 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2377 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2378 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2381 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2388 handle->drainCounter = 0;
\r
2389 handle->internalDrain = false;
\r
2390 stream_.state = STREAM_RUNNING;
\r
2393 MUTEX_UNLOCK(&stream_.mutex);
\r
2395 if ( result == 0 ) return;
\r
2396 error( RtError::SYSTEM_ERROR );
\r
2399 void RtApiJack :: stopStream( void )
\r
2402 if ( stream_.state == STREAM_STOPPED ) {
\r
2403 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2404 error( RtError::WARNING );
\r
2408 MUTEX_LOCK( &stream_.mutex );
\r
2410 if ( stream_.state == STREAM_STOPPED ) {
\r
2411 MUTEX_UNLOCK( &stream_.mutex );
\r
2415 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2416 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2418 if ( handle->drainCounter == 0 ) {
\r
2419 handle->drainCounter = 2;
\r
2420 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2424 jack_deactivate( handle->client );
\r
2425 stream_.state = STREAM_STOPPED;
\r
2427 MUTEX_UNLOCK( &stream_.mutex );
\r
2430 void RtApiJack :: abortStream( void )
\r
2433 if ( stream_.state == STREAM_STOPPED ) {
\r
2434 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2435 error( RtError::WARNING );
\r
2439 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2440 handle->drainCounter = 2;
\r
2445 // This function will be called by a spawned thread when the user
\r
2446 // callback function signals that the stream should be stopped or
\r
2447 // aborted. It is necessary to handle it this way because the
\r
2448 // callbackEvent() function must return before the jack_deactivate()
\r
2449 // function will return.
\r
2450 extern "C" void *jackStopStream( void *ptr )
\r
2452 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2453 RtApiJack *object = (RtApiJack *) info->object;
\r
2455 object->stopStream();
\r
2457 pthread_exit( NULL );
\r
2460 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2462 if ( stream_.state == STREAM_STOPPED ) return SUCCESS;
\r
2463 if ( stream_.state == STREAM_CLOSED ) {
\r
2464 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2465 error( RtError::WARNING );
\r
2468 if ( stream_.bufferSize != nframes ) {
\r
2469 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2470 error( RtError::WARNING );
\r
2474 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2475 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2477 // Check if we were draining the stream and signal is finished.
\r
2478 if ( handle->drainCounter > 3 ) {
\r
2479 if ( handle->internalDrain == true )
\r
2480 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2482 pthread_cond_signal( &handle->condition );
\r
2486 MUTEX_LOCK( &stream_.mutex );
\r
2488 // The state might change while waiting on a mutex.
\r
2489 if ( stream_.state == STREAM_STOPPED ) {
\r
2490 MUTEX_UNLOCK( &stream_.mutex );
\r
2494 // Invoke user callback first, to get fresh output data.
\r
2495 if ( handle->drainCounter == 0 ) {
\r
2496 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2497 double streamTime = getStreamTime();
\r
2498 RtAudioStreamStatus status = 0;
\r
2499 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2500 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2501 handle->xrun[0] = false;
\r
2503 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2504 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2505 handle->xrun[1] = false;
\r
2507 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2508 stream_.bufferSize, streamTime, status, info->userData );
\r
2509 if ( cbReturnValue == 2 ) {
\r
2510 MUTEX_UNLOCK( &stream_.mutex );
\r
2512 handle->drainCounter = 2;
\r
2513 pthread_create( &id, NULL, jackStopStream, info );
\r
2516 else if ( cbReturnValue == 1 ) {
\r
2517 handle->drainCounter = 1;
\r
2518 handle->internalDrain = true;
\r
2522 jack_default_audio_sample_t *jackbuffer;
\r
2523 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2524 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2526 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2528 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2529 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2530 memset( jackbuffer, 0, bufferBytes );
\r
2534 else if ( stream_.doConvertBuffer[0] ) {
\r
2536 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2538 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2539 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2540 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2543 else { // no buffer conversion
\r
2544 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2545 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2546 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2550 if ( handle->drainCounter ) {
\r
2551 handle->drainCounter++;
\r
2556 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2558 if ( stream_.doConvertBuffer[1] ) {
\r
2559 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2560 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2561 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2563 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2565 else { // no buffer conversion
\r
2566 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2567 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2568 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2574 MUTEX_UNLOCK(&stream_.mutex);
\r
2576 RtApi::tickStreamTime();
\r
2579 //******************** End of __UNIX_JACK__ *********************//
\r
2582 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2584 // The ASIO API is designed around a callback scheme, so this
\r
2585 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2586 // Jack. The primary constraint with ASIO is that it only allows
\r
2587 // access to a single driver at a time. Thus, it is not possible to
\r
2588 // have more than one simultaneous RtAudio stream.
\r
2590 // This implementation also requires a number of external ASIO files
\r
2591 // and a few global variables. The ASIO callback scheme does not
\r
2592 // allow for the passing of user data, so we must create a global
\r
2593 // pointer to our callbackInfo structure.
\r
2595 // On unix systems, we make use of a pthread condition variable.
\r
2596 // Since there is no equivalent in Windows, I hacked something based
\r
2597 // on information found in
\r
2598 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2600 #include "asiosys.h"
\r
2602 #include "iasiothiscallresolver.h"
\r
2603 #include "asiodrivers.h"
\r
2606 AsioDrivers drivers;
\r
2607 ASIOCallbacks asioCallbacks;
\r
2608 ASIODriverInfo driverInfo;
\r
2609 CallbackInfo *asioCallbackInfo;
\r
2612 struct AsioHandle {
\r
2613 int drainCounter; // Tracks callback counts when draining
\r
2614 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2615 ASIOBufferInfo *bufferInfos;
\r
2619 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2622 // Function declarations (definitions at end of section)
\r
2623 static const char* getAsioErrorString( ASIOError result );
\r
2624 void sampleRateChanged( ASIOSampleRate sRate );
\r
2625 long asioMessages( long selector, long value, void* message, double* opt );
\r
2627 RtApiAsio :: RtApiAsio()
\r
2629 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2630 // CoInitialize beforehand, but it must be for appartment threading
\r
2631 // (in which case, CoInitilialize will return S_FALSE here).
\r
2632 coInitialized_ = false;
\r
2633 HRESULT hr = CoInitialize( NULL );
\r
2634 if ( FAILED(hr) ) {
\r
2635 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2636 error( RtError::WARNING );
\r
2638 coInitialized_ = true;
\r
2640 drivers.removeCurrentDriver();
\r
2641 driverInfo.asioVersion = 2;
\r
2643 // See note in DirectSound implementation about GetDesktopWindow().
\r
2644 driverInfo.sysRef = GetForegroundWindow();
\r
2647 RtApiAsio :: ~RtApiAsio()
\r
2649 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2650 if ( coInitialized_ ) CoUninitialize();
\r
2653 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2655 return (unsigned int) drivers.asioGetNumDev();
\r
2658 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2660 RtAudio::DeviceInfo info;
\r
2661 info.probed = false;
\r
2664 unsigned int nDevices = getDeviceCount();
\r
2665 if ( nDevices == 0 ) {
\r
2666 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2667 error( RtError::INVALID_USE );
\r
2670 if ( device >= nDevices ) {
\r
2671 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2672 error( RtError::INVALID_USE );
\r
2675 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2676 if ( stream_.state != STREAM_CLOSED ) {
\r
2677 if ( device >= devices_.size() ) {
\r
2678 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2679 error( RtError::WARNING );
\r
2682 return devices_[ device ];
\r
2685 char driverName[32];
\r
2686 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2687 if ( result != ASE_OK ) {
\r
2688 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2689 errorText_ = errorStream_.str();
\r
2690 error( RtError::WARNING );
\r
2694 info.name = driverName;
\r
2696 if ( !drivers.loadDriver( driverName ) ) {
\r
2697 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2698 errorText_ = errorStream_.str();
\r
2699 error( RtError::WARNING );
\r
2703 result = ASIOInit( &driverInfo );
\r
2704 if ( result != ASE_OK ) {
\r
2705 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2706 errorText_ = errorStream_.str();
\r
2707 error( RtError::WARNING );
\r
2711 // Determine the device channel information.
\r
2712 long inputChannels, outputChannels;
\r
2713 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2714 if ( result != ASE_OK ) {
\r
2715 drivers.removeCurrentDriver();
\r
2716 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2717 errorText_ = errorStream_.str();
\r
2718 error( RtError::WARNING );
\r
2722 info.outputChannels = outputChannels;
\r
2723 info.inputChannels = inputChannels;
\r
2724 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2725 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2727 // Determine the supported sample rates.
\r
2728 info.sampleRates.clear();
\r
2729 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2730 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2731 if ( result == ASE_OK )
\r
2732 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2735 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2736 ASIOChannelInfo channelInfo;
\r
2737 channelInfo.channel = 0;
\r
2738 channelInfo.isInput = true;
\r
2739 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2740 result = ASIOGetChannelInfo( &channelInfo );
\r
2741 if ( result != ASE_OK ) {
\r
2742 drivers.removeCurrentDriver();
\r
2743 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2744 errorText_ = errorStream_.str();
\r
2745 error( RtError::WARNING );
\r
2749 info.nativeFormats = 0;
\r
2750 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2751 info.nativeFormats |= RTAUDIO_SINT16;
\r
2752 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2753 info.nativeFormats |= RTAUDIO_SINT32;
\r
2754 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2755 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2756 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2757 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2759 if ( info.outputChannels > 0 )
\r
2760 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2761 if ( info.inputChannels > 0 )
\r
2762 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2764 info.probed = true;
\r
2765 drivers.removeCurrentDriver();
\r
2769 void bufferSwitch( long index, ASIOBool processNow )
\r
2771 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2772 object->callbackEvent( index );
\r
2775 void RtApiAsio :: saveDeviceInfo( void )
\r
2779 unsigned int nDevices = getDeviceCount();
\r
2780 devices_.resize( nDevices );
\r
2781 for ( unsigned int i=0; i<nDevices; i++ )
\r
2782 devices_[i] = getDeviceInfo( i );
\r
// Open (or extend to duplex) a stream on ASIO device `device`:
// validates channels/sample-rate/format against the driver, negotiates
// the buffer size, creates the ASIO buffers and the AsioHandle, and
// allocates user/device conversion buffers.  Returns SUCCESS/FAILURE.
//
// NOTE(review): this chunk came through a lossy extraction -- every
// code line carries a stray leading line-number token and many
// structural lines (closing braces, "return FAILURE;" paths, the
// "error:" cleanup label entry) are missing.  Code is kept
// byte-identical below; restore structure against the canonical
// RtAudio source before compiling.
2785 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

2786 unsigned int firstChannel, unsigned int sampleRate,

2787 RtAudioFormat format, unsigned int *bufferSize,

2788 RtAudio::StreamOptions *options )

2790 // For ASIO, a duplex stream MUST use the same driver.

2791 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {

2792 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";

// Resolve the driver name for this device index.
2796 char driverName[32];

2797 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );

2798 if ( result != ASE_OK ) {

2799 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";

2800 errorText_ = errorStream_.str();

2804 // Only load the driver once for duplex stream.

2805 if ( mode != INPUT || stream_.mode != OUTPUT ) {

2806 // The getDeviceInfo() function will not work when a stream is open

2807 // because ASIO does not allow multiple devices to run at the same

2808 // time. Thus, we'll probe the system before opening a stream and

2809 // save the results for use by getDeviceInfo().

2810 this->saveDeviceInfo();

2812 if ( !drivers.loadDriver( driverName ) ) {

2813 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";

2814 errorText_ = errorStream_.str();

2818 result = ASIOInit( &driverInfo );

2819 if ( result != ASE_OK ) {

2820 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";

2821 errorText_ = errorStream_.str();

2826 // Check the device channel count.

2827 long inputChannels, outputChannels;

2828 result = ASIOGetChannels( &inputChannels, &outputChannels );

2829 if ( result != ASE_OK ) {

2830 drivers.removeCurrentDriver();

2831 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";

2832 errorText_ = errorStream_.str();

// Requested channels + offset must fit within the device's capability.
2836 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||

2837 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {

2838 drivers.removeCurrentDriver();

2839 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";

2840 errorText_ = errorStream_.str();

2843 stream_.nDeviceChannels[mode] = channels;

2844 stream_.nUserChannels[mode] = channels;

2845 stream_.channelOffset[mode] = firstChannel;

2847 // Verify the sample rate is supported.

2848 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );

2849 if ( result != ASE_OK ) {

2850 drivers.removeCurrentDriver();

2851 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";

2852 errorText_ = errorStream_.str();

2856 // Get the current sample rate

2857 ASIOSampleRate currentRate;

// NOTE(review): "¤tRate" below is a mis-encoded "&currentRate"
// ("&curren" was swallowed by an HTML-entity pass) -- restore the
// address-of expression before compiling.
2858 result = ASIOGetSampleRate( ¤tRate );

2859 if ( result != ASE_OK ) {

2860 drivers.removeCurrentDriver();

2861 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";

2862 errorText_ = errorStream_.str();

2866 // Set the sample rate only if necessary

2867 if ( currentRate != sampleRate ) {

2868 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );

2869 if ( result != ASE_OK ) {

2870 drivers.removeCurrentDriver();

2871 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";

2872 errorText_ = errorStream_.str();

2877 // Determine the driver data type.

2878 ASIOChannelInfo channelInfo;

2879 channelInfo.channel = 0;

2880 if ( mode == OUTPUT ) channelInfo.isInput = false;

2881 else channelInfo.isInput = true;

2882 result = ASIOGetChannelInfo( &channelInfo );

2883 if ( result != ASE_OK ) {

2884 drivers.removeCurrentDriver();

2885 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";

2886 errorText_ = errorStream_.str();

2890 // Assuming WINDOWS host is always little-endian.

2891 stream_.doByteSwap[mode] = false;

2892 stream_.userFormat = format;

2893 stream_.deviceFormat[mode] = 0;

// Map the ASIO sample type to an RtAudio format; big-endian (MSB)
// variants additionally require a byte swap on this little-endian host.
2894 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {

2895 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

2896 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;

2898 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {

2899 stream_.deviceFormat[mode] = RTAUDIO_SINT32;

2900 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;

2902 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {

2903 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;

2904 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;

2906 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {

2907 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;

2908 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;

2911 if ( stream_.deviceFormat[mode] == 0 ) {

2912 drivers.removeCurrentDriver();

2913 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";

2914 errorText_ = errorStream_.str();

2918 // Set the buffer size. For a duplex stream, this will end up

2919 // setting the buffer size based on the input constraints, which

2921 long minSize, maxSize, preferSize, granularity;

2922 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );

2923 if ( result != ASE_OK ) {

2924 drivers.removeCurrentDriver();

2925 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";

2926 errorText_ = errorStream_.str();

// Clamp to [minSize, maxSize]; granularity == -1 means the driver only
// accepts power-of-two sizes, so snap to the nearest power of two.
2930 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;

2931 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;

2932 else if ( granularity == -1 ) {

2933 // Make sure bufferSize is a power of two.

2934 int log2_of_min_size = 0;

2935 int log2_of_max_size = 0;

2937 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {

2938 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;

2939 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;

2942 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );

2943 int min_delta_num = log2_of_min_size;

2945 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {

2946 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );

2947 if (current_delta < min_delta) {

2948 min_delta = current_delta;

2949 min_delta_num = i;

2953 *bufferSize = ( (unsigned int)1 << min_delta_num );

2954 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;

2955 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;

2957 else if ( granularity != 0 ) {

2958 // Set to an even multiple of granularity, rounding up.

2959 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;

// Both halves of a duplex stream must agree on the buffer size.
2962 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {

2963 drivers.removeCurrentDriver();

2964 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";

2968 stream_.bufferSize = *bufferSize;

2969 stream_.nBuffers = 2;

2971 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

2972 else stream_.userInterleaved = true;

2974 // ASIO always uses non-interleaved buffers.

2975 stream_.deviceInterleaved[mode] = false;

2977 // Allocate, if necessary, our AsioHandle structure for the stream.

2978 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

2979 if ( handle == 0 ) {

2981 handle = new AsioHandle;

2983 catch ( std::bad_alloc& ) {

2984 //if ( handle == NULL ) {

2985 drivers.removeCurrentDriver();

2986 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";

2989 handle->bufferInfos = 0;

2991 // Create a manual-reset event.

2992 handle->condition = CreateEvent( NULL, // no security

2993 TRUE, // manual-reset

2994 FALSE, // non-signaled initially

2995 NULL ); // unnamed

2996 stream_.apiHandle = (void *) handle;

2999 // Create the ASIO internal buffers. Since RtAudio sets up input

3000 // and output separately, we'll have to dispose of previously

3001 // created output buffers for a duplex stream.

3002 long inputLatency, outputLatency;

3003 if ( mode == INPUT && stream_.mode == OUTPUT ) {

3004 ASIODisposeBuffers();

3005 if ( handle->bufferInfos ) free( handle->bufferInfos );

3008 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.

3009 bool buffersAllocated = false;

3010 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];

3011 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );

3012 if ( handle->bufferInfos == NULL ) {

3013 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";

3014 errorText_ = errorStream_.str();

// Output channels first, then input channels, each offset by the
// requested firstChannel.
3018 ASIOBufferInfo *infos;

3019 infos = handle->bufferInfos;

3020 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {

3021 infos->isInput = ASIOFalse;

3022 infos->channelNum = i + stream_.channelOffset[0];

3023 infos->buffers[0] = infos->buffers[1] = 0;

3025 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {

3026 infos->isInput = ASIOTrue;

3027 infos->channelNum = i + stream_.channelOffset[1];

3028 infos->buffers[0] = infos->buffers[1] = 0;

3031 // Set up the ASIO callback structure and create the ASIO data buffers.

3032 asioCallbacks.bufferSwitch = &bufferSwitch;

3033 asioCallbacks.sampleRateDidChange = &sampleRateChanged;

3034 asioCallbacks.asioMessage = &asioMessages;

3035 asioCallbacks.bufferSwitchTimeInfo = NULL;

3036 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );

3037 if ( result != ASE_OK ) {

3038 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";

3039 errorText_ = errorStream_.str();

3042 buffersAllocated = true;

3044 // Set flags for buffer conversion.

3045 stream_.doConvertBuffer[mode] = false;

3046 if ( stream_.userFormat != stream_.deviceFormat[mode] )

3047 stream_.doConvertBuffer[mode] = true;

3048 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

3049 stream_.nUserChannels[mode] > 1 )

3050 stream_.doConvertBuffer[mode] = true;

3052 // Allocate necessary internal buffers

3053 unsigned long bufferBytes;

3054 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

3055 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

3056 if ( stream_.userBuffer[mode] == NULL ) {

3057 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";

3061 if ( stream_.doConvertBuffer[mode] ) {

// Reuse the existing device buffer for duplex input when the output
// half already allocated one at least as large.
3063 bool makeBuffer = true;

3064 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

3065 if ( mode == INPUT ) {

3066 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

3067 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

3068 if ( bufferBytes <= bytesOut ) makeBuffer = false;

3072 if ( makeBuffer ) {

3073 bufferBytes *= *bufferSize;

3074 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

3075 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

3076 if ( stream_.deviceBuffer == NULL ) {

3077 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";

3083 stream_.sampleRate = sampleRate;

3084 stream_.device[mode] = device;

3085 stream_.state = STREAM_STOPPED;

3086 asioCallbackInfo = &stream_.callbackInfo;

3087 stream_.callbackInfo.object = (void *) this;

3088 if ( stream_.mode == OUTPUT && mode == INPUT )

3089 // We had already set up an output stream.

3090 stream_.mode = DUPLEX;

3092 stream_.mode = mode;

3094 // Determine device latencies

3095 result = ASIOGetLatencies( &inputLatency, &outputLatency );

3096 if ( result != ASE_OK ) {

3097 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";

3098 errorText_ = errorStream_.str();

3099 error( RtError::WARNING); // warn but don't fail

3102 stream_.latency[0] = outputLatency;

3103 stream_.latency[1] = inputLatency;

3106 // Setup the buffer conversion information structure. We don't use

3107 // buffers to do channel offsets, so we override that parameter

3109 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );

// Error-cleanup path: dispose ASIO buffers, unload the driver, and
// free the handle and any allocated stream buffers before returning
// FAILURE.  (The "error:" label line itself was lost in extraction.)
3114 if ( buffersAllocated )

3115 ASIODisposeBuffers();

3116 drivers.removeCurrentDriver();

3119 CloseHandle( handle->condition );

3120 if ( handle->bufferInfos )

3121 free( handle->bufferInfos );

3123 stream_.apiHandle = 0;

3126 for ( int i=0; i<2; i++ ) {

3127 if ( stream_.userBuffer[i] ) {

3128 free( stream_.userBuffer[i] );

3129 stream_.userBuffer[i] = 0;

3133 if ( stream_.deviceBuffer ) {

3134 free( stream_.deviceBuffer );

3135 stream_.deviceBuffer = 0;
\r
// Close the open stream: stop it if running, dispose the ASIO buffers,
// unload the driver, and free the AsioHandle and all stream buffers.
// NOTE(review): lossy extraction -- stray leading line numbers and
// several brace/return lines are missing; code kept byte-identical.
3141 void RtApiAsio :: closeStream()

3143 if ( stream_.state == STREAM_CLOSED ) {

3144 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";

3145 error( RtError::WARNING );

3149 if ( stream_.state == STREAM_RUNNING ) {

3150 stream_.state = STREAM_STOPPED;

3153 ASIODisposeBuffers();

3154 drivers.removeCurrentDriver();

3156 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

3158 CloseHandle( handle->condition );

3159 if ( handle->bufferInfos )

3160 free( handle->bufferInfos );

3162 stream_.apiHandle = 0;

3165 for ( int i=0; i<2; i++ ) {

3166 if ( stream_.userBuffer[i] ) {

3167 free( stream_.userBuffer[i] );

3168 stream_.userBuffer[i] = 0;

3172 if ( stream_.deviceBuffer ) {

3173 free( stream_.deviceBuffer );

3174 stream_.deviceBuffer = 0;

// Mark the stream closed so subsequent API calls are rejected.
3177 stream_.mode = UNINITIALIZED;

3178 stream_.state = STREAM_CLOSED;
\r
// Set when a stop has been requested from the callback path (see
// callbackEvent/asioStopStream); the callback checks it to bail out
// while ASIOStop() is still pending.  Cleared in startStream().
3181 bool stopThreadCalled = false;
\r
// Start the open stream via ASIOStart(), reset the drain bookkeeping
// and the manual-reset condition event, and mark the stream RUNNING.
// Raises SYSTEM_ERROR if the driver refuses to start.
// NOTE(review): lossy extraction -- stray leading line numbers and
// brace/return lines missing; code kept byte-identical.
3183 void RtApiAsio :: startStream()

3186 if ( stream_.state == STREAM_RUNNING ) {

3187 errorText_ = "RtApiAsio::startStream(): the stream is already running!";

3188 error( RtError::WARNING );

3192 //MUTEX_LOCK( &stream_.mutex );

3194 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

3195 ASIOError result = ASIOStart();

3196 if ( result != ASE_OK ) {

3197 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";

3198 errorText_ = errorStream_.str();

// Fresh start: no drain in progress, condition event unsignaled.
3202 handle->drainCounter = 0;

3203 handle->internalDrain = false;

3204 ResetEvent( handle->condition );

3205 stream_.state = STREAM_RUNNING;

3209 //MUTEX_UNLOCK( &stream_.mutex );

3211 stopThreadCalled = false;

3213 if ( result == ASE_OK ) return;

3214 error( RtError::SYSTEM_ERROR );
\r
// Stop the running stream.  For output/duplex streams that are not
// already draining, request a 2-cycle drain and block on the handle's
// condition event until the callback signals the drain is complete,
// then call ASIOStop().  Raises SYSTEM_ERROR on driver failure.
// NOTE(review): lossy extraction -- stray leading line numbers and
// brace/return lines missing; code kept byte-identical.
3217 void RtApiAsio :: stopStream()

3220 if ( stream_.state == STREAM_STOPPED ) {

3221 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";

3222 error( RtError::WARNING );

3227 MUTEX_LOCK( &stream_.mutex );

// Re-check under the lock: another thread may have stopped us while
// we waited on the mutex.
3229 if ( stream_.state == STREAM_STOPPED ) {

3230 MUTEX_UNLOCK( &stream_.mutex );

3235 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

3236 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

3237 if ( handle->drainCounter == 0 ) {

3238 handle->drainCounter = 2;

3239 // MUTEX_UNLOCK( &stream_.mutex );

3240 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled

3241 //ResetEvent( handle->condition );

3242 // MUTEX_LOCK( &stream_.mutex );

3246 stream_.state = STREAM_STOPPED;

3248 ASIOError result = ASIOStop();

3249 if ( result != ASE_OK ) {

3250 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";

3251 errorText_ = errorStream_.str();

3254 // MUTEX_UNLOCK( &stream_.mutex );

3256 if ( result == ASE_OK ) return;

3257 error( RtError::SYSTEM_ERROR );
\r
// Abort the running stream.  For ASIO this is implemented as a normal
// stop (see comment below): aborting without zeroing device buffers
// was observed to leave residual sound.
// NOTE(review): lossy extraction -- the trailing call into stopStream()
// and closing brace lines are missing here; presumably "stopStream();"
// followed -- confirm against the canonical RtAudio source.
3260 void RtApiAsio :: abortStream()

3263 if ( stream_.state == STREAM_STOPPED ) {

3264 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";

3265 error( RtError::WARNING );

3269 // The following lines were commented-out because some behavior was

3270 // noted where the device buffers need to be zeroed to avoid

3271 // continuing sound, even when the device buffers are completely

3272 // disposed. So now, calling abort is the same as calling stop.

3273 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

3274 // handle->drainCounter = 2;
\r
3278 // This function will be called by a spawned thread when the user
\r
3279 // callback function signals that the stream should be stopped or
\r
3280 // aborted. It is necessary to handle it this way because the
\r
3281 // callbackEvent() function must return before the ASIOStop()
\r
3282 // function will return.
\r
3283 extern "C" unsigned __stdcall asioStopStream( void *ptr )
\r
3285 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3286 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3288 object->stopStream();
\r
3290 _endthreadex( 0 );
\r
// Per-buffer processing driven by the driver's bufferSwitch callback:
// invokes the user callback (unless draining), converts/byte-swaps and
// copies user data into the non-interleaved ASIO output buffers, and
// de-interleaves ASIO input buffers into the user buffer.  Returns
// SUCCESS/FAILURE.
// NOTE(review): lossy extraction -- stray leading line numbers,
// closing braces, and "goto unlock"/"return SUCCESS;" lines are
// missing; code kept byte-identical.
3294 bool RtApiAsio :: callbackEvent( long bufferIndex )

3296 if ( stream_.state == STREAM_STOPPED ) return SUCCESS;

3297 if ( stopThreadCalled ) return SUCCESS;

3298 if ( stream_.state == STREAM_CLOSED ) {

3299 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";

3300 error( RtError::WARNING );

3304 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;

3305 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

3307 // Check if we were draining the stream and signal if finished.

3308 if ( handle->drainCounter > 3 ) {

// internalDrain == false means stopStream() is blocked on the
// condition event; otherwise the stop must come from a new thread
// because ASIOStop() cannot be called from inside this callback.
3309 if ( handle->internalDrain == false )

3310 SetEvent( handle->condition );

3311 else { // spawn a thread to stop the stream

3312 unsigned threadId;

3313 stopThreadCalled = true;

3314 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,

3315 &stream_.callbackInfo, 0, &threadId );

3320 /*MUTEX_LOCK( &stream_.mutex );

3322 // The state might change while waiting on a mutex.

3323 if ( stream_.state == STREAM_STOPPED ) goto unlock; */

3325 // Invoke user callback to get fresh output data UNLESS we are

3326 // draining stream.

3327 if ( handle->drainCounter == 0 ) {

3328 RtAudioCallback callback = (RtAudioCallback) info->callback;

3329 double streamTime = getStreamTime();

3330 RtAudioStreamStatus status = 0;

3331 if ( stream_.mode != INPUT && asioXRun == true ) {

3332 status |= RTAUDIO_OUTPUT_UNDERFLOW;

3335 if ( stream_.mode != OUTPUT && asioXRun == true ) {

3336 status |= RTAUDIO_INPUT_OVERFLOW;

3339 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],

3340 stream_.bufferSize, streamTime, status, info->userData );

// Callback return 2 = abort immediately; 1 = drain then stop.
3341 if ( cbReturnValue == 2 ) {

3342 // MUTEX_UNLOCK( &stream_.mutex );

3344 unsigned threadId;

3345 stopThreadCalled = true;

3346 handle->drainCounter = 2;

3347 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,

3348 &stream_.callbackInfo, 0, &threadId );

3351 else if ( cbReturnValue == 1 ) {

3352 handle->drainCounter = 1;

3353 handle->internalDrain = true;

3357 unsigned int nChannels, bufferBytes, i, j;

3358 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];

3359 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

3361 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

3363 if ( handle->drainCounter > 1 ) { // write zeros to the output stream

3365 for ( i=0, j=0; i<nChannels; i++ ) {

3366 if ( handle->bufferInfos[i].isInput != ASIOTrue )

3367 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );

3371 else if ( stream_.doConvertBuffer[0] ) {

3373 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );

3374 if ( stream_.doByteSwap[0] )

3375 byteSwapBuffer( stream_.deviceBuffer,

3376 stream_.bufferSize * stream_.nDeviceChannels[0],

3377 stream_.deviceFormat[0] );

3379 for ( i=0, j=0; i<nChannels; i++ ) {

3380 if ( handle->bufferInfos[i].isInput != ASIOTrue )

3381 memcpy( handle->bufferInfos[i].buffers[bufferIndex],

3382 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );

// No conversion needed: copy the user buffer channel-by-channel.
3388 if ( stream_.doByteSwap[0] )

3389 byteSwapBuffer( stream_.userBuffer[0],

3390 stream_.bufferSize * stream_.nUserChannels[0],

3391 stream_.userFormat );

3393 for ( i=0, j=0; i<nChannels; i++ ) {

3394 if ( handle->bufferInfos[i].isInput != ASIOTrue )

3395 memcpy( handle->bufferInfos[i].buffers[bufferIndex],

3396 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );

// Advance the drain counter once per callback while draining.
3401 if ( handle->drainCounter ) {

3402 handle->drainCounter++;

3407 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

3409 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

3411 if (stream_.doConvertBuffer[1]) {

3413 // Always interleave ASIO input data.

3414 for ( i=0, j=0; i<nChannels; i++ ) {

3415 if ( handle->bufferInfos[i].isInput == ASIOTrue )

3416 memcpy( &stream_.deviceBuffer[j++*bufferBytes],

3417 handle->bufferInfos[i].buffers[bufferIndex],

3421 if ( stream_.doByteSwap[1] )

3422 byteSwapBuffer( stream_.deviceBuffer,

3423 stream_.bufferSize * stream_.nDeviceChannels[1],

3424 stream_.deviceFormat[1] );

3425 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

3429 for ( i=0, j=0; i<nChannels; i++ ) {

3430 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {

3431 memcpy( &stream_.userBuffer[1][bufferBytes*j++],

3432 handle->bufferInfos[i].buffers[bufferIndex],

3437 if ( stream_.doByteSwap[1] )

3438 byteSwapBuffer( stream_.userBuffer[1],

3439 stream_.bufferSize * stream_.nUserChannels[1],

3440 stream_.userFormat );

3445 // The following call was suggested by Malte Clasen. While the API

3446 // documentation indicates it should not be required, some device

3447 // drivers apparently do not function correctly without it.

3448 ASIOOutputReady();

3450 // MUTEX_UNLOCK( &stream_.mutex );

3452 RtApi::tickStreamTime();
\r
// ASIO driver callback: the driver reports an (external-sync) sample
// rate change.  RtAudio responds by stopping the stream and logging to
// stderr; errors from stopStream() are caught and reported.
// NOTE(review): lossy extraction -- the try/catch scaffolding braces
// and a "return;" inside the catch are missing; code kept
// byte-identical.
3456 void sampleRateChanged( ASIOSampleRate sRate )

3458 // The ASIO documentation says that this usually only happens during

3459 // external sync. Audio processing is not stopped by the driver,

3460 // actual sample rate might not have even changed, maybe only the

3461 // sample rate status of an AES/EBU or S/PDIF digital input at the

3464 RtApi *object = (RtApi *) asioCallbackInfo->object;

3466 object->stopStream();

3468 catch ( RtError &exception ) {

3469 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;

3473 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
// ASIO driver message callback.  Answers driver queries (selector
// support, engine version, timeInfo/timeCode support) and reports
// reset/resync/latency notifications.  Returns the per-selector value
// expected by the ASIO SDK (nonzero = handled/true).
// NOTE(review): lossy extraction -- the "long ret = 0;" setup, the
// "ret = 1;"/"break;" lines in each case, and the final return are
// missing; code kept byte-identical.
3476 long asioMessages( long selector, long value, void* message, double* opt )

3480 switch( selector ) {

3481 case kAsioSelectorSupported:

3482 if ( value == kAsioResetRequest

3483 || value == kAsioEngineVersion

3484 || value == kAsioResyncRequest

3485 || value == kAsioLatenciesChanged

3486 // The following three were added for ASIO 2.0, you don't

3487 // necessarily have to support them.

3488 || value == kAsioSupportsTimeInfo

3489 || value == kAsioSupportsTimeCode

3490 || value == kAsioSupportsInputMonitor)

3493 case kAsioResetRequest:

3494 // Defer the task and perform the reset of the driver during the

3495 // next "safe" situation. You cannot reset the driver right now,

3496 // as this code is called from the driver. Reset the driver is

3497 // done by completely destruct is. I.e. ASIOStop(),

3498 // ASIODisposeBuffers(), Destruction Afterwards you initialize the

3500 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;

3503 case kAsioResyncRequest:

3504 // This informs the application that the driver encountered some

3505 // non-fatal data loss. It is used for synchronization purposes

3506 // of different media. Added mainly to work around the Win16Mutex

3507 // problems in Windows 95/98 with the Windows Multimedia system,

3508 // which could lose data because the Mutex was held too long by

3509 // another thread. However a driver can issue it in other

3510 // situations, too.

3511 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;

3515 case kAsioLatenciesChanged:

3516 // This will inform the host application that the drivers were

3517 // latencies changed. Beware, it this does not mean that the

3518 // buffer sizes have changed! You might need to update internal

3520 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;

3523 case kAsioEngineVersion:

3524 // Return the supported ASIO version of the host application. If

3525 // a host application does not implement this selector, ASIO 1.0

3526 // is assumed by the driver.

3529 case kAsioSupportsTimeInfo:

3530 // Informs the driver whether the

3531 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.

3532 // For compatibility with ASIO 1.0 drivers the host application

3533 // should always support the "old" bufferSwitch method, too.

3536 case kAsioSupportsTimeCode:

3537 // Informs the driver whether application is interested in time

3538 // code info. If an application does not need to know about time

3539 // code, the driver has less work to do.
\r
3546 static const char* getAsioErrorString( ASIOError result )
\r
3551 const char*message;
\r
3554 static Messages m[] =
\r
3556 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3557 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3558 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3559 { ASE_InvalidMode, "Invalid mode." },
\r
3560 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3561 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3562 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3565 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3566 if ( m[i].value == result ) return m[i].message;
\r
3568 return "Unknown error.";
\r
3570 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3574 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
3576 // Modified by Robin Davies, October 2005
\r
3577 // - Improvements to DirectX pointer chasing.
\r
3578 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
3579 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
3580 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
3581 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
3583 #include <dsound.h>
\r
3584 #include <assert.h>
\r
3585 #include <algorithm>
\r
3587 #if defined(__MINGW32__)
\r
3588 // missing from latest mingw winapi
\r
3589 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
3590 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
3591 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
3592 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
3595 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
3597 #ifdef _MSC_VER // if Microsoft Visual C++
\r
3598 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
3601 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
3603 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
3604 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
3605 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
3606 return pointer >= earlierPointer && pointer < laterPointer;
\r
3609 // A structure to hold various information related to the DirectSound

3610 // API implementation.

// NOTE(review): the "struct DsHandle {" header and several member
// declarations (the id[2], buffer[2], xrun[2] arrays referenced by the
// constructor initializer below) were dropped by the extraction --
// restore against the canonical RtAudio source.
3612 unsigned int drainCounter; // Tracks callback counts when draining

3613 bool internalDrain; // Indicates if stop is initiated from callback or not.

3617 UINT bufferPointer[2];

3618 DWORD dsBufferSize[2];

3619 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.

// Default constructor: zero all counters, pointers, and xrun flags.
3623 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
3626 // Declarations for utility functions, callbacks, and structures
\r
3627 // specific to the DirectSound implementation.
\r
3628 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
3629 LPCTSTR description,
\r
3631 LPVOID lpContext );
\r
3633 static const char* getErrorString( int code );
\r
3635 extern "C" unsigned __stdcall callbackHandler( void *ptr );
\r
3644 : found(false) { validId[0] = false; validId[1] = false; }
\r
3647 std::vector< DsDevice > dsDevices;
\r
3649 RtApiDs :: RtApiDs()
\r
3651 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
3652 // accept whatever the mainline chose for a threading model.
\r
3653 coInitialized_ = false;
\r
3654 HRESULT hr = CoInitialize( NULL );
\r
3655 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
3658 RtApiDs :: ~RtApiDs()
\r
3660 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
3661 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
3664 // The DirectSound default output is always the first device.
\r
3665 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
3670 // The DirectSound default input is always the first input device,
\r
3671 // which is the first capture device enumerated.
\r
3672 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
3677 unsigned int RtApiDs :: getDeviceCount( void )
\r
3679 // Set query flag for previously found devices to false, so that we
\r
3680 // can check for any devices that have disappeared.
\r
3681 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3682 dsDevices[i].found = false;
\r
3684 // Query DirectSound devices.
\r
3685 bool isInput = false;
\r
3686 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );
\r
3687 if ( FAILED( result ) ) {
\r
3688 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
3689 errorText_ = errorStream_.str();
\r
3690 error( RtError::WARNING );
\r
3693 // Query DirectSoundCapture devices.
\r
3695 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );
\r
3696 if ( FAILED( result ) ) {
\r
3697 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
3698 errorText_ = errorStream_.str();
\r
3699 error( RtError::WARNING );
\r
3702 // Clean out any devices that may have disappeared.
\r
3703 std::vector< int > indices;
\r
3704 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3705 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
3706 unsigned int nErased = 0;
\r
3707 for ( unsigned int i=0; i<indices.size(); i++ )
\r
3708 dsDevices.erase( dsDevices.begin()-nErased++ );
\r
3710 return dsDevices.size();
\r
3713 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
3715 RtAudio::DeviceInfo info;
\r
3716 info.probed = false;
\r
3718 if ( dsDevices.size() == 0 ) {
\r
3719 // Force a query of all devices
\r
3721 if ( dsDevices.size() == 0 ) {
\r
3722 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
3723 error( RtError::INVALID_USE );
\r
3727 if ( device >= dsDevices.size() ) {
\r
3728 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
3729 error( RtError::INVALID_USE );
\r
3733 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
3735 LPDIRECTSOUND output;
\r
3737 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
3738 if ( FAILED( result ) ) {
\r
3739 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
3740 errorText_ = errorStream_.str();
\r
3741 error( RtError::WARNING );
\r
3745 outCaps.dwSize = sizeof( outCaps );
\r
3746 result = output->GetCaps( &outCaps );
\r
3747 if ( FAILED( result ) ) {
\r
3748 output->Release();
\r
3749 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
3750 errorText_ = errorStream_.str();
\r
3751 error( RtError::WARNING );
\r
3755 // Get output channel information.
\r
3756 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
3758 // Get sample rate information.
\r
3759 info.sampleRates.clear();
\r
3760 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
3761 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
3762 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
\r
3763 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
3766 // Get format information.
\r
3767 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3768 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3770 output->Release();
\r
3772 if ( getDefaultOutputDevice() == device )
\r
3773 info.isDefaultOutput = true;
\r
3775 if ( dsDevices[ device ].validId[1] == false ) {
\r
3776 info.name = dsDevices[ device ].name;
\r
3777 info.probed = true;
\r
3783 LPDIRECTSOUNDCAPTURE input;
\r
3784 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
3785 if ( FAILED( result ) ) {
\r
3786 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
3787 errorText_ = errorStream_.str();
\r
3788 error( RtError::WARNING );
\r
3793 inCaps.dwSize = sizeof( inCaps );
\r
3794 result = input->GetCaps( &inCaps );
\r
3795 if ( FAILED( result ) ) {
\r
3797 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
3798 errorText_ = errorStream_.str();
\r
3799 error( RtError::WARNING );
\r
3803 // Get input channel information.
\r
3804 info.inputChannels = inCaps.dwChannels;
\r
3806 // Get sample rate and format information.
\r
3807 std::vector<unsigned int> rates;
\r
3808 if ( inCaps.dwChannels >= 2 ) {
\r
3809 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3810 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3811 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3812 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3813 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3814 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3815 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3816 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3818 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
3819 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
3820 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
3821 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
3822 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
3824 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
3825 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
3826 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
3827 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
3828 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
3831 else if ( inCaps.dwChannels == 1 ) {
\r
3832 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3833 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3834 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3835 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3836 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3837 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3838 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3839 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3841 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
3842 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
3843 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
3844 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
3845 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
3847 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
3848 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
3849 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
3850 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
3851 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
3854 else info.inputChannels = 0; // technically, this would be an error
\r
3858 if ( info.inputChannels == 0 ) return info;
\r
3860 // Copy the supported rates to the info structure but avoid duplication.
\r
3862 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
3864 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
3865 if ( rates[i] == info.sampleRates[j] ) {
\r
3870 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
3872 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
3874 // If device opens for both playback and capture, we determine the channels.
\r
3875 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
3876 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
3878 if ( device == 0 ) info.isDefaultInput = true;
\r
3880 // Copy name and return.
\r
3881 info.name = dsDevices[ device ].name;
\r
3882 info.probed = true;
\r
3886 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
3887 unsigned int firstChannel, unsigned int sampleRate,
\r
3888 RtAudioFormat format, unsigned int *bufferSize,
\r
3889 RtAudio::StreamOptions *options )
\r
3891 if ( channels + firstChannel > 2 ) {
\r
3892 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
3896 unsigned int nDevices = dsDevices.size();
\r
3897 if ( nDevices == 0 ) {
\r
3898 // This should not happen because a check is made before this function is called.
\r
3899 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
3903 if ( device >= nDevices ) {
\r
3904 // This should not happen because a check is made before this function is called.
\r
3905 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
3909 if ( mode == OUTPUT ) {
\r
3910 if ( dsDevices[ device ].validId[0] == false ) {
\r
3911 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
3912 errorText_ = errorStream_.str();
\r
3916 else { // mode == INPUT
\r
3917 if ( dsDevices[ device ].validId[1] == false ) {
\r
3918 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
3919 errorText_ = errorStream_.str();
\r
3924 // According to a note in PortAudio, using GetDesktopWindow()
\r
3925 // instead of GetForegroundWindow() is supposed to avoid problems
\r
3926 // that occur when the application's window is not the foreground
\r
3927 // window. Also, if the application window closes before the
\r
3928 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
3929 // problems when using GetDesktopWindow() but it seems fine now
\r
3930 // (January 2010). I'll leave it commented here.
\r
3931 // HWND hWnd = GetForegroundWindow();
\r
3932 HWND hWnd = GetDesktopWindow();
\r
3934 // Check the numberOfBuffers parameter and limit the lowest value to
\r
3935 // two. This is a judgement call and a value of two is probably too
\r
3936 // low for capture, but it should work for playback.
\r
3938 if ( options ) nBuffers = options->numberOfBuffers;
\r
3939 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
3940 if ( nBuffers < 2 ) nBuffers = 3;
\r
3942 // Check the lower range of the user-specified buffer size and set
\r
3943 // (arbitrarily) to a lower bound of 32.
\r
3944 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
3946 // Create the wave format structure. The data format setting will
\r
3947 // be determined later.
\r
3948 WAVEFORMATEX waveFormat;
\r
3949 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
3950 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
3951 waveFormat.nChannels = channels + firstChannel;
\r
3952 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
3954 // Determine the device buffer size. By default, we'll use the value
\r
3955 // defined above (32K), but we will grow it to make allowances for
\r
3956 // very large software buffer sizes.
\r
3957 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;;
\r
3958 DWORD dsPointerLeadTime = 0;
\r
3960 void *ohandle = 0, *bhandle = 0;
\r
3962 if ( mode == OUTPUT ) {
\r
3964 LPDIRECTSOUND output;
\r
3965 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
3966 if ( FAILED( result ) ) {
\r
3967 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
3968 errorText_ = errorStream_.str();
\r
3973 outCaps.dwSize = sizeof( outCaps );
\r
3974 result = output->GetCaps( &outCaps );
\r
3975 if ( FAILED( result ) ) {
\r
3976 output->Release();
\r
3977 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
3978 errorText_ = errorStream_.str();
\r
3982 // Check channel information.
\r
3983 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
3984 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
3985 errorText_ = errorStream_.str();
\r
3989 // Check format information. Use 16-bit format unless not
\r
3990 // supported or user requests 8-bit.
\r
3991 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
3992 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
3993 waveFormat.wBitsPerSample = 16;
\r
3994 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
3997 waveFormat.wBitsPerSample = 8;
\r
3998 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
4000 stream_.userFormat = format;
\r
4002 // Update wave format structure and buffer information.
\r
4003 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
4004 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
4005 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
4007 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
4008 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
4009 dsBufferSize *= 2;
\r
4011 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
4012 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
4013 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
4014 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
4015 if ( FAILED( result ) ) {
\r
4016 output->Release();
\r
4017 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
4018 errorText_ = errorStream_.str();
\r
4022 // Even though we will write to the secondary buffer, we need to
\r
4023 // access the primary buffer to set the correct output format
\r
4024 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
4025 // buffer description.
\r
4026 DSBUFFERDESC bufferDescription;
\r
4027 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
4028 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
4029 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
4031 // Obtain the primary buffer
\r
4032 LPDIRECTSOUNDBUFFER buffer;
\r
4033 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
4034 if ( FAILED( result ) ) {
\r
4035 output->Release();
\r
4036 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
4037 errorText_ = errorStream_.str();
\r
4041 // Set the primary DS buffer sound format.
\r
4042 result = buffer->SetFormat( &waveFormat );
\r
4043 if ( FAILED( result ) ) {
\r
4044 output->Release();
\r
4045 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
4046 errorText_ = errorStream_.str();
\r
4050 // Setup the secondary DS buffer description.
\r
4051 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
4052 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
4053 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
4054 DSBCAPS_GLOBALFOCUS |
\r
4055 DSBCAPS_GETCURRENTPOSITION2 |
\r
4056 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
4057 bufferDescription.dwBufferBytes = dsBufferSize;
\r
4058 bufferDescription.lpwfxFormat = &waveFormat;
\r
4060 // Try to create the secondary DS buffer. If that doesn't work,
\r
4061 // try to use software mixing. Otherwise, there's a problem.
\r
4062 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
4063 if ( FAILED( result ) ) {
\r
4064 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
4065 DSBCAPS_GLOBALFOCUS |
\r
4066 DSBCAPS_GETCURRENTPOSITION2 |
\r
4067 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
4068 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
4069 if ( FAILED( result ) ) {
\r
4070 output->Release();
\r
4071 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
4072 errorText_ = errorStream_.str();
\r
4077 // Get the buffer size ... might be different from what we specified.
\r
4079 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
4080 result = buffer->GetCaps( &dsbcaps );
\r
4081 if ( FAILED( result ) ) {
\r
4082 output->Release();
\r
4083 buffer->Release();
\r
4084 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
4085 errorText_ = errorStream_.str();
\r
4089 dsBufferSize = dsbcaps.dwBufferBytes;
\r
4091 // Lock the DS buffer
\r
4094 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4095 if ( FAILED( result ) ) {
\r
4096 output->Release();
\r
4097 buffer->Release();
\r
4098 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
4099 errorText_ = errorStream_.str();
\r
4103 // Zero the DS buffer
\r
4104 ZeroMemory( audioPtr, dataLen );
\r
4106 // Unlock the DS buffer
\r
4107 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4108 if ( FAILED( result ) ) {
\r
4109 output->Release();
\r
4110 buffer->Release();
\r
4111 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
4112 errorText_ = errorStream_.str();
\r
4116 ohandle = (void *) output;
\r
4117 bhandle = (void *) buffer;
\r
4120 if ( mode == INPUT ) {
\r
4122 LPDIRECTSOUNDCAPTURE input;
\r
4123 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
4124 if ( FAILED( result ) ) {
\r
4125 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
4126 errorText_ = errorStream_.str();
\r
4131 inCaps.dwSize = sizeof( inCaps );
\r
4132 result = input->GetCaps( &inCaps );
\r
4133 if ( FAILED( result ) ) {
\r
4135 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
4136 errorText_ = errorStream_.str();
\r
4140 // Check channel information.
\r
4141 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
4142 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
4146 // Check format information. Use 16-bit format unless user
\r
4147 // requests 8-bit.
\r
4148 DWORD deviceFormats;
\r
4149 if ( channels + firstChannel == 2 ) {
\r
4150 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
4151 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
4152 waveFormat.wBitsPerSample = 8;
\r
4153 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
4155 else { // assume 16-bit is supported
\r
4156 waveFormat.wBitsPerSample = 16;
\r
4157 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
4160 else { // channel == 1
\r
4161 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
4162 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
4163 waveFormat.wBitsPerSample = 8;
\r
4164 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
4166 else { // assume 16-bit is supported
\r
4167 waveFormat.wBitsPerSample = 16;
\r
4168 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
4171 stream_.userFormat = format;
\r
4173 // Update wave format structure and buffer information.
\r
4174 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
4175 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
4176 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
4178 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
4179 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
4180 dsBufferSize *= 2;
\r
4182 // Setup the secondary DS buffer description.
\r
4183 DSCBUFFERDESC bufferDescription;
\r
4184 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
4185 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
4186 bufferDescription.dwFlags = 0;
\r
4187 bufferDescription.dwReserved = 0;
\r
4188 bufferDescription.dwBufferBytes = dsBufferSize;
\r
4189 bufferDescription.lpwfxFormat = &waveFormat;
\r
4191 // Create the capture buffer.
\r
4192 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
4193 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
4194 if ( FAILED( result ) ) {
\r
4196 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
4197 errorText_ = errorStream_.str();
\r
4201 // Get the buffer size ... might be different from what we specified.
\r
4202 DSCBCAPS dscbcaps;
\r
4203 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
4204 result = buffer->GetCaps( &dscbcaps );
\r
4205 if ( FAILED( result ) ) {
\r
4207 buffer->Release();
\r
4208 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
4209 errorText_ = errorStream_.str();
\r
4213 dsBufferSize = dscbcaps.dwBufferBytes;
\r
4215 // NOTE: We could have a problem here if this is a duplex stream
\r
4216 // and the play and capture hardware buffer sizes are different
\r
4217 // (I'm actually not sure if that is a problem or not).
\r
4218 // Currently, we are not verifying that.
\r
4220 // Lock the capture buffer
\r
4223 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4224 if ( FAILED( result ) ) {
\r
4226 buffer->Release();
\r
4227 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
4228 errorText_ = errorStream_.str();
\r
4232 // Zero the buffer
\r
4233 ZeroMemory( audioPtr, dataLen );
\r
4235 // Unlock the buffer
\r
4236 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4237 if ( FAILED( result ) ) {
\r
4239 buffer->Release();
\r
4240 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
4241 errorText_ = errorStream_.str();
\r
4245 ohandle = (void *) input;
\r
4246 bhandle = (void *) buffer;
\r
4249 // Set various stream parameters
\r
4250 DsHandle *handle = 0;
\r
4251 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
4252 stream_.nUserChannels[mode] = channels;
\r
4253 stream_.bufferSize = *bufferSize;
\r
4254 stream_.channelOffset[mode] = firstChannel;
\r
4255 stream_.deviceInterleaved[mode] = true;
\r
4256 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
4257 else stream_.userInterleaved = true;
\r
4259 // Set flag for buffer conversion
\r
4260 stream_.doConvertBuffer[mode] = false;
\r
4261 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
4262 stream_.doConvertBuffer[mode] = true;
\r
4263 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
4264 stream_.doConvertBuffer[mode] = true;
\r
4265 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4266 stream_.nUserChannels[mode] > 1 )
\r
4267 stream_.doConvertBuffer[mode] = true;
\r
4269 // Allocate necessary internal buffers
\r
4270 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
4271 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
4272 if ( stream_.userBuffer[mode] == NULL ) {
\r
4273 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
4277 if ( stream_.doConvertBuffer[mode] ) {
\r
4279 bool makeBuffer = true;
\r
4280 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
4281 if ( mode == INPUT ) {
\r
4282 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
4283 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
4284 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
4288 if ( makeBuffer ) {
\r
4289 bufferBytes *= *bufferSize;
\r
4290 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
4291 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
4292 if ( stream_.deviceBuffer == NULL ) {
\r
4293 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
4299 // Allocate our DsHandle structures for the stream.
\r
4300 if ( stream_.apiHandle == 0 ) {
\r
4302 handle = new DsHandle;
\r
4304 catch ( std::bad_alloc& ) {
\r
4305 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
4309 // Create a manual-reset event.
\r
4310 handle->condition = CreateEvent( NULL, // no security
\r
4311 TRUE, // manual-reset
\r
4312 FALSE, // non-signaled initially
\r
4313 NULL ); // unnamed
\r
4314 stream_.apiHandle = (void *) handle;
\r
4317 handle = (DsHandle *) stream_.apiHandle;
\r
4318 handle->id[mode] = ohandle;
\r
4319 handle->buffer[mode] = bhandle;
\r
4320 handle->dsBufferSize[mode] = dsBufferSize;
\r
4321 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
4323 stream_.device[mode] = device;
\r
4324 stream_.state = STREAM_STOPPED;
\r
4325 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
4326 // We had already set up an output stream.
\r
4327 stream_.mode = DUPLEX;
\r
4329 stream_.mode = mode;
\r
4330 stream_.nBuffers = nBuffers;
\r
4331 stream_.sampleRate = sampleRate;
\r
4333 // Setup the buffer conversion information structure.
\r
4334 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
4336 // Setup the callback thread.
\r
4337 if ( stream_.callbackInfo.isRunning == false ) {
\r
4338 unsigned threadId;
\r
4339 stream_.callbackInfo.isRunning = true;
\r
4340 stream_.callbackInfo.object = (void *) this;
\r
4341 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
4342 &stream_.callbackInfo, 0, &threadId );
\r
4343 if ( stream_.callbackInfo.thread == 0 ) {
\r
4344 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
4348 // Boost DS thread priority
\r
4349 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
4355 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
4356 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
4357 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4358 if ( buffer ) buffer->Release();
\r
4359 object->Release();
\r
4361 if ( handle->buffer[1] ) {
\r
4362 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
4363 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4364 if ( buffer ) buffer->Release();
\r
4365 object->Release();
\r
4367 CloseHandle( handle->condition );
\r
4369 stream_.apiHandle = 0;
\r
4372 for ( int i=0; i<2; i++ ) {
\r
4373 if ( stream_.userBuffer[i] ) {
\r
4374 free( stream_.userBuffer[i] );
\r
4375 stream_.userBuffer[i] = 0;
\r
4379 if ( stream_.deviceBuffer ) {
\r
4380 free( stream_.deviceBuffer );
\r
4381 stream_.deviceBuffer = 0;
\r
4387 void RtApiDs :: closeStream()
\r
4389 if ( stream_.state == STREAM_CLOSED ) {
\r
4390 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
4391 error( RtError::WARNING );
\r
4395 // Stop the callback thread.
\r
4396 stream_.callbackInfo.isRunning = false;
\r
4397 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
4398 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
4400 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4402 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
4403 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
4404 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4407 buffer->Release();
\r
4409 object->Release();
\r
4411 if ( handle->buffer[1] ) {
\r
4412 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
4413 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4416 buffer->Release();
\r
4418 object->Release();
\r
4420 CloseHandle( handle->condition );
\r
4422 stream_.apiHandle = 0;
\r
4425 for ( int i=0; i<2; i++ ) {
\r
4426 if ( stream_.userBuffer[i] ) {
\r
4427 free( stream_.userBuffer[i] );
\r
4428 stream_.userBuffer[i] = 0;
\r
4432 if ( stream_.deviceBuffer ) {
\r
4433 free( stream_.deviceBuffer );
\r
4434 stream_.deviceBuffer = 0;
\r
4437 stream_.mode = UNINITIALIZED;
\r
4438 stream_.state = STREAM_CLOSED;
\r
4441 void RtApiDs :: startStream()
\r
4444 if ( stream_.state == STREAM_RUNNING ) {
\r
4445 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
4446 error( RtError::WARNING );
\r
4450 //MUTEX_LOCK( &stream_.mutex );
\r
4452 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4454 // Increase scheduler frequency on lesser windows (a side-effect of
\r
4455 // increasing timer accuracy). On greater windows (Win2K or later),
\r
4456 // this is already in effect.
\r
4457 timeBeginPeriod( 1 );
\r
4459 buffersRolling = false;
\r
4460 duplexPrerollBytes = 0;
\r
4462 if ( stream_.mode == DUPLEX ) {
\r
4463 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
4464 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
4467 HRESULT result = 0;
\r
4468 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4470 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4471 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
4472 if ( FAILED( result ) ) {
\r
4473 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
4474 errorText_ = errorStream_.str();
\r
4479 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4481 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4482 result = buffer->Start( DSCBSTART_LOOPING );
\r
4483 if ( FAILED( result ) ) {
\r
4484 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
4485 errorText_ = errorStream_.str();
\r
4490 handle->drainCounter = 0;
\r
4491 handle->internalDrain = false;
\r
4492 ResetEvent( handle->condition );
\r
4493 stream_.state = STREAM_RUNNING;
\r
4496 // MUTEX_UNLOCK( &stream_.mutex );
\r
4498 if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
4501 void RtApiDs :: stopStream()
\r
4504 if ( stream_.state == STREAM_STOPPED ) {
\r
4505 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
4506 error( RtError::WARNING );
\r
4511 MUTEX_LOCK( &stream_.mutex );
\r
4513 if ( stream_.state == STREAM_STOPPED ) {
\r
4514 MUTEX_UNLOCK( &stream_.mutex );
\r
4519 HRESULT result = 0;
\r
4522 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4523 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4524 if ( handle->drainCounter == 0 ) {
\r
4525 handle->drainCounter = 2;
\r
4526 // MUTEX_UNLOCK( &stream_.mutex );
\r
4527 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
4528 //ResetEvent( handle->condition );
\r
4529 // MUTEX_LOCK( &stream_.mutex );
\r
4532 stream_.state = STREAM_STOPPED;
\r
4534 // Stop the buffer and clear memory
\r
4535 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4536 result = buffer->Stop();
\r
4537 if ( FAILED( result ) ) {
\r
4538 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
4539 errorText_ = errorStream_.str();
\r
4543 // Lock the buffer and clear it so that if we start to play again,
\r
4544 // we won't have old data playing.
\r
4545 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4546 if ( FAILED( result ) ) {
\r
4547 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
4548 errorText_ = errorStream_.str();
\r
4552 // Zero the DS buffer
\r
4553 ZeroMemory( audioPtr, dataLen );
\r
4555 // Unlock the DS buffer
\r
4556 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4557 if ( FAILED( result ) ) {
\r
4558 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
4559 errorText_ = errorStream_.str();
\r
4563 // If we start playing again, we must begin at beginning of buffer.
\r
4564 handle->bufferPointer[0] = 0;
\r
4567 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4568 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4572 stream_.state = STREAM_STOPPED;
\r
4574 result = buffer->Stop();
\r
4575 if ( FAILED( result ) ) {
\r
4576 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
4577 errorText_ = errorStream_.str();
\r
4581 // Lock the buffer and clear it so that if we start to play again,
\r
4582 // we won't have old data playing.
\r
4583 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4584 if ( FAILED( result ) ) {
\r
4585 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
4586 errorText_ = errorStream_.str();
\r
4590 // Zero the DS buffer
\r
4591 ZeroMemory( audioPtr, dataLen );
\r
4593 // Unlock the DS buffer
\r
4594 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4595 if ( FAILED( result ) ) {
\r
4596 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
4597 errorText_ = errorStream_.str();
\r
4601 // If we start recording again, we must begin at beginning of buffer.
\r
4602 handle->bufferPointer[1] = 0;
\r
4606 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
4607 // MUTEX_UNLOCK( &stream_.mutex );
\r
4609 if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
4612 void RtApiDs :: abortStream()
\r
4615 if ( stream_.state == STREAM_STOPPED ) {
\r
4616 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
4617 error( RtError::WARNING );
\r
4621 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4622 handle->drainCounter = 2;
\r
4627 void RtApiDs :: callbackEvent()
\r
4629 if ( stream_.state == STREAM_STOPPED ) {
\r
4630 Sleep( 50 ); // sleep 50 milliseconds
\r
4634 if ( stream_.state == STREAM_CLOSED ) {
\r
4635 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
4636 error( RtError::WARNING );
\r
4640 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
4641 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4643 // Check if we were draining the stream and signal is finished.
\r
4644 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
4645 if ( handle->internalDrain == false )
\r
4646 SetEvent( handle->condition );
\r
4653 MUTEX_LOCK( &stream_.mutex );
\r
4655 // The state might change while waiting on a mutex.
\r
4656 if ( stream_.state == STREAM_STOPPED ) {
\r
4657 MUTEX_UNLOCK( &stream_.mutex );
\r
4662 // Invoke user callback to get fresh output data UNLESS we are
\r
4663 // draining stream.
\r
4664 if ( handle->drainCounter == 0 ) {
\r
4665 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
4666 double streamTime = getStreamTime();
\r
4667 RtAudioStreamStatus status = 0;
\r
4668 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
4669 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
4670 handle->xrun[0] = false;
\r
4672 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
4673 status |= RTAUDIO_INPUT_OVERFLOW;
\r
4674 handle->xrun[1] = false;
\r
4676 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
4677 stream_.bufferSize, streamTime, status, info->userData );
\r
4678 if ( cbReturnValue == 2 ) {
\r
4679 // MUTEX_UNLOCK( &stream_.mutex );
\r
4680 handle->drainCounter = 2;
\r
4684 else if ( cbReturnValue == 1 ) {
\r
4685 handle->drainCounter = 1;
\r
4686 handle->internalDrain = true;
\r
4691 DWORD currentWritePointer, safeWritePointer;
\r
4692 DWORD currentReadPointer, safeReadPointer;
\r
4693 UINT nextWritePointer;
\r
4695 LPVOID buffer1 = NULL;
\r
4696 LPVOID buffer2 = NULL;
\r
4697 DWORD bufferSize1 = 0;
\r
4698 DWORD bufferSize2 = 0;
\r
4703 if ( buffersRolling == false ) {
\r
4704 if ( stream_.mode == DUPLEX ) {
\r
4705 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4707 // It takes a while for the devices to get rolling. As a result,
\r
4708 // there's no guarantee that the capture and write device pointers
\r
4709 // will move in lockstep. Wait here for both devices to start
\r
4710 // rolling, and then set our buffer pointers accordingly.
\r
4711 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
4712 // bytes later than the write buffer.
\r
4714 // Stub: a serious risk of having a pre-emptive scheduling round
\r
4715 // take place between the two GetCurrentPosition calls... but I'm
\r
4716 // really not sure how to solve the problem. Temporarily boost to
\r
4717 // Realtime priority, maybe; but I'm not sure what priority the
\r
4718 // DirectSound service threads run at. We *should* be roughly
\r
4719 // within a ms or so of correct.
\r
4721 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4722 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4724 DWORD startSafeWritePointer, startSafeReadPointer;
\r
4726 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
4727 if ( FAILED( result ) ) {
\r
4728 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4729 errorText_ = errorStream_.str();
\r
4730 error( RtError::SYSTEM_ERROR );
\r
4732 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
4733 if ( FAILED( result ) ) {
\r
4734 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4735 errorText_ = errorStream_.str();
\r
4736 error( RtError::SYSTEM_ERROR );
\r
4739 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
4740 if ( FAILED( result ) ) {
\r
4741 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4742 errorText_ = errorStream_.str();
\r
4743 error( RtError::SYSTEM_ERROR );
\r
4745 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
4746 if ( FAILED( result ) ) {
\r
4747 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4748 errorText_ = errorStream_.str();
\r
4749 error( RtError::SYSTEM_ERROR );
\r
4751 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
4755 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
4757 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4758 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4759 handle->bufferPointer[1] = safeReadPointer;
\r
4761 else if ( stream_.mode == OUTPUT ) {
\r
4763 // Set the proper nextWritePosition after initial startup.
\r
4764 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4765 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4766 if ( FAILED( result ) ) {
\r
4767 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4768 errorText_ = errorStream_.str();
\r
4769 error( RtError::SYSTEM_ERROR );
\r
4771 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4772 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
4775 buffersRolling = true;
\r
4778 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
4780 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4782 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
4783 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4784 bufferBytes *= formatBytes( stream_.userFormat );
\r
4785 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
4788 // Setup parameters and do buffer conversion if necessary.
\r
4789 if ( stream_.doConvertBuffer[0] ) {
\r
4790 buffer = stream_.deviceBuffer;
\r
4791 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
4792 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
4793 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
4796 buffer = stream_.userBuffer[0];
\r
4797 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
4798 bufferBytes *= formatBytes( stream_.userFormat );
\r
4801 // No byte swapping necessary in DirectSound implementation.
\r
4803 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
4804 // unsigned. So, we need to convert our signed 8-bit data here to
\r
4806 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
4807 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
4809 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
4810 nextWritePointer = handle->bufferPointer[0];
\r
4812 DWORD endWrite, leadPointer;
\r
4814 // Find out where the read and "safe write" pointers are.
\r
4815 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
4816 if ( FAILED( result ) ) {
\r
4817 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
4818 errorText_ = errorStream_.str();
\r
4819 error( RtError::SYSTEM_ERROR );
\r
4822 // We will copy our output buffer into the region between
\r
4823 // safeWritePointer and leadPointer. If leadPointer is not
\r
4824 // beyond the next endWrite position, wait until it is.
\r
4825 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
4826 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
4827 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
4828 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
4829 endWrite = nextWritePointer + bufferBytes;
\r
4831 // Check whether the entire write region is behind the play pointer.
\r
4832 if ( leadPointer >= endWrite ) break;
\r
4834 // If we are here, then we must wait until the leadPointer advances
\r
4835 // beyond the end of our next write region. We use the
\r
4836 // Sleep() function to suspend operation until that happens.
\r
4837 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
4838 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
4839 if ( millis < 1.0 ) millis = 1.0;
\r
4840 Sleep( (DWORD) millis );
\r
4843 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
4844 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
4845 // We've strayed into the forbidden zone ... resync the read pointer.
\r
4846 handle->xrun[0] = true;
\r
4847 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
4848 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
4849 handle->bufferPointer[0] = nextWritePointer;
\r
4850 endWrite = nextWritePointer + bufferBytes;
\r
4853 // Lock free space in the buffer
\r
4854 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
4855 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4856 if ( FAILED( result ) ) {
\r
4857 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
4858 errorText_ = errorStream_.str();
\r
4859 error( RtError::SYSTEM_ERROR );
\r
4862 // Copy our buffer into the DS buffer
\r
4863 CopyMemory( buffer1, buffer, bufferSize1 );
\r
4864 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
4866 // Update our buffer offset and unlock sound buffer
\r
4867 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4868 if ( FAILED( result ) ) {
\r
4869 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
4870 errorText_ = errorStream_.str();
\r
4871 error( RtError::SYSTEM_ERROR );
\r
4873 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4874 handle->bufferPointer[0] = nextWritePointer;
\r
4876 if ( handle->drainCounter ) {
\r
4877 handle->drainCounter++;
\r
4882 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4884 // Setup parameters.
\r
4885 if ( stream_.doConvertBuffer[1] ) {
\r
4886 buffer = stream_.deviceBuffer;
\r
4887 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
4888 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
4891 buffer = stream_.userBuffer[1];
\r
4892 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
4893 bufferBytes *= formatBytes( stream_.userFormat );
\r
4896 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4897 long nextReadPointer = handle->bufferPointer[1];
\r
4898 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
4900 // Find out where the write and "safe read" pointers are.
\r
4901 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4902 if ( FAILED( result ) ) {
\r
4903 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4904 errorText_ = errorStream_.str();
\r
4905 error( RtError::SYSTEM_ERROR );
\r
4908 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4909 DWORD endRead = nextReadPointer + bufferBytes;
\r
4911 // Handling depends on whether we are INPUT or DUPLEX.
\r
4912 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
4913 // then a wait here will drag the write pointers into the forbidden zone.
\r
4915 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
4916 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
4917 // practical way to sync up the read and write pointers reliably, given the
\r
4918 // the very complex relationship between phase and increment of the read and write
\r
4921 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
4922 // provide a pre-roll period of 0.5 seconds in which we return
\r
4923 // zeros from the read buffer while the pointers sync up.
\r
4925 if ( stream_.mode == DUPLEX ) {
\r
4926 if ( safeReadPointer < endRead ) {
\r
4927 if ( duplexPrerollBytes <= 0 ) {
\r
4928 // Pre-roll time over. Be more agressive.
\r
4929 int adjustment = endRead-safeReadPointer;
\r
4931 handle->xrun[1] = true;
\r
4933 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
4934 // and perform fine adjustments later.
\r
4935 // - small adjustments: back off by twice as much.
\r
4936 if ( adjustment >= 2*bufferBytes )
\r
4937 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
4939 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
4941 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4945 // In pre=roll time. Just do it.
\r
4946 nextReadPointer = safeReadPointer - bufferBytes;
\r
4947 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
4949 endRead = nextReadPointer + bufferBytes;
\r
4952 else { // mode == INPUT
\r
4953 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
4954 // See comments for playback.
\r
4955 double millis = (endRead - safeReadPointer) * 1000.0;
\r
4956 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
4957 if ( millis < 1.0 ) millis = 1.0;
\r
4958 Sleep( (DWORD) millis );
\r
4960 // Wake up and find out where we are now.
\r
4961 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
4962 if ( FAILED( result ) ) {
\r
4963 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
4964 errorText_ = errorStream_.str();
\r
4965 error( RtError::SYSTEM_ERROR );
\r
4968 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
4972 // Lock free space in the buffer
\r
4973 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
4974 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
4975 if ( FAILED( result ) ) {
\r
4976 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
4977 errorText_ = errorStream_.str();
\r
4978 error( RtError::SYSTEM_ERROR );
\r
4981 if ( duplexPrerollBytes <= 0 ) {
\r
4982 // Copy our buffer into the DS buffer
\r
4983 CopyMemory( buffer, buffer1, bufferSize1 );
\r
4984 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
4987 memset( buffer, 0, bufferSize1 );
\r
4988 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
4989 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
4992 // Update our buffer offset and unlock sound buffer
\r
4993 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
4994 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
4995 if ( FAILED( result ) ) {
\r
4996 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
4997 errorText_ = errorStream_.str();
\r
4998 error( RtError::SYSTEM_ERROR );
\r
5000 handle->bufferPointer[1] = nextReadPointer;
\r
5002 // No byte swapping necessary in DirectSound implementation.
\r
5004 // If necessary, convert 8-bit data from unsigned to signed.
\r
5005 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
5006 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
5008 // Do buffer conversion if necessary.
\r
5009 if ( stream_.doConvertBuffer[1] )
\r
5010 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
5014 // MUTEX_UNLOCK( &stream_.mutex );
\r
5016 RtApi::tickStreamTime();
\r
5019 // Definitions for utility functions and callbacks
\r
5020 // specific to the DirectSound implementation.
\r
5022 extern "C" unsigned __stdcall callbackHandler( void *ptr )
\r
5024 CallbackInfo *info = (CallbackInfo *) ptr;
\r
5025 RtApiDs *object = (RtApiDs *) info->object;
\r
5026 bool* isRunning = &info->isRunning;
\r
5028 while ( *isRunning == true ) {
\r
5029 object->callbackEvent();
\r
5032 _endthreadex( 0 );
\r
5036 #include "tchar.h"
\r
5038 std::string convertTChar( LPCTSTR name )
\r
5040 #if defined( UNICODE ) || defined( _UNICODE )
\r
5041 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
5042 std::string s( length, 0 );
\r
5043 length = WideCharToMultiByte(CP_UTF8, 0, name, wcslen(name), &s[0], length, NULL, NULL);
\r
5045 std::string s( name );
\r
5051 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5052 LPCTSTR description,
\r
5054 LPVOID lpContext )
\r
5056 bool *isInput = (bool *) lpContext;
\r
5059 bool validDevice = false;
\r
5060 if ( *isInput == true ) {
\r
5062 LPDIRECTSOUNDCAPTURE object;
\r
5064 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
5065 if ( hr != DS_OK ) return TRUE;
\r
5067 caps.dwSize = sizeof(caps);
\r
5068 hr = object->GetCaps( &caps );
\r
5069 if ( hr == DS_OK ) {
\r
5070 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
5071 validDevice = true;
\r
5073 object->Release();
\r
5077 LPDIRECTSOUND object;
\r
5078 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
5079 if ( hr != DS_OK ) return TRUE;
\r
5081 caps.dwSize = sizeof(caps);
\r
5082 hr = object->GetCaps( &caps );
\r
5083 if ( hr == DS_OK ) {
\r
5084 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
5085 validDevice = true;
\r
5087 object->Release();
\r
5090 // If good device, then save its name and guid.
\r
5091 std::string name = convertTChar( description );
\r
5092 if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
5093 name = "Default Device";
\r
5094 if ( validDevice ) {
\r
5095 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
5096 if ( dsDevices[i].name == name ) {
\r
5097 dsDevices[i].found = true;
\r
5099 dsDevices[i].id[1] = lpguid;
\r
5100 dsDevices[i].validId[1] = true;
\r
5103 dsDevices[i].id[0] = lpguid;
\r
5104 dsDevices[i].validId[0] = true;
\r
5111 device.name = name;
\r
5112 device.found = true;
\r
5114 device.id[1] = lpguid;
\r
5115 device.validId[1] = true;
\r
5118 device.id[0] = lpguid;
\r
5119 device.validId[0] = true;
\r
5121 dsDevices.push_back( device );
\r
5127 static const char* getErrorString( int code )
\r
5131 case DSERR_ALLOCATED:
\r
5132 return "Already allocated";
\r
5134 case DSERR_CONTROLUNAVAIL:
\r
5135 return "Control unavailable";
\r
5137 case DSERR_INVALIDPARAM:
\r
5138 return "Invalid parameter";
\r
5140 case DSERR_INVALIDCALL:
\r
5141 return "Invalid call";
\r
5143 case DSERR_GENERIC:
\r
5144 return "Generic error";
\r
5146 case DSERR_PRIOLEVELNEEDED:
\r
5147 return "Priority level needed";
\r
5149 case DSERR_OUTOFMEMORY:
\r
5150 return "Out of memory";
\r
5152 case DSERR_BADFORMAT:
\r
5153 return "The sample rate or the channel format is not supported";
\r
5155 case DSERR_UNSUPPORTED:
\r
5156 return "Not supported";
\r
5158 case DSERR_NODRIVER:
\r
5159 return "No driver";
\r
5161 case DSERR_ALREADYINITIALIZED:
\r
5162 return "Already initialized";
\r
5164 case DSERR_NOAGGREGATION:
\r
5165 return "No aggregation";
\r
5167 case DSERR_BUFFERLOST:
\r
5168 return "Buffer lost";
\r
5170 case DSERR_OTHERAPPHASPRIO:
\r
5171 return "Another application already has priority";
\r
5173 case DSERR_UNINITIALIZED:
\r
5174 return "Uninitialized";
\r
5177 return "DirectSound unknown error";
\r
5180 //******************** End of __WINDOWS_DS__ *********************//
\r
5184 #if defined(__LINUX_ALSA__)
\r
5186 #include <alsa/asoundlib.h>
\r
5187 #include <unistd.h>
\r
5189 // A structure to hold various information related to the ALSA API
\r
5190 // implementation.
\r
5191 struct AlsaHandle {
\r
5192 snd_pcm_t *handles[2];
\r
5193 bool synchronized;
\r
5195 pthread_cond_t runnable_cv;
\r
5199 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
5202 extern "C" void *alsaCallbackHandler( void * ptr );
\r
5204 RtApiAlsa :: RtApiAlsa()
\r
5206 // Nothing to do here.
\r
5209 RtApiAlsa :: ~RtApiAlsa()
\r
5211 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5214 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
5216 unsigned nDevices = 0;
\r
5217 int result, subdevice, card;
\r
5219 snd_ctl_t *handle;
\r
5221 // Count cards and devices
\r
5223 snd_card_next( &card );
\r
5224 while ( card >= 0 ) {
\r
5225 sprintf( name, "hw:%d", card );
\r
5226 result = snd_ctl_open( &handle, name, 0 );
\r
5227 if ( result < 0 ) {
\r
5228 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5229 errorText_ = errorStream_.str();
\r
5230 error( RtError::WARNING );
\r
5235 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
5236 if ( result < 0 ) {
\r
5237 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5238 errorText_ = errorStream_.str();
\r
5239 error( RtError::WARNING );
\r
5242 if ( subdevice < 0 )
\r
5247 snd_ctl_close( handle );
\r
5248 snd_card_next( &card );
\r
5254 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
5256 RtAudio::DeviceInfo info;
\r
5257 info.probed = false;
\r
5259 unsigned nDevices = 0;
\r
5260 int result, subdevice, card;
\r
5262 snd_ctl_t *chandle;
\r
5264 // Count cards and devices
\r
5266 snd_card_next( &card );
\r
5267 while ( card >= 0 ) {
\r
5268 sprintf( name, "hw:%d", card );
\r
5269 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5270 if ( result < 0 ) {
\r
5271 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5272 errorText_ = errorStream_.str();
\r
5273 error( RtError::WARNING );
\r
5278 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5279 if ( result < 0 ) {
\r
5280 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5281 errorText_ = errorStream_.str();
\r
5282 error( RtError::WARNING );
\r
5285 if ( subdevice < 0 ) break;
\r
5286 if ( nDevices == device ) {
\r
5287 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5293 snd_ctl_close( chandle );
\r
5294 snd_card_next( &card );
\r
5297 if ( nDevices == 0 ) {
\r
5298 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
5299 error( RtError::INVALID_USE );
\r
5302 if ( device >= nDevices ) {
\r
5303 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
5304 error( RtError::INVALID_USE );
\r
5309 // If a stream is already open, we cannot probe the stream devices.
\r
5310 // Thus, use the saved results.
\r
5311 if ( stream_.state != STREAM_CLOSED &&
\r
5312 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
5313 snd_ctl_close( chandle );
\r
5314 if ( device >= devices_.size() ) {
\r
5315 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
5316 error( RtError::WARNING );
\r
5319 return devices_[ device ];
\r
5322 int openMode = SND_PCM_ASYNC;
\r
5323 snd_pcm_stream_t stream;
\r
5324 snd_pcm_info_t *pcminfo;
\r
5325 snd_pcm_info_alloca( &pcminfo );
\r
5326 snd_pcm_t *phandle;
\r
5327 snd_pcm_hw_params_t *params;
\r
5328 snd_pcm_hw_params_alloca( ¶ms );
\r
5330 // First try for playback
\r
5331 stream = SND_PCM_STREAM_PLAYBACK;
\r
5332 snd_pcm_info_set_device( pcminfo, subdevice );
\r
5333 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
5334 snd_pcm_info_set_stream( pcminfo, stream );
\r
5336 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5337 if ( result < 0 ) {
\r
5338 // Device probably doesn't support playback.
\r
5339 goto captureProbe;
\r
5342 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
5343 if ( result < 0 ) {
\r
5344 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5345 errorText_ = errorStream_.str();
\r
5346 error( RtError::WARNING );
\r
5347 goto captureProbe;
\r
5350 // The device is open ... fill the parameter structure.
\r
5351 result = snd_pcm_hw_params_any( phandle, params );
\r
5352 if ( result < 0 ) {
\r
5353 snd_pcm_close( phandle );
\r
5354 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5355 errorText_ = errorStream_.str();
\r
5356 error( RtError::WARNING );
\r
5357 goto captureProbe;
\r
5360 // Get output channel information.
\r
5361 unsigned int value;
\r
5362 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5363 if ( result < 0 ) {
\r
5364 snd_pcm_close( phandle );
\r
5365 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
5366 errorText_ = errorStream_.str();
\r
5367 error( RtError::WARNING );
\r
5368 goto captureProbe;
\r
5370 info.outputChannels = value;
\r
5371 snd_pcm_close( phandle );
\r
5374 // Now try for capture
\r
5375 stream = SND_PCM_STREAM_CAPTURE;
\r
5376 snd_pcm_info_set_stream( pcminfo, stream );
\r
5378 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5379 snd_ctl_close( chandle );
\r
5380 if ( result < 0 ) {
\r
5381 // Device probably doesn't support capture.
\r
5382 if ( info.outputChannels == 0 ) return info;
\r
5383 goto probeParameters;
\r
5386 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5387 if ( result < 0 ) {
\r
5388 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5389 errorText_ = errorStream_.str();
\r
5390 error( RtError::WARNING );
\r
5391 if ( info.outputChannels == 0 ) return info;
\r
5392 goto probeParameters;
\r
5395 // The device is open ... fill the parameter structure.
\r
5396 result = snd_pcm_hw_params_any( phandle, params );
\r
5397 if ( result < 0 ) {
\r
5398 snd_pcm_close( phandle );
\r
5399 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5400 errorText_ = errorStream_.str();
\r
5401 error( RtError::WARNING );
\r
5402 if ( info.outputChannels == 0 ) return info;
\r
5403 goto probeParameters;
\r
5406 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5407 if ( result < 0 ) {
\r
5408 snd_pcm_close( phandle );
\r
5409 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
5410 errorText_ = errorStream_.str();
\r
5411 error( RtError::WARNING );
\r
5412 if ( info.outputChannels == 0 ) return info;
\r
5413 goto probeParameters;
\r
5415 info.inputChannels = value;
\r
5416 snd_pcm_close( phandle );
\r
5418 // If device opens for both playback and capture, we determine the channels.
\r
5419 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5420 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5422 // ALSA doesn't provide default devices so we'll use the first available one.
\r
5423 if ( device == 0 && info.outputChannels > 0 )
\r
5424 info.isDefaultOutput = true;
\r
5425 if ( device == 0 && info.inputChannels > 0 )
\r
5426 info.isDefaultInput = true;
\r
5429 // At this point, we just need to figure out the supported data
\r
5430 // formats and sample rates. We'll proceed by opening the device in
\r
5431 // the direction with the maximum number of channels, or playback if
\r
5432 // they are equal. This might limit our sample rate options, but so
\r
5435 if ( info.outputChannels >= info.inputChannels )
\r
5436 stream = SND_PCM_STREAM_PLAYBACK;
\r
5438 stream = SND_PCM_STREAM_CAPTURE;
\r
5439 snd_pcm_info_set_stream( pcminfo, stream );
\r
5441 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5442 if ( result < 0 ) {
\r
5443 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5444 errorText_ = errorStream_.str();
\r
5445 error( RtError::WARNING );
\r
5449 // The device is open ... fill the parameter structure.
\r
5450 result = snd_pcm_hw_params_any( phandle, params );
\r
5451 if ( result < 0 ) {
\r
5452 snd_pcm_close( phandle );
\r
5453 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5454 errorText_ = errorStream_.str();
\r
5455 error( RtError::WARNING );
\r
5459 // Test our discrete set of sample rate values.
\r
5460 info.sampleRates.clear();
\r
5461 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
5462 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
5463 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
5465 if ( info.sampleRates.size() == 0 ) {
\r
5466 snd_pcm_close( phandle );
\r
5467 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
5468 errorText_ = errorStream_.str();
\r
5469 error( RtError::WARNING );
\r
5473 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
5474 snd_pcm_format_t format;
\r
5475 info.nativeFormats = 0;
\r
5476 format = SND_PCM_FORMAT_S8;
\r
5477 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5478 info.nativeFormats |= RTAUDIO_SINT8;
\r
5479 format = SND_PCM_FORMAT_S16;
\r
5480 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5481 info.nativeFormats |= RTAUDIO_SINT16;
\r
5482 format = SND_PCM_FORMAT_S24;
\r
5483 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5484 info.nativeFormats |= RTAUDIO_SINT24;
\r
5485 format = SND_PCM_FORMAT_S32;
\r
5486 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5487 info.nativeFormats |= RTAUDIO_SINT32;
\r
5488 format = SND_PCM_FORMAT_FLOAT;
\r
5489 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5490 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
5491 format = SND_PCM_FORMAT_FLOAT64;
\r
5492 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5493 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
5495 // Check that we have at least one supported format
\r
5496 if ( info.nativeFormats == 0 ) {
\r
5497 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
5498 errorText_ = errorStream_.str();
\r
5499 error( RtError::WARNING );
\r
5503 // Get the device name
\r
5505 result = snd_card_get_name( card, &cardname );
\r
5506 if ( result >= 0 )
\r
5507 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
5510 // That's all ... close the device and return
\r
5511 snd_pcm_close( phandle );
\r
5512 info.probed = true;
\r
5516 void RtApiAlsa :: saveDeviceInfo( void )
\r
5520 unsigned int nDevices = getDeviceCount();
\r
5521 devices_.resize( nDevices );
\r
5522 for ( unsigned int i=0; i<nDevices; i++ )
\r
5523 devices_[i] = getDeviceInfo( i );
\r
// RtApiAlsa::probeDeviceOpen(): open and configure one direction (playback or
// capture) of an ALSA PCM for the requested device/channels/rate/format, set
// up conversion buffers, and (for the first direction opened) spawn the
// callback thread.  Returns true on success.
//
// NOTE(review): this listing is a numbered extraction with dropped lines —
// closing braces, `return FAILURE;` statements on the error paths, `else`
// branches, the `try {` for the AlsaHandle allocation, and the declarations
// of locals such as `char name[64]` and `int dir` are missing.  Code below is
// left byte-identical to the extraction; restore from the canonical source.
5526 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5527 unsigned int firstChannel, unsigned int sampleRate,
\r
5528 RtAudioFormat format, unsigned int *bufferSize,
\r
5529 RtAudio::StreamOptions *options )
\r
// Debug builds attach an output handle so hw/sw params can be dumped below.
5532 #if defined(__RTAUDIO_DEBUG__)
\r
5533 snd_output_t *out;
\r
5534 snd_output_stdio_attach(&out, stderr, 0);
\r
5537 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
5539 unsigned nDevices = 0;
\r
5540 int result, subdevice, card;
\r
5542 snd_ctl_t *chandle;
\r
// RTAUDIO_ALSA_USE_DEFAULT bypasses the card/subdevice walk and opens "default".
5544 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
5545 snprintf(name, sizeof(name), "%s", "default");
\r
5547 // Count cards and devices
\r
// Walk cards until the nDevices counter reaches the requested device index,
// leaving "hw:card,subdevice" in `name`.
5549 snd_card_next( &card );
\r
5550 while ( card >= 0 ) {
\r
5551 sprintf( name, "hw:%d", card );
\r
5552 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5553 if ( result < 0 ) {
\r
5554 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5555 errorText_ = errorStream_.str();
\r
5560 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5561 if ( result < 0 ) break;
\r
5562 if ( subdevice < 0 ) break;
\r
5563 if ( nDevices == device ) {
\r
5564 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5565 snd_ctl_close( chandle );
\r
5570 snd_ctl_close( chandle );
\r
5571 snd_card_next( &card );
\r
// Sanity checks — callers are expected to have validated the device index.
5574 if ( nDevices == 0 ) {
\r
5575 // This should not happen because a check is made before this function is called.
\r
5576 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
5580 if ( device >= nDevices ) {
\r
5581 // This should not happen because a check is made before this function is called.
\r
5582 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
5589 // The getDeviceInfo() function will not work for a device that is
\r
5590 // already open. Thus, we'll probe the system before opening a
\r
5591 // stream and save the results for use by getDeviceInfo().
\r
5592 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
5593 this->saveDeviceInfo();
\r
5595 snd_pcm_stream_t stream;
\r
5596 if ( mode == OUTPUT )
\r
5597 stream = SND_PCM_STREAM_PLAYBACK;
\r
5599 stream = SND_PCM_STREAM_CAPTURE;
\r
// Open blocking (no SND_PCM_NONBLOCK here, unlike the getDeviceInfo probe).
5601 snd_pcm_t *phandle;
\r
5602 int openMode = SND_PCM_ASYNC;
\r
5603 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
5604 if ( result < 0 ) {
\r
5605 if ( mode == OUTPUT )
\r
5606 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
5608 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
5609 errorText_ = errorStream_.str();
\r
5613 // Fill the parameter structure.
\r
5614 snd_pcm_hw_params_t *hw_params;
\r
5615 snd_pcm_hw_params_alloca( &hw_params );
\r
5616 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
5617 if ( result < 0 ) {
\r
5618 snd_pcm_close( phandle );
\r
5619 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
5620 errorText_ = errorStream_.str();
\r
5624 #if defined(__RTAUDIO_DEBUG__)
\r
5625 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
5626 snd_pcm_hw_params_dump( hw_params, out );
\r
5629 // Set access ... check user preference.
\r
// Try the user's preferred (non-)interleaved access first, fall back to the
// other; deviceInterleaved[mode] records what the device actually accepted.
5630 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
5631 stream_.userInterleaved = false;
\r
5632 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5633 if ( result < 0 ) {
\r
5634 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5635 stream_.deviceInterleaved[mode] = true;
\r
5638 stream_.deviceInterleaved[mode] = false;
\r
5641 stream_.userInterleaved = true;
\r
5642 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5643 if ( result < 0 ) {
\r
5644 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5645 stream_.deviceInterleaved[mode] = false;
\r
5648 stream_.deviceInterleaved[mode] = true;
\r
5651 if ( result < 0 ) {
\r
5652 snd_pcm_close( phandle );
\r
5653 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
5654 errorText_ = errorStream_.str();
\r
5658 // Determine how to set the device format.
\r
5659 stream_.userFormat = format;
\r
5660 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
5662 if ( format == RTAUDIO_SINT8 )
\r
5663 deviceFormat = SND_PCM_FORMAT_S8;
\r
5664 else if ( format == RTAUDIO_SINT16 )
\r
5665 deviceFormat = SND_PCM_FORMAT_S16;
\r
5666 else if ( format == RTAUDIO_SINT24 )
\r
5667 deviceFormat = SND_PCM_FORMAT_S24;
\r
5668 else if ( format == RTAUDIO_SINT32 )
\r
5669 deviceFormat = SND_PCM_FORMAT_S32;
\r
5670 else if ( format == RTAUDIO_FLOAT32 )
\r
5671 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5672 else if ( format == RTAUDIO_FLOAT64 )
\r
5673 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5675 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
5676 stream_.deviceFormat[mode] = format;
\r
5680 // The user requested format is not natively supported by the device.
\r
// Fallback chain: highest-fidelity first (float64 -> ... -> int8); the
// conversion to/from the user's format happens later in callbackEvent().
5681 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5682 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
5683 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
5687 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5688 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5689 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
5693 deviceFormat = SND_PCM_FORMAT_S32;
\r
5694 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5695 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
5699 deviceFormat = SND_PCM_FORMAT_S24;
\r
5700 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5701 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
5705 deviceFormat = SND_PCM_FORMAT_S16;
\r
5706 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5707 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5711 deviceFormat = SND_PCM_FORMAT_S8;
\r
5712 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5713 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5717 // If we get here, no supported format was found.
\r
5718 snd_pcm_close( phandle );
\r
5719 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
5720 errorText_ = errorStream_.str();
\r
5724 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
5725 if ( result < 0 ) {
\r
5726 snd_pcm_close( phandle );
\r
5727 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
5728 errorText_ = errorStream_.str();
\r
5732 // Determine whether byte-swaping is necessary.
\r
// snd_pcm_format_cpu_endian() returns 1 if the format matches CPU endianness,
// 0 if it does not (swap needed), negative on error.  S8 needs no check.
5733 stream_.doByteSwap[mode] = false;
\r
5734 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
5735 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
5736 if ( result == 0 )
\r
5737 stream_.doByteSwap[mode] = true;
\r
5738 else if (result < 0) {
\r
5739 snd_pcm_close( phandle );
\r
5740 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
5741 errorText_ = errorStream_.str();
\r
5746 // Set the sample rate.
\r
// set_rate_near may adjust sampleRate to the closest supported value.
5747 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
5748 if ( result < 0 ) {
\r
5749 snd_pcm_close( phandle );
\r
5750 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5751 errorText_ = errorStream_.str();
\r
5755 // Determine the number of channels for this device. We support a possible
\r
5756 // minimum device channel number > than the value requested by the user.
\r
5757 stream_.nUserChannels[mode] = channels;
\r
5758 unsigned int value;
\r
5759 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
5760 unsigned int deviceChannels = value;
\r
// The device must offer at least channels + firstChannel (channel offset).
5761 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
5762 snd_pcm_close( phandle );
\r
5763 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
5764 errorText_ = errorStream_.str();
\r
5768 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
5769 if ( result < 0 ) {
\r
5770 snd_pcm_close( phandle );
\r
5771 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5772 errorText_ = errorStream_.str();
\r
// Open with the device's minimum channel count, bumped up to cover the
// user's request; extra channels are handled by the conversion buffer.
5775 deviceChannels = value;
\r
5776 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
5777 stream_.nDeviceChannels[mode] = deviceChannels;
\r
5779 // Set the device channels.
\r
5780 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
5781 if ( result < 0 ) {
\r
5782 snd_pcm_close( phandle );
\r
5783 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5784 errorText_ = errorStream_.str();
\r
5788 // Set the buffer (or period) size.
\r
// NOTE(review): the declaration of `dir` (int) used below was dropped by the
// extraction — confirm against the canonical source.
5790 snd_pcm_uframes_t periodSize = *bufferSize;
\r
5791 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
5792 if ( result < 0 ) {
\r
5793 snd_pcm_close( phandle );
\r
5794 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5795 errorText_ = errorStream_.str();
\r
// Report the (possibly adjusted) period size back to the caller.
5798 *bufferSize = periodSize;
\r
5800 // Set the buffer number, which in ALSA is referred to as the "period".
\r
5801 unsigned int periods = 0;
\r
5802 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
5803 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
5804 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
5805 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
5806 if ( result < 0 ) {
\r
5807 snd_pcm_close( phandle );
\r
5808 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5809 errorText_ = errorStream_.str();
\r
5813 // If attempting to setup a duplex stream, the bufferSize parameter
\r
5814 // MUST be the same in both directions!
\r
5815 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
5816 snd_pcm_close( phandle );
\r
5817 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
5818 errorText_ = errorStream_.str();
\r
5822 stream_.bufferSize = *bufferSize;
\r
5824 // Install the hardware configuration
\r
5825 result = snd_pcm_hw_params( phandle, hw_params );
\r
5826 if ( result < 0 ) {
\r
5827 snd_pcm_close( phandle );
\r
5828 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5829 errorText_ = errorStream_.str();
\r
5833 #if defined(__RTAUDIO_DEBUG__)
\r
5834 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
5835 snd_pcm_hw_params_dump( hw_params, out );
\r
5838 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
5839 snd_pcm_sw_params_t *sw_params = NULL;
\r
5840 snd_pcm_sw_params_alloca( &sw_params );
\r
5841 snd_pcm_sw_params_current( phandle, sw_params );
\r
5842 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
// ULONG_MAX stop threshold: device never auto-stops on underrun/overrun.
5843 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
5844 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
5846 // The following two settings were suggested by Theo Veenker
\r
5847 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
5848 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
5850 // here are two options for a fix
\r
5851 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
// Silence the whole ring buffer (boundary = whole buffer in frames).
5852 snd_pcm_uframes_t val;
\r
5853 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
5854 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
5856 result = snd_pcm_sw_params( phandle, sw_params );
\r
5857 if ( result < 0 ) {
\r
5858 snd_pcm_close( phandle );
\r
5859 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5860 errorText_ = errorStream_.str();
\r
5864 #if defined(__RTAUDIO_DEBUG__)
\r
5865 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
5866 snd_pcm_sw_params_dump( sw_params, out );
\r
5869 // Set flags for buffer conversion
\r
// Conversion is needed when format, channel count, or interleaving differs
// between the user-facing buffer and what the device accepted.
5870 stream_.doConvertBuffer[mode] = false;
\r
5871 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
5872 stream_.doConvertBuffer[mode] = true;
\r
5873 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
5874 stream_.doConvertBuffer[mode] = true;
\r
5875 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5876 stream_.nUserChannels[mode] > 1 )
\r
5877 stream_.doConvertBuffer[mode] = true;
\r
5879 // Allocate the ApiHandle if necessary and then save.
\r
// First direction opened allocates the AlsaHandle; second (duplex) reuses it.
5880 AlsaHandle *apiInfo = 0;
\r
5881 if ( stream_.apiHandle == 0 ) {
\r
5883 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
5885 catch ( std::bad_alloc& ) {
\r
5886 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
5890 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
5891 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
5895 stream_.apiHandle = (void *) apiInfo;
\r
5896 apiInfo->handles[0] = 0;
\r
5897 apiInfo->handles[1] = 0;
\r
5900 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
// handles[0] = playback, handles[1] = capture (indexed by StreamMode).
5902 apiInfo->handles[mode] = phandle;
\r
5905 // Allocate necessary internal buffers.
\r
5906 unsigned long bufferBytes;
\r
5907 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5908 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5909 if ( stream_.userBuffer[mode] == NULL ) {
\r
5910 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
// A single deviceBuffer is shared between directions; reuse the output-mode
// buffer for input if it is already large enough.
5914 if ( stream_.doConvertBuffer[mode] ) {
\r
5916 bool makeBuffer = true;
\r
5917 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5918 if ( mode == INPUT ) {
\r
5919 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5920 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5921 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
5925 if ( makeBuffer ) {
\r
5926 bufferBytes *= *bufferSize;
\r
5927 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5928 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5929 if ( stream_.deviceBuffer == NULL ) {
\r
5930 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
5936 stream_.sampleRate = sampleRate;
\r
5937 stream_.nBuffers = periods;
\r
5938 stream_.device[mode] = device;
\r
5939 stream_.state = STREAM_STOPPED;
\r
5941 // Setup the buffer conversion information structure.
\r
5942 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5944 // Setup thread if necessary.
\r
// Opening INPUT after OUTPUT upgrades the stream to DUPLEX and tries to link
// the two PCMs so they start/stop together (synchronized flag).
5945 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
5946 // We had already set up an output stream.
\r
5947 stream_.mode = DUPLEX;
\r
5948 // Link the streams if possible.
\r
5949 apiInfo->synchronized = false;
\r
5950 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
5951 apiInfo->synchronized = true;
\r
5953 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
5954 error( RtError::WARNING );
\r
5958 stream_.mode = mode;
\r
5960 // Setup callback thread.
\r
5961 stream_.callbackInfo.object = (void *) this;
\r
5963 // Set the thread attributes for joinable and realtime scheduling
\r
5964 // priority (optional). The higher priority will only take affect
\r
5965 // if the program is run as root or suid. Note, under Linux
\r
5966 // processes with CAP_SYS_NICE privilege, a user can change
\r
5967 // scheduling policy and priority (thus need not be root). See
\r
5968 // POSIX "capabilities".
\r
5969 pthread_attr_t attr;
\r
5970 pthread_attr_init( &attr );
\r
5971 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
5972 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
// Clamp the requested priority into [min, max] for SCHED_RR.
5973 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
5974 struct sched_param param;
\r
5975 int priority = options->priority;
\r
5976 int min = sched_get_priority_min( SCHED_RR );
\r
5977 int max = sched_get_priority_max( SCHED_RR );
\r
5978 if ( priority < min ) priority = min;
\r
5979 else if ( priority > max ) priority = max;
\r
5980 param.sched_priority = priority;
\r
// BUG(review): "¶m" below is a mojibake of "&param" ('&para;' + 'm')
// introduced by a bad encoding pass — restore "&param" when fixing the file.
5981 pthread_attr_setschedparam( &attr, ¶m );
\r
5982 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
5985 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
5987 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
5990 stream_.callbackInfo.isRunning = true;
\r
5991 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
5992 pthread_attr_destroy( &attr );
\r
5994 stream_.callbackInfo.isRunning = false;
\r
5995 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
// Error cleanup path (reached via dropped goto/label lines): tear down the
// condition variable, PCM handles, AlsaHandle, and all allocated buffers.
6004 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
6005 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
6006 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
6008 stream_.apiHandle = 0;
\r
6011 if ( phandle) snd_pcm_close( phandle );
\r
6013 for ( int i=0; i<2; i++ ) {
\r
6014 if ( stream_.userBuffer[i] ) {
\r
6015 free( stream_.userBuffer[i] );
\r
6016 stream_.userBuffer[i] = 0;
\r
6020 if ( stream_.deviceBuffer ) {
\r
6021 free( stream_.deviceBuffer );
\r
6022 stream_.deviceBuffer = 0;
\r
// RtApiAlsa::closeStream(): stop the callback thread, drop any running PCMs,
// close the ALSA handles, free all buffers, and mark the stream CLOSED.
// NOTE(review): numbered extraction with dropped lines (closing braces,
// early return after the WARNING, `delete apiInfo`); code left byte-identical.
6028 void RtApiAlsa :: closeStream()
\r
// Guard: warn (and presumably return — line dropped) if nothing is open.
6030 if ( stream_.state == STREAM_CLOSED ) {
\r
6031 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
6032 error( RtError::WARNING );
\r
// Stop the callback loop, then wake the thread if it is parked on the
// runnable condition variable so pthread_join() below cannot deadlock.
6036 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6037 stream_.callbackInfo.isRunning = false;
\r
6038 MUTEX_LOCK( &stream_.mutex );
\r
6039 if ( stream_.state == STREAM_STOPPED ) {
\r
6040 apiInfo->runnable = true;
\r
6041 pthread_cond_signal( &apiInfo->runnable_cv );
\r
6043 MUTEX_UNLOCK( &stream_.mutex );
\r
6044 pthread_join( stream_.callbackInfo.thread, NULL );
\r
// If still running, drop (abort) both directions immediately.
6046 if ( stream_.state == STREAM_RUNNING ) {
\r
6047 stream_.state = STREAM_STOPPED;
\r
6048 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
6049 snd_pcm_drop( apiInfo->handles[0] );
\r
6050 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
6051 snd_pcm_drop( apiInfo->handles[1] );
\r
// Release the per-API handle resources (cv, pcm handles).
6055 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
6056 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
6057 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
6059 stream_.apiHandle = 0;
\r
// Free user buffers for both directions and the shared device buffer.
6062 for ( int i=0; i<2; i++ ) {
\r
6063 if ( stream_.userBuffer[i] ) {
\r
6064 free( stream_.userBuffer[i] );
\r
6065 stream_.userBuffer[i] = 0;
\r
6069 if ( stream_.deviceBuffer ) {
\r
6070 free( stream_.deviceBuffer );
\r
6071 stream_.deviceBuffer = 0;
\r
6074 stream_.mode = UNINITIALIZED;
\r
6075 stream_.state = STREAM_CLOSED;
\r
// RtApiAlsa::startStream(): prepare the PCM(s) if needed, mark the stream
// RUNNING, and wake the callback thread waiting on runnable_cv.
// NOTE(review): numbered extraction with dropped lines (closing braces,
// `int result = 0;` declaration, unlock/goto on error paths); byte-identical.
6078 void RtApiAlsa :: startStream()
\r
6080 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
6083 if ( stream_.state == STREAM_RUNNING ) {
\r
6084 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
6085 error( RtError::WARNING );
\r
6089 MUTEX_LOCK( &stream_.mutex );
\r
6092 snd_pcm_state_t state;
\r
6093 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6094 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
// Prepare playback handle (handles[0]) unless already PREPARED.
6095 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6096 state = snd_pcm_state( handle[0] );
\r
6097 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6098 result = snd_pcm_prepare( handle[0] );
\r
6099 if ( result < 0 ) {
\r
6100 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
6101 errorText_ = errorStream_.str();
\r
// Prepare capture handle (handles[1]) only if not linked to the output;
// a linked (synchronized) capture PCM follows the playback PCM's state.
6107 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6108 state = snd_pcm_state( handle[1] );
\r
6109 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6110 result = snd_pcm_prepare( handle[1] );
\r
6111 if ( result < 0 ) {
\r
6112 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
6113 errorText_ = errorStream_.str();
\r
6119 stream_.state = STREAM_RUNNING;
\r
// Wake the callback thread parked in callbackEvent() on runnable_cv.
6122 apiInfo->runnable = true;
\r
6123 pthread_cond_signal( &apiInfo->runnable_cv );
\r
6124 MUTEX_UNLOCK( &stream_.mutex );
\r
6126 if ( result >= 0 ) return;
\r
6127 error( RtError::SYSTEM_ERROR );
\r
// RtApiAlsa::stopStream(): gracefully stop the stream — drain pending
// playback samples (snd_pcm_drain) and drop capture — then mark STOPPED.
// NOTE(review): numbered extraction with dropped lines (closing braces,
// `int result = 0;`, `else` before the drain call); code left byte-identical.
6130 void RtApiAlsa :: stopStream()
\r
6133 if ( stream_.state == STREAM_STOPPED ) {
\r
6134 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
6135 error( RtError::WARNING );
\r
// State is flipped before taking the mutex so the callback thread parks.
6139 stream_.state = STREAM_STOPPED;
\r
6140 MUTEX_LOCK( &stream_.mutex );
\r
6142 //if ( stream_.state == STREAM_STOPPED ) {
\r
6143 // MUTEX_UNLOCK( &stream_.mutex );
\r
6148 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6149 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
// Linked (synchronized) streams must use drop; drain would block forever.
// Otherwise drain so queued output samples are played before stopping.
6150 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6151 if ( apiInfo->synchronized )
\r
6152 result = snd_pcm_drop( handle[0] );
\r
6154 result = snd_pcm_drain( handle[0] );
\r
6155 if ( result < 0 ) {
\r
6156 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
6157 errorText_ = errorStream_.str();
\r
// Capture side has nothing worth draining — drop it.
6162 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6163 result = snd_pcm_drop( handle[1] );
\r
6164 if ( result < 0 ) {
\r
6165 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
6166 errorText_ = errorStream_.str();
\r
6172 stream_.state = STREAM_STOPPED;
\r
6173 MUTEX_UNLOCK( &stream_.mutex );
\r
6175 if ( result >= 0 ) return;
\r
6176 error( RtError::SYSTEM_ERROR );
\r
// RtApiAlsa::abortStream(): stop immediately — snd_pcm_drop() discards any
// pending samples in both directions (contrast stopStream(), which drains
// playback).  NOTE(review): numbered extraction with dropped lines (closing
// braces, `int result = 0;`, unlock/goto on error); code left byte-identical.
6179 void RtApiAlsa :: abortStream()
\r
6182 if ( stream_.state == STREAM_STOPPED ) {
\r
6183 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
6184 error( RtError::WARNING );
\r
// Flip state before locking so the callback thread parks on its next pass.
6188 stream_.state = STREAM_STOPPED;
\r
6189 MUTEX_LOCK( &stream_.mutex );
\r
6191 //if ( stream_.state == STREAM_STOPPED ) {
\r
6192 // MUTEX_UNLOCK( &stream_.mutex );
\r
6197 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6198 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
// Drop playback immediately (pending samples discarded).
6199 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6200 result = snd_pcm_drop( handle[0] );
\r
6201 if ( result < 0 ) {
\r
6202 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
6203 errorText_ = errorStream_.str();
\r
// Drop capture only when not linked; a linked PCM follows handle[0].
6208 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6209 result = snd_pcm_drop( handle[1] );
\r
6210 if ( result < 0 ) {
\r
6211 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
6212 errorText_ = errorStream_.str();
\r
6218 stream_.state = STREAM_STOPPED;
\r
6219 MUTEX_UNLOCK( &stream_.mutex );
\r
6221 if ( result >= 0 ) return;
\r
6222 error( RtError::SYSTEM_ERROR );
\r
// RtApiAlsa::callbackEvent(): one iteration of the audio callback loop run by
// the alsaCallbackHandler thread — wait while stopped, invoke the user
// callback, then read capture data and/or write playback data, recovering
// from xruns (-EPIPE) via snd_pcm_prepare().
// NOTE(review): numbered extraction with dropped lines (closing braces,
// `else` branches, `return` statements, declarations of `buffer`/`channels`,
// the `unlock:` label, stopStream-on-abort handling after doStopStream == 2);
// code left byte-identical.
6225 void RtApiAlsa :: callbackEvent()
\r
// Park on runnable_cv while the stream is stopped; startStream()/closeStream()
// set runnable and signal to wake this thread.
6227 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6228 if ( stream_.state == STREAM_STOPPED ) {
\r
6229 MUTEX_LOCK( &stream_.mutex );
\r
6230 while ( !apiInfo->runnable )
\r
6231 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
6233 if ( stream_.state != STREAM_RUNNING ) {
\r
6234 MUTEX_UNLOCK( &stream_.mutex );
\r
6237 MUTEX_UNLOCK( &stream_.mutex );
\r
6240 if ( stream_.state == STREAM_CLOSED ) {
\r
6241 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6242 error( RtError::WARNING );
\r
// Build the status flags from any xruns recorded on previous iterations,
// then invoke the user callback with the user-facing buffers.
6246 int doStopStream = 0;
\r
6247 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
6248 double streamTime = getStreamTime();
\r
6249 RtAudioStreamStatus status = 0;
\r
6250 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
6251 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6252 apiInfo->xrun[0] = false;
\r
6254 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
6255 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6256 apiInfo->xrun[1] = false;
\r
6258 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6259 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
// Callback return of 2 requests an immediate abort (handling lines dropped).
6261 if ( doStopStream == 2 ) {
\r
6266 MUTEX_LOCK( &stream_.mutex );
\r
6268 // The state might change while waiting on a mutex.
\r
6269 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
6274 snd_pcm_t **handle;
\r
6275 snd_pcm_sframes_t frames;
\r
6276 RtAudioFormat format;
\r
6277 handle = (snd_pcm_t **) apiInfo->handles;
\r
// ---- Capture side: read into deviceBuffer (if converting) or userBuffer.
6279 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6281 // Setup parameters.
\r
6282 if ( stream_.doConvertBuffer[1] ) {
\r
6283 buffer = stream_.deviceBuffer;
\r
6284 channels = stream_.nDeviceChannels[1];
\r
6285 format = stream_.deviceFormat[1];
\r
6288 buffer = stream_.userBuffer[1];
\r
6289 channels = stream_.nUserChannels[1];
\r
6290 format = stream_.userFormat;
\r
6293 // Read samples from device in interleaved/non-interleaved format.
\r
6294 if ( stream_.deviceInterleaved[1] )
\r
6295 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
// Non-interleaved: build one pointer per channel into the planar buffer.
6297 void *bufs[channels];
\r
6298 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6299 for ( int i=0; i<channels; i++ )
\r
6300 bufs[i] = (void *) (buffer + (i * offset));
\r
6301 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
// Short read: -EPIPE means overrun — record it and re-prepare the PCM.
6304 if ( result < (int) stream_.bufferSize ) {
\r
6305 // Either an error or overrun occured.
\r
6306 if ( result == -EPIPE ) {
\r
6307 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
6308 if ( state == SND_PCM_STATE_XRUN ) {
\r
6309 apiInfo->xrun[1] = true;
\r
6310 result = snd_pcm_prepare( handle[1] );
\r
6311 if ( result < 0 ) {
\r
6312 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
6313 errorText_ = errorStream_.str();
\r
6317 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6318 errorText_ = errorStream_.str();
\r
6322 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
6323 errorText_ = errorStream_.str();
\r
6325 error( RtError::WARNING );
\r
6329 // Do byte swapping if necessary.
\r
6330 if ( stream_.doByteSwap[1] )
\r
6331 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
6333 // Do buffer conversion if necessary.
\r
6334 if ( stream_.doConvertBuffer[1] )
\r
6335 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6337 // Check stream latency
\r
6338 result = snd_pcm_delay( handle[1], &frames );
\r
6339 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
// ---- Playback side: convert/byte-swap, then write to the device.
6344 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6346 // Setup parameters and do buffer conversion if necessary.
\r
6347 if ( stream_.doConvertBuffer[0] ) {
\r
6348 buffer = stream_.deviceBuffer;
\r
6349 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6350 channels = stream_.nDeviceChannels[0];
\r
6351 format = stream_.deviceFormat[0];
\r
6354 buffer = stream_.userBuffer[0];
\r
6355 channels = stream_.nUserChannels[0];
\r
6356 format = stream_.userFormat;
\r
6359 // Do byte swapping if necessary.
\r
6360 if ( stream_.doByteSwap[0] )
\r
6361 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
6363 // Write samples to device in interleaved/non-interleaved format.
\r
6364 if ( stream_.deviceInterleaved[0] )
\r
6365 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
// Non-interleaved: one plane pointer per channel, as on the read side.
6367 void *bufs[channels];
\r
6368 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6369 for ( int i=0; i<channels; i++ )
\r
6370 bufs[i] = (void *) (buffer + (i * offset));
\r
6371 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
// Short write: -EPIPE means underrun — record it and re-prepare the PCM.
6374 if ( result < (int) stream_.bufferSize ) {
\r
6375 // Either an error or underrun occured.
\r
6376 if ( result == -EPIPE ) {
\r
6377 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
6378 if ( state == SND_PCM_STATE_XRUN ) {
\r
6379 apiInfo->xrun[0] = true;
\r
6380 result = snd_pcm_prepare( handle[0] );
\r
6381 if ( result < 0 ) {
\r
6382 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
6383 errorText_ = errorStream_.str();
\r
6387 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6388 errorText_ = errorStream_.str();
\r
6392 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
6393 errorText_ = errorStream_.str();
\r
6395 error( RtError::WARNING );
\r
6399 // Check stream latency
\r
6400 result = snd_pcm_delay( handle[0], &frames );
\r
6401 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
// (Dropped `unlock:` label belongs here in the canonical source.)
6405 MUTEX_UNLOCK( &stream_.mutex );
\r
// Advance the stream time; callback return of 1 requests a graceful stop.
6407 RtApi::tickStreamTime();
\r
6408 if ( doStopStream == 1 ) this->stopStream();
\r
6411 extern "C" void *alsaCallbackHandler( void *ptr )
\r
6413 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6414 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
6415 bool *isRunning = &info->isRunning;
\r
6417 while ( *isRunning == true ) {
\r
6418 pthread_testcancel();
\r
6419 object->callbackEvent();
\r
6422 pthread_exit( NULL );
\r
6425 //******************** End of __LINUX_ALSA__ *********************//
\r
6429 #if defined(__LINUX_OSS__)
\r
6431 #include <unistd.h>
\r
6432 #include <sys/ioctl.h>
\r
6433 #include <unistd.h>
\r
6434 #include <fcntl.h>
\r
6435 #include "soundcard.h"
\r
6436 #include <errno.h>
\r
6439 extern "C" void *ossCallbackHandler(void * ptr);
\r
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];               // device ids (0 = playback, 1 = capture)
  bool xrun[2];            // over/underrun flags, per direction
  bool triggered;          // duplex trigger state
  pthread_cond_t runnable; // initialized via pthread_cond_init elsewhere — TODO confirm

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
6453 RtApiOss :: RtApiOss()
\r
6455 // Nothing to do here.
\r
6458 RtApiOss :: ~RtApiOss()
\r
6460 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6463 unsigned int RtApiOss :: getDeviceCount( void )
\r
6465 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6466 if ( mixerfd == -1 ) {
\r
6467 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
6468 error( RtError::WARNING );
\r
6472 oss_sysinfo sysinfo;
\r
6473 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
6475 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6476 error( RtError::WARNING );
\r
6481 return sysinfo.numaudios;
\r
6484 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
6486 RtAudio::DeviceInfo info;
\r
6487 info.probed = false;
\r
6489 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6490 if ( mixerfd == -1 ) {
\r
6491 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
6492 error( RtError::WARNING );
\r
6496 oss_sysinfo sysinfo;
\r
6497 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6498 if ( result == -1 ) {
\r
6500 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6501 error( RtError::WARNING );
\r
6505 unsigned nDevices = sysinfo.numaudios;
\r
6506 if ( nDevices == 0 ) {
\r
6508 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
6509 error( RtError::INVALID_USE );
\r
6512 if ( device >= nDevices ) {
\r
6514 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
6515 error( RtError::INVALID_USE );
\r
6518 oss_audioinfo ainfo;
\r
6519 ainfo.dev = device;
\r
6520 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6522 if ( result == -1 ) {
\r
6523 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6524 errorText_ = errorStream_.str();
\r
6525 error( RtError::WARNING );
\r
6530 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
6531 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
6532 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
6533 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
6534 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6537 // Probe data formats ... do for input
\r
6538 unsigned long mask = ainfo.iformats;
\r
6539 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
6540 info.nativeFormats |= RTAUDIO_SINT16;
\r
6541 if ( mask & AFMT_S8 )
\r
6542 info.nativeFormats |= RTAUDIO_SINT8;
\r
6543 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
6544 info.nativeFormats |= RTAUDIO_SINT32;
\r
6545 if ( mask & AFMT_FLOAT )
\r
6546 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
6547 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
6548 info.nativeFormats |= RTAUDIO_SINT24;
\r
6550 // Check that we have at least one supported format
\r
6551 if ( info.nativeFormats == 0 ) {
\r
6552 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
6553 errorText_ = errorStream_.str();
\r
6554 error( RtError::WARNING );
\r
6558 // Probe the supported sample rates.
\r
6559 info.sampleRates.clear();
\r
6560 if ( ainfo.nrates ) {
\r
6561 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
6562 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
6563 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
6564 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
6571 // Check min and max rate values;
\r
6572 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
6573 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
6574 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
6578 if ( info.sampleRates.size() == 0 ) {
\r
6579 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
6580 errorText_ = errorStream_.str();
\r
6581 error( RtError::WARNING );
\r
6584 info.probed = true;
\r
6585 info.name = ainfo.name;
\r
6592 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
6593 unsigned int firstChannel, unsigned int sampleRate,
\r
6594 RtAudioFormat format, unsigned int *bufferSize,
\r
6595 RtAudio::StreamOptions *options )
\r
6597 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6598 if ( mixerfd == -1 ) {
\r
6599 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
6603 oss_sysinfo sysinfo;
\r
6604 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6605 if ( result == -1 ) {
\r
6607 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6611 unsigned nDevices = sysinfo.numaudios;
\r
6612 if ( nDevices == 0 ) {
\r
6613 // This should not happen because a check is made before this function is called.
\r
6615 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
6619 if ( device >= nDevices ) {
\r
6620 // This should not happen because a check is made before this function is called.
\r
6622 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
6626 oss_audioinfo ainfo;
\r
6627 ainfo.dev = device;
\r
6628 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6630 if ( result == -1 ) {
\r
6631 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6632 errorText_ = errorStream_.str();
\r
6636 // Check if device supports input or output
\r
6637 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
6638 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
6639 if ( mode == OUTPUT )
\r
6640 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
6642 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
6643 errorText_ = errorStream_.str();
\r
6648 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
6649 if ( mode == OUTPUT )
\r
6650 flags |= O_WRONLY;
\r
6651 else { // mode == INPUT
\r
6652 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
6653 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
6654 close( handle->id[0] );
\r
6655 handle->id[0] = 0;
\r
6656 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
6657 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
6658 errorText_ = errorStream_.str();
\r
6661 // Check that the number previously set channels is the same.
\r
6662 if ( stream_.nUserChannels[0] != channels ) {
\r
6663 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
6664 errorText_ = errorStream_.str();
\r
6670 flags |= O_RDONLY;
\r
6673 // Set exclusive access if specified.
\r
6674 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
6676 // Try to open the device.
\r
6678 fd = open( ainfo.devnode, flags, 0 );
\r
6680 if ( errno == EBUSY )
\r
6681 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
6683 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
6684 errorText_ = errorStream_.str();
\r
6688 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
6690 if ( flags | O_RDWR ) {
\r
6691 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
6692 if ( result == -1) {
\r
6693 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
6694 errorText_ = errorStream_.str();
\r
6700 // Check the device channel support.
\r
6701 stream_.nUserChannels[mode] = channels;
\r
6702 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
6704 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
6705 errorText_ = errorStream_.str();
\r
6709 // Set the number of channels.
\r
6710 int deviceChannels = channels + firstChannel;
\r
6711 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
6712 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
6714 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
6715 errorText_ = errorStream_.str();
\r
6718 stream_.nDeviceChannels[mode] = deviceChannels;
\r
6720 // Get the data format mask
\r
6722 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
6723 if ( result == -1 ) {
\r
6725 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
6726 errorText_ = errorStream_.str();
\r
6730 // Determine how to set the device format.
\r
6731 stream_.userFormat = format;
\r
6732 int deviceFormat = -1;
\r
6733 stream_.doByteSwap[mode] = false;
\r
6734 if ( format == RTAUDIO_SINT8 ) {
\r
6735 if ( mask & AFMT_S8 ) {
\r
6736 deviceFormat = AFMT_S8;
\r
6737 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
6740 else if ( format == RTAUDIO_SINT16 ) {
\r
6741 if ( mask & AFMT_S16_NE ) {
\r
6742 deviceFormat = AFMT_S16_NE;
\r
6743 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6745 else if ( mask & AFMT_S16_OE ) {
\r
6746 deviceFormat = AFMT_S16_OE;
\r
6747 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6748 stream_.doByteSwap[mode] = true;
\r
6751 else if ( format == RTAUDIO_SINT24 ) {
\r
6752 if ( mask & AFMT_S24_NE ) {
\r
6753 deviceFormat = AFMT_S24_NE;
\r
6754 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6756 else if ( mask & AFMT_S24_OE ) {
\r
6757 deviceFormat = AFMT_S24_OE;
\r
6758 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6759 stream_.doByteSwap[mode] = true;
\r
6762 else if ( format == RTAUDIO_SINT32 ) {
\r
6763 if ( mask & AFMT_S32_NE ) {
\r
6764 deviceFormat = AFMT_S32_NE;
\r
6765 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6767 else if ( mask & AFMT_S32_OE ) {
\r
6768 deviceFormat = AFMT_S32_OE;
\r
6769 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6770 stream_.doByteSwap[mode] = true;
\r
6774 if ( deviceFormat == -1 ) {
\r
6775 // The user requested format is not natively supported by the device.
\r
6776 if ( mask & AFMT_S16_NE ) {
\r
6777 deviceFormat = AFMT_S16_NE;
\r
6778 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6780 else if ( mask & AFMT_S32_NE ) {
\r
6781 deviceFormat = AFMT_S32_NE;
\r
6782 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6784 else if ( mask & AFMT_S24_NE ) {
\r
6785 deviceFormat = AFMT_S24_NE;
\r
6786 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6788 else if ( mask & AFMT_S16_OE ) {
\r
6789 deviceFormat = AFMT_S16_OE;
\r
6790 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6791 stream_.doByteSwap[mode] = true;
\r
6793 else if ( mask & AFMT_S32_OE ) {
\r
6794 deviceFormat = AFMT_S32_OE;
\r
6795 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6796 stream_.doByteSwap[mode] = true;
\r
6798 else if ( mask & AFMT_S24_OE ) {
\r
6799 deviceFormat = AFMT_S24_OE;
\r
6800 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6801 stream_.doByteSwap[mode] = true;
\r
6803 else if ( mask & AFMT_S8) {
\r
6804 deviceFormat = AFMT_S8;
\r
6805 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
6809 if ( stream_.deviceFormat[mode] == 0 ) {
\r
6810 // This really shouldn't happen ...
\r
6812 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
6813 errorText_ = errorStream_.str();
\r
6817 // Set the data format.
\r
6818 int temp = deviceFormat;
\r
6819 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
6820 if ( result == -1 || deviceFormat != temp ) {
\r
6822 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
6823 errorText_ = errorStream_.str();
\r
6827 // Attempt to set the buffer size. According to OSS, the minimum
\r
6828 // number of buffers is two. The supposed minimum buffer size is 16
\r
6829 // bytes, so that will be our lower bound. The argument to this
\r
6830 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
6831 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
6832 // We'll check the actual value used near the end of the setup
\r
6834 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
6835 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
6837 if ( options ) buffers = options->numberOfBuffers;
\r
6838 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
6839 if ( buffers < 2 ) buffers = 3;
\r
6840 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
6841 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
6842 if ( result == -1 ) {
\r
6844 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
6845 errorText_ = errorStream_.str();
\r
6848 stream_.nBuffers = buffers;
\r
6850 // Save buffer size (in sample frames).
\r
6851 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
6852 stream_.bufferSize = *bufferSize;
\r
6854 // Set the sample rate.
\r
6855 int srate = sampleRate;
\r
6856 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
6857 if ( result == -1 ) {
\r
6859 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
6860 errorText_ = errorStream_.str();
\r
6864 // Verify the sample rate setup worked.
\r
6865 if ( abs( srate - sampleRate ) > 100 ) {
\r
6867 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
6868 errorText_ = errorStream_.str();
\r
6871 stream_.sampleRate = sampleRate;
\r
6873 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
6874 // We're doing duplex setup here.
\r
6875 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
6876 stream_.nDeviceChannels[0] = deviceChannels;
\r
6879 // Set interleaving parameters.
\r
6880 stream_.userInterleaved = true;
\r
6881 stream_.deviceInterleaved[mode] = true;
\r
6882 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
6883 stream_.userInterleaved = false;
\r
6885 // Set flags for buffer conversion
\r
6886 stream_.doConvertBuffer[mode] = false;
\r
6887 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
6888 stream_.doConvertBuffer[mode] = true;
\r
6889 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
6890 stream_.doConvertBuffer[mode] = true;
\r
6891 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
6892 stream_.nUserChannels[mode] > 1 )
\r
6893 stream_.doConvertBuffer[mode] = true;
\r
6895 // Allocate the stream handles if necessary and then save.
\r
6896 if ( stream_.apiHandle == 0 ) {
\r
6898 handle = new OssHandle;
\r
6900 catch ( std::bad_alloc& ) {
\r
6901 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
6905 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
6906 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
6910 stream_.apiHandle = (void *) handle;
\r
6913 handle = (OssHandle *) stream_.apiHandle;
\r
6915 handle->id[mode] = fd;
\r
6917 // Allocate necessary internal buffers.
\r
6918 unsigned long bufferBytes;
\r
6919 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
6920 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
6921 if ( stream_.userBuffer[mode] == NULL ) {
\r
6922 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
6926 if ( stream_.doConvertBuffer[mode] ) {
\r
6928 bool makeBuffer = true;
\r
6929 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
6930 if ( mode == INPUT ) {
\r
6931 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
6932 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
6933 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
6937 if ( makeBuffer ) {
\r
6938 bufferBytes *= *bufferSize;
\r
6939 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
6940 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
6941 if ( stream_.deviceBuffer == NULL ) {
\r
6942 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
6948 stream_.device[mode] = device;
\r
6949 stream_.state = STREAM_STOPPED;
\r
6951 // Setup the buffer conversion information structure.
\r
6952 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
6954 // Setup thread if necessary.
\r
6955 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
6956 // We had already set up an output stream.
\r
6957 stream_.mode = DUPLEX;
\r
6958 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
6961 stream_.mode = mode;
\r
6963 // Setup callback thread.
\r
6964 stream_.callbackInfo.object = (void *) this;
\r
6966 // Set the thread attributes for joinable and realtime scheduling
\r
6967 // priority. The higher priority will only take affect if the
\r
6968 // program is run as root or suid.
\r
6969 pthread_attr_t attr;
\r
6970 pthread_attr_init( &attr );
\r
6971 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
6972 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
6973 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
6974 struct sched_param param;
\r
6975 int priority = options->priority;
\r
6976 int min = sched_get_priority_min( SCHED_RR );
\r
6977 int max = sched_get_priority_max( SCHED_RR );
\r
6978 if ( priority < min ) priority = min;
\r
6979 else if ( priority > max ) priority = max;
\r
6980 param.sched_priority = priority;
\r
6981 pthread_attr_setschedparam( &attr, ¶m );
\r
6982 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
6985 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
6987 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
6990 stream_.callbackInfo.isRunning = true;
\r
6991 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
6992 pthread_attr_destroy( &attr );
\r
6994 stream_.callbackInfo.isRunning = false;
\r
6995 errorText_ = "RtApiOss::error creating callback thread!";
\r
7004 pthread_cond_destroy( &handle->runnable );
\r
7005 if ( handle->id[0] ) close( handle->id[0] );
\r
7006 if ( handle->id[1] ) close( handle->id[1] );
\r
7008 stream_.apiHandle = 0;
\r
7011 for ( int i=0; i<2; i++ ) {
\r
7012 if ( stream_.userBuffer[i] ) {
\r
7013 free( stream_.userBuffer[i] );
\r
7014 stream_.userBuffer[i] = 0;
\r
7018 if ( stream_.deviceBuffer ) {
\r
7019 free( stream_.deviceBuffer );
\r
7020 stream_.deviceBuffer = 0;
\r
7026 void RtApiOss :: closeStream()
\r
7028 if ( stream_.state == STREAM_CLOSED ) {
\r
7029 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
7030 error( RtError::WARNING );
\r
7034 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7035 stream_.callbackInfo.isRunning = false;
\r
7036 MUTEX_LOCK( &stream_.mutex );
\r
7037 if ( stream_.state == STREAM_STOPPED )
\r
7038 pthread_cond_signal( &handle->runnable );
\r
7039 MUTEX_UNLOCK( &stream_.mutex );
\r
7040 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7042 if ( stream_.state == STREAM_RUNNING ) {
\r
7043 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7044 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7046 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7047 stream_.state = STREAM_STOPPED;
\r
7051 pthread_cond_destroy( &handle->runnable );
\r
7052 if ( handle->id[0] ) close( handle->id[0] );
\r
7053 if ( handle->id[1] ) close( handle->id[1] );
\r
7055 stream_.apiHandle = 0;
\r
7058 for ( int i=0; i<2; i++ ) {
\r
7059 if ( stream_.userBuffer[i] ) {
\r
7060 free( stream_.userBuffer[i] );
\r
7061 stream_.userBuffer[i] = 0;
\r
7065 if ( stream_.deviceBuffer ) {
\r
7066 free( stream_.deviceBuffer );
\r
7067 stream_.deviceBuffer = 0;
\r
7070 stream_.mode = UNINITIALIZED;
\r
7071 stream_.state = STREAM_CLOSED;
\r
7074 void RtApiOss :: startStream()
\r
7077 if ( stream_.state == STREAM_RUNNING ) {
\r
7078 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
7079 error( RtError::WARNING );
\r
7083 MUTEX_LOCK( &stream_.mutex );
\r
7085 stream_.state = STREAM_RUNNING;
\r
7087 // No need to do anything else here ... OSS automatically starts
\r
7088 // when fed samples.
\r
7090 MUTEX_UNLOCK( &stream_.mutex );
\r
7092 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7093 pthread_cond_signal( &handle->runnable );
\r
7096 void RtApiOss :: stopStream()
\r
7099 if ( stream_.state == STREAM_STOPPED ) {
\r
7100 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
7101 error( RtError::WARNING );
\r
7105 MUTEX_LOCK( &stream_.mutex );
\r
7107 // The state might change while waiting on a mutex.
\r
7108 if ( stream_.state == STREAM_STOPPED ) {
\r
7109 MUTEX_UNLOCK( &stream_.mutex );
\r
7114 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7115 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7117 // Flush the output with zeros a few times.
\r
7120 RtAudioFormat format;
\r
7122 if ( stream_.doConvertBuffer[0] ) {
\r
7123 buffer = stream_.deviceBuffer;
\r
7124 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7125 format = stream_.deviceFormat[0];
\r
7128 buffer = stream_.userBuffer[0];
\r
7129 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7130 format = stream_.userFormat;
\r
7133 memset( buffer, 0, samples * formatBytes(format) );
\r
7134 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
7135 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7136 if ( result == -1 ) {
\r
7137 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
7138 error( RtError::WARNING );
\r
7142 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7143 if ( result == -1 ) {
\r
7144 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7145 errorText_ = errorStream_.str();
\r
7148 handle->triggered = false;
\r
7151 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7152 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7153 if ( result == -1 ) {
\r
7154 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7155 errorText_ = errorStream_.str();
\r
7161 stream_.state = STREAM_STOPPED;
\r
7162 MUTEX_UNLOCK( &stream_.mutex );
\r
7164 if ( result != -1 ) return;
\r
7165 error( RtError::SYSTEM_ERROR );
\r
7168 void RtApiOss :: abortStream()
\r
7171 if ( stream_.state == STREAM_STOPPED ) {
\r
7172 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
7173 error( RtError::WARNING );
\r
7177 MUTEX_LOCK( &stream_.mutex );
\r
7179 // The state might change while waiting on a mutex.
\r
7180 if ( stream_.state == STREAM_STOPPED ) {
\r
7181 MUTEX_UNLOCK( &stream_.mutex );
\r
7186 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7187 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7188 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7189 if ( result == -1 ) {
\r
7190 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7191 errorText_ = errorStream_.str();
\r
7194 handle->triggered = false;
\r
7197 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7198 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7199 if ( result == -1 ) {
\r
7200 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7201 errorText_ = errorStream_.str();
\r
7207 stream_.state = STREAM_STOPPED;
\r
7208 MUTEX_UNLOCK( &stream_.mutex );
\r
7210 if ( result != -1 ) return;
\r
7211 error( RtError::SYSTEM_ERROR );
\r
7214 void RtApiOss :: callbackEvent()
\r
7216 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7217 if ( stream_.state == STREAM_STOPPED ) {
\r
7218 MUTEX_LOCK( &stream_.mutex );
\r
7219 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
7220 if ( stream_.state != STREAM_RUNNING ) {
\r
7221 MUTEX_UNLOCK( &stream_.mutex );
\r
7224 MUTEX_UNLOCK( &stream_.mutex );
\r
7227 if ( stream_.state == STREAM_CLOSED ) {
\r
7228 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7229 error( RtError::WARNING );
\r
7233 // Invoke user callback to get fresh output data.
\r
7234 int doStopStream = 0;
\r
7235 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7236 double streamTime = getStreamTime();
\r
7237 RtAudioStreamStatus status = 0;
\r
7238 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
7239 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7240 handle->xrun[0] = false;
\r
7242 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
7243 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7244 handle->xrun[1] = false;
\r
7246 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7247 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7248 if ( doStopStream == 2 ) {
\r
7249 this->abortStream();
\r
7253 MUTEX_LOCK( &stream_.mutex );
\r
7255 // The state might change while waiting on a mutex.
\r
7256 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7261 RtAudioFormat format;
\r
7263 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7265 // Setup parameters and do buffer conversion if necessary.
\r
7266 if ( stream_.doConvertBuffer[0] ) {
\r
7267 buffer = stream_.deviceBuffer;
\r
7268 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7269 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7270 format = stream_.deviceFormat[0];
\r
7273 buffer = stream_.userBuffer[0];
\r
7274 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7275 format = stream_.userFormat;
\r
7278 // Do byte swapping if necessary.
\r
7279 if ( stream_.doByteSwap[0] )
\r
7280 byteSwapBuffer( buffer, samples, format );
\r
7282 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
7284 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7285 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7286 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
7287 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7288 handle->triggered = true;
\r
7291 // Write samples to device.
\r
7292 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7294 if ( result == -1 ) {
\r
7295 // We'll assume this is an underrun, though there isn't a
\r
7296 // specific means for determining that.
\r
7297 handle->xrun[0] = true;
\r
7298 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
7299 error( RtError::WARNING );
\r
7300 // Continue on to input section.
\r
7304 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7306 // Setup parameters.
\r
7307 if ( stream_.doConvertBuffer[1] ) {
\r
7308 buffer = stream_.deviceBuffer;
\r
7309 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
7310 format = stream_.deviceFormat[1];
\r
7313 buffer = stream_.userBuffer[1];
\r
7314 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
7315 format = stream_.userFormat;
\r
7318 // Read samples from device.
\r
7319 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
7321 if ( result == -1 ) {
\r
7322 // We'll assume this is an overrun, though there isn't a
\r
7323 // specific means for determining that.
\r
7324 handle->xrun[1] = true;
\r
7325 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
7326 error( RtError::WARNING );
\r
7330 // Do byte swapping if necessary.
\r
7331 if ( stream_.doByteSwap[1] )
\r
7332 byteSwapBuffer( buffer, samples, format );
\r
7334 // Do buffer conversion if necessary.
\r
7335 if ( stream_.doConvertBuffer[1] )
\r
7336 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7340 MUTEX_UNLOCK( &stream_.mutex );
\r
7342 RtApi::tickStreamTime();
\r
7343 if ( doStopStream == 1 ) this->stopStream();
\r
7346 extern "C" void *ossCallbackHandler( void *ptr )
\r
7348 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7349 RtApiOss *object = (RtApiOss *) info->object;
\r
7350 bool *isRunning = &info->isRunning;
\r
7352 while ( *isRunning == true ) {
\r
7353 pthread_testcancel();
\r
7354 object->callbackEvent();
\r
7357 pthread_exit( NULL );
\r
7360 //******************** End of __LINUX_OSS__ *********************//
\r
7364 // *************************************************** //
\r
7366 // Protected common (OS-independent) RtAudio methods.
\r
7368 // *************************************************** //
\r
7370 // This method can be modified to control the behavior of error
\r
7371 // message printing.
\r
7372 void RtApi :: error( RtError::Type type )
\r
7374 errorStream_.str(""); // clear the ostringstream
\r
7375 if ( type == RtError::WARNING && showWarnings_ == true )
\r
7376 std::cerr << '\n' << errorText_ << "\n\n";
\r
7377 else if ( type != RtError::WARNING )
\r
7378 throw( RtError( errorText_, type ) );
\r
7381 void RtApi :: verifyStream()
\r
7383 if ( stream_.state == STREAM_CLOSED ) {
\r
7384 errorText_ = "RtApi:: a stream is not open!";
\r
7385 error( RtError::INVALID_USE );
\r
7389 void RtApi :: clearStreamInfo()
\r
7391 stream_.mode = UNINITIALIZED;
\r
7392 stream_.state = STREAM_CLOSED;
\r
7393 stream_.sampleRate = 0;
\r
7394 stream_.bufferSize = 0;
\r
7395 stream_.nBuffers = 0;
\r
7396 stream_.userFormat = 0;
\r
7397 stream_.userInterleaved = true;
\r
7398 stream_.streamTime = 0.0;
\r
7399 stream_.apiHandle = 0;
\r
7400 stream_.deviceBuffer = 0;
\r
7401 stream_.callbackInfo.callback = 0;
\r
7402 stream_.callbackInfo.userData = 0;
\r
7403 stream_.callbackInfo.isRunning = false;
\r
7404 for ( int i=0; i<2; i++ ) {
\r
7405 stream_.device[i] = 11111;
\r
7406 stream_.doConvertBuffer[i] = false;
\r
7407 stream_.deviceInterleaved[i] = true;
\r
7408 stream_.doByteSwap[i] = false;
\r
7409 stream_.nUserChannels[i] = 0;
\r
7410 stream_.nDeviceChannels[i] = 0;
\r
7411 stream_.channelOffset[i] = 0;
\r
7412 stream_.deviceFormat[i] = 0;
\r
7413 stream_.latency[i] = 0;
\r
7414 stream_.userBuffer[i] = 0;
\r
7415 stream_.convertInfo[i].channels = 0;
\r
7416 stream_.convertInfo[i].inJump = 0;
\r
7417 stream_.convertInfo[i].outJump = 0;
\r
7418 stream_.convertInfo[i].inFormat = 0;
\r
7419 stream_.convertInfo[i].outFormat = 0;
\r
7420 stream_.convertInfo[i].inOffset.clear();
\r
7421 stream_.convertInfo[i].outOffset.clear();
\r
7425 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
7427 if ( format == RTAUDIO_SINT16 )
\r
7429 else if ( format == RTAUDIO_SINT24 || format == RTAUDIO_SINT32 ||
\r
7430 format == RTAUDIO_FLOAT32 )
\r
7432 else if ( format == RTAUDIO_FLOAT64 )
\r
7434 else if ( format == RTAUDIO_SINT8 )
\r
7437 errorText_ = "RtApi::formatBytes: undefined format.";
\r
7438 error( RtError::WARNING );
\r
7443 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
7445 if ( mode == INPUT ) { // convert device to user buffer
\r
7446 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
7447 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
7448 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
7449 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
7451 else { // convert user to device buffer
\r
7452 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
7453 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
7454 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
7455 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
7458 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
7459 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
7461 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
7463 // Set up the interleave/deinterleave offsets.
\r
7464 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
7465 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
7466 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
7467 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7468 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7469 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7470 stream_.convertInfo[mode].inJump = 1;
\r
7474 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7475 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7476 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7477 stream_.convertInfo[mode].outJump = 1;
\r
7481 else { // no (de)interleaving
\r
7482 if ( stream_.userInterleaved ) {
\r
7483 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7484 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7485 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7489 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7490 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7491 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7492 stream_.convertInfo[mode].inJump = 1;
\r
7493 stream_.convertInfo[mode].outJump = 1;
\r
7498 // Add channel offset.
\r
7499 if ( firstChannel > 0 ) {
\r
7500 if ( stream_.deviceInterleaved[mode] ) {
\r
7501 if ( mode == OUTPUT ) {
\r
7502 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7503 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
7506 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7507 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
7511 if ( mode == OUTPUT ) {
\r
7512 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7513 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
7516 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7517 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
7523 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
7525 // This function does format conversion, input/output channel compensation, and
\r
7526 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
7527 // the lower three bytes of a 32-bit integer.
\r
7529 // Clear our device buffer when in/out duplex device channels are different
\r
7530 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
7531 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
7532 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
7535 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
7537 Float64 *out = (Float64 *)outBuffer;
\r
7539 if (info.inFormat == RTAUDIO_SINT8) {
\r
7540 signed char *in = (signed char *)inBuffer;
\r
7541 scale = 1.0 / 127.5;
\r
7542 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7543 for (j=0; j<info.channels; j++) {
\r
7544 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7545 out[info.outOffset[j]] += 0.5;
\r
7546 out[info.outOffset[j]] *= scale;
\r
7548 in += info.inJump;
\r
7549 out += info.outJump;
\r
7552 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7553 Int16 *in = (Int16 *)inBuffer;
\r
7554 scale = 1.0 / 32767.5;
\r
7555 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7556 for (j=0; j<info.channels; j++) {
\r
7557 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7558 out[info.outOffset[j]] += 0.5;
\r
7559 out[info.outOffset[j]] *= scale;
\r
7561 in += info.inJump;
\r
7562 out += info.outJump;
\r
7565 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7566 Int32 *in = (Int32 *)inBuffer;
\r
7567 scale = 1.0 / 8388607.5;
\r
7568 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7569 for (j=0; j<info.channels; j++) {
\r
7570 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]] & 0x00ffffff);
\r
7571 out[info.outOffset[j]] += 0.5;
\r
7572 out[info.outOffset[j]] *= scale;
\r
7574 in += info.inJump;
\r
7575 out += info.outJump;
\r
7578 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7579 Int32 *in = (Int32 *)inBuffer;
\r
7580 scale = 1.0 / 2147483647.5;
\r
7581 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7582 for (j=0; j<info.channels; j++) {
\r
7583 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7584 out[info.outOffset[j]] += 0.5;
\r
7585 out[info.outOffset[j]] *= scale;
\r
7587 in += info.inJump;
\r
7588 out += info.outJump;
\r
7591 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7592 Float32 *in = (Float32 *)inBuffer;
\r
7593 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7594 for (j=0; j<info.channels; j++) {
\r
7595 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7597 in += info.inJump;
\r
7598 out += info.outJump;
\r
7601 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7602 // Channel compensation and/or (de)interleaving only.
\r
7603 Float64 *in = (Float64 *)inBuffer;
\r
7604 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7605 for (j=0; j<info.channels; j++) {
\r
7606 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7608 in += info.inJump;
\r
7609 out += info.outJump;
\r
7613 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
7615 Float32 *out = (Float32 *)outBuffer;
\r
7617 if (info.inFormat == RTAUDIO_SINT8) {
\r
7618 signed char *in = (signed char *)inBuffer;
\r
7619 scale = (Float32) ( 1.0 / 127.5 );
\r
7620 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7621 for (j=0; j<info.channels; j++) {
\r
7622 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7623 out[info.outOffset[j]] += 0.5;
\r
7624 out[info.outOffset[j]] *= scale;
\r
7626 in += info.inJump;
\r
7627 out += info.outJump;
\r
7630 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7631 Int16 *in = (Int16 *)inBuffer;
\r
7632 scale = (Float32) ( 1.0 / 32767.5 );
\r
7633 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7634 for (j=0; j<info.channels; j++) {
\r
7635 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7636 out[info.outOffset[j]] += 0.5;
\r
7637 out[info.outOffset[j]] *= scale;
\r
7639 in += info.inJump;
\r
7640 out += info.outJump;
\r
7643 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7644 Int32 *in = (Int32 *)inBuffer;
\r
7645 scale = (Float32) ( 1.0 / 8388607.5 );
\r
7646 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7647 for (j=0; j<info.channels; j++) {
\r
7648 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]] & 0x00ffffff);
\r
7649 out[info.outOffset[j]] += 0.5;
\r
7650 out[info.outOffset[j]] *= scale;
\r
7652 in += info.inJump;
\r
7653 out += info.outJump;
\r
7656 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7657 Int32 *in = (Int32 *)inBuffer;
\r
7658 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
7659 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7660 for (j=0; j<info.channels; j++) {
\r
7661 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7662 out[info.outOffset[j]] += 0.5;
\r
7663 out[info.outOffset[j]] *= scale;
\r
7665 in += info.inJump;
\r
7666 out += info.outJump;
\r
7669 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7670 // Channel compensation and/or (de)interleaving only.
\r
7671 Float32 *in = (Float32 *)inBuffer;
\r
7672 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7673 for (j=0; j<info.channels; j++) {
\r
7674 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7676 in += info.inJump;
\r
7677 out += info.outJump;
\r
7680 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7681 Float64 *in = (Float64 *)inBuffer;
\r
7682 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7683 for (j=0; j<info.channels; j++) {
\r
7684 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7686 in += info.inJump;
\r
7687 out += info.outJump;
\r
7691 else if (info.outFormat == RTAUDIO_SINT32) {
\r
7692 Int32 *out = (Int32 *)outBuffer;
\r
7693 if (info.inFormat == RTAUDIO_SINT8) {
\r
7694 signed char *in = (signed char *)inBuffer;
\r
7695 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7696 for (j=0; j<info.channels; j++) {
\r
7697 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7698 out[info.outOffset[j]] <<= 24;
\r
7700 in += info.inJump;
\r
7701 out += info.outJump;
\r
7704 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7705 Int16 *in = (Int16 *)inBuffer;
\r
7706 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7707 for (j=0; j<info.channels; j++) {
\r
7708 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7709 out[info.outOffset[j]] <<= 16;
\r
7711 in += info.inJump;
\r
7712 out += info.outJump;
\r
7715 else if (info.inFormat == RTAUDIO_SINT24) { // Hmmm ... we could just leave it in the lower 3 bytes
\r
7716 Int32 *in = (Int32 *)inBuffer;
\r
7717 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7718 for (j=0; j<info.channels; j++) {
\r
7719 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7720 out[info.outOffset[j]] <<= 8;
\r
7722 in += info.inJump;
\r
7723 out += info.outJump;
\r
7726 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7727 // Channel compensation and/or (de)interleaving only.
\r
7728 Int32 *in = (Int32 *)inBuffer;
\r
7729 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7730 for (j=0; j<info.channels; j++) {
\r
7731 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7733 in += info.inJump;
\r
7734 out += info.outJump;
\r
7737 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7738 Float32 *in = (Float32 *)inBuffer;
\r
7739 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7740 for (j=0; j<info.channels; j++) {
\r
7741 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
7743 in += info.inJump;
\r
7744 out += info.outJump;
\r
7747 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7748 Float64 *in = (Float64 *)inBuffer;
\r
7749 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7750 for (j=0; j<info.channels; j++) {
\r
7751 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
7753 in += info.inJump;
\r
7754 out += info.outJump;
\r
7758 else if (info.outFormat == RTAUDIO_SINT24) {
\r
7759 Int32 *out = (Int32 *)outBuffer;
\r
7760 if (info.inFormat == RTAUDIO_SINT8) {
\r
7761 signed char *in = (signed char *)inBuffer;
\r
7762 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7763 for (j=0; j<info.channels; j++) {
\r
7764 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7765 out[info.outOffset[j]] <<= 16;
\r
7767 in += info.inJump;
\r
7768 out += info.outJump;
\r
7771 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7772 Int16 *in = (Int16 *)inBuffer;
\r
7773 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7774 for (j=0; j<info.channels; j++) {
\r
7775 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7776 out[info.outOffset[j]] <<= 8;
\r
7778 in += info.inJump;
\r
7779 out += info.outJump;
\r
7782 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7783 // Channel compensation and/or (de)interleaving only.
\r
7784 Int32 *in = (Int32 *)inBuffer;
\r
7785 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7786 for (j=0; j<info.channels; j++) {
\r
7787 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7789 in += info.inJump;
\r
7790 out += info.outJump;
\r
7793 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7794 Int32 *in = (Int32 *)inBuffer;
\r
7795 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7796 for (j=0; j<info.channels; j++) {
\r
7797 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7798 out[info.outOffset[j]] >>= 8;
\r
7800 in += info.inJump;
\r
7801 out += info.outJump;
\r
7804 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7805 Float32 *in = (Float32 *)inBuffer;
\r
7806 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7807 for (j=0; j<info.channels; j++) {
\r
7808 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
7810 in += info.inJump;
\r
7811 out += info.outJump;
\r
7814 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7815 Float64 *in = (Float64 *)inBuffer;
\r
7816 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7817 for (j=0; j<info.channels; j++) {
\r
7818 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
7820 in += info.inJump;
\r
7821 out += info.outJump;
\r
7825 else if (info.outFormat == RTAUDIO_SINT16) {
\r
7826 Int16 *out = (Int16 *)outBuffer;
\r
7827 if (info.inFormat == RTAUDIO_SINT8) {
\r
7828 signed char *in = (signed char *)inBuffer;
\r
7829 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7830 for (j=0; j<info.channels; j++) {
\r
7831 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
7832 out[info.outOffset[j]] <<= 8;
\r
7834 in += info.inJump;
\r
7835 out += info.outJump;
\r
7838 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7839 // Channel compensation and/or (de)interleaving only.
\r
7840 Int16 *in = (Int16 *)inBuffer;
\r
7841 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7842 for (j=0; j<info.channels; j++) {
\r
7843 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7845 in += info.inJump;
\r
7846 out += info.outJump;
\r
7849 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7850 Int32 *in = (Int32 *)inBuffer;
\r
7851 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7852 for (j=0; j<info.channels; j++) {
\r
7853 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 8) & 0x0000ffff);
\r
7855 in += info.inJump;
\r
7856 out += info.outJump;
\r
7859 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7860 Int32 *in = (Int32 *)inBuffer;
\r
7861 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7862 for (j=0; j<info.channels; j++) {
\r
7863 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
7865 in += info.inJump;
\r
7866 out += info.outJump;
\r
7869 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7870 Float32 *in = (Float32 *)inBuffer;
\r
7871 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7872 for (j=0; j<info.channels; j++) {
\r
7873 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
7875 in += info.inJump;
\r
7876 out += info.outJump;
\r
7879 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7880 Float64 *in = (Float64 *)inBuffer;
\r
7881 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7882 for (j=0; j<info.channels; j++) {
\r
7883 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
7885 in += info.inJump;
\r
7886 out += info.outJump;
\r
7890 else if (info.outFormat == RTAUDIO_SINT8) {
\r
7891 signed char *out = (signed char *)outBuffer;
\r
7892 if (info.inFormat == RTAUDIO_SINT8) {
\r
7893 // Channel compensation and/or (de)interleaving only.
\r
7894 signed char *in = (signed char *)inBuffer;
\r
7895 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7896 for (j=0; j<info.channels; j++) {
\r
7897 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7899 in += info.inJump;
\r
7900 out += info.outJump;
\r
7903 if (info.inFormat == RTAUDIO_SINT16) {
\r
7904 Int16 *in = (Int16 *)inBuffer;
\r
7905 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7906 for (j=0; j<info.channels; j++) {
\r
7907 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
7909 in += info.inJump;
\r
7910 out += info.outJump;
\r
7913 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7914 Int32 *in = (Int32 *)inBuffer;
\r
7915 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7916 for (j=0; j<info.channels; j++) {
\r
7917 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 16) & 0x000000ff);
\r
7919 in += info.inJump;
\r
7920 out += info.outJump;
\r
7923 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7924 Int32 *in = (Int32 *)inBuffer;
\r
7925 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7926 for (j=0; j<info.channels; j++) {
\r
7927 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
7929 in += info.inJump;
\r
7930 out += info.outJump;
\r
7933 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7934 Float32 *in = (Float32 *)inBuffer;
\r
7935 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7936 for (j=0; j<info.channels; j++) {
\r
7937 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
7939 in += info.inJump;
\r
7940 out += info.outJump;
\r
7943 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7944 Float64 *in = (Float64 *)inBuffer;
\r
7945 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7946 for (j=0; j<info.channels; j++) {
\r
7947 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
7949 in += info.inJump;
\r
7950 out += info.outJump;
\r
7956 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
7957 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
7958 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
7960 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
7962 register char val;
\r
7963 register char *ptr;
\r
7966 if ( format == RTAUDIO_SINT16 ) {
\r
7967 for ( unsigned int i=0; i<samples; i++ ) {
\r
7968 // Swap 1st and 2nd bytes.
\r
7970 *(ptr) = *(ptr+1);
\r
7973 // Increment 2 bytes.
\r
7977 else if ( format == RTAUDIO_SINT24 ||
\r
7978 format == RTAUDIO_SINT32 ||
\r
7979 format == RTAUDIO_FLOAT32 ) {
\r
7980 for ( unsigned int i=0; i<samples; i++ ) {
\r
7981 // Swap 1st and 4th bytes.
\r
7983 *(ptr) = *(ptr+3);
\r
7986 // Swap 2nd and 3rd bytes.
\r
7989 *(ptr) = *(ptr+1);
\r
7992 // Increment 3 more bytes.
\r
7996 else if ( format == RTAUDIO_FLOAT64 ) {
\r
7997 for ( unsigned int i=0; i<samples; i++ ) {
\r
7998 // Swap 1st and 8th bytes
\r
8000 *(ptr) = *(ptr+7);
\r
8003 // Swap 2nd and 7th bytes
\r
8006 *(ptr) = *(ptr+5);
\r
8009 // Swap 3rd and 6th bytes
\r
8012 *(ptr) = *(ptr+3);
\r
8015 // Swap 4th and 5th bytes
\r
8018 *(ptr) = *(ptr+1);
\r
8021 // Increment 5 more bytes.
\r
// Indentation settings for Vim and Emacs
//
// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:
//
// vim: et sts=2 sw=2
\r