1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound, ASIO and WASAPI) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2016 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.1.2
\r
#include "RtAudio.h"
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
#include <algorithm>
\r
50 // Static variable definitions.
\r
51 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
52 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
53 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
54 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform mutex abstraction: Win32 critical sections on Windows,
// pthread mutexes on Linux/Mac/Jack builds, no-op dummies otherwise.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  // Identity conversion for narrow strings (pairs with the wide overload below).
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Convert a null-terminated wide (UTF-16) string to a UTF-8 std::string.
  // First call computes the required byte count (includes the terminator),
  // so the string is sized length-1 and filled in place.
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
\r
89 // *************************************************** //
\r
91 // RtAudio definitions.
\r
93 // *************************************************** //
\r
95 std::string RtAudio :: getVersion( void ) throw()
\r
97 return RTAUDIO_VERSION;
\r
100 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
104 // The order here will control the order of RtAudio's API search in
\r
105 // the constructor.
\r
106 #if defined(__UNIX_JACK__)
\r
107 apis.push_back( UNIX_JACK );
\r
109 #if defined(__LINUX_ALSA__)
\r
110 apis.push_back( LINUX_ALSA );
\r
112 #if defined(__LINUX_PULSE__)
\r
113 apis.push_back( LINUX_PULSE );
\r
115 #if defined(__LINUX_OSS__)
\r
116 apis.push_back( LINUX_OSS );
\r
118 #if defined(__WINDOWS_ASIO__)
\r
119 apis.push_back( WINDOWS_ASIO );
\r
121 #if defined(__WINDOWS_WASAPI__)
\r
122 apis.push_back( WINDOWS_WASAPI );
\r
124 #if defined(__WINDOWS_DS__)
\r
125 apis.push_back( WINDOWS_DS );
\r
127 #if defined(__MACOSX_CORE__)
\r
128 apis.push_back( MACOSX_CORE );
\r
130 #if defined(__RTAUDIO_DUMMY__)
\r
131 apis.push_back( RTAUDIO_DUMMY );
\r
135 void RtAudio :: openRtApi( RtAudio::Api api )
\r
141 #if defined(__UNIX_JACK__)
\r
142 if ( api == UNIX_JACK )
\r
143 rtapi_ = new RtApiJack();
\r
145 #if defined(__LINUX_ALSA__)
\r
146 if ( api == LINUX_ALSA )
\r
147 rtapi_ = new RtApiAlsa();
\r
149 #if defined(__LINUX_PULSE__)
\r
150 if ( api == LINUX_PULSE )
\r
151 rtapi_ = new RtApiPulse();
\r
153 #if defined(__LINUX_OSS__)
\r
154 if ( api == LINUX_OSS )
\r
155 rtapi_ = new RtApiOss();
\r
157 #if defined(__WINDOWS_ASIO__)
\r
158 if ( api == WINDOWS_ASIO )
\r
159 rtapi_ = new RtApiAsio();
\r
161 #if defined(__WINDOWS_WASAPI__)
\r
162 if ( api == WINDOWS_WASAPI )
\r
163 rtapi_ = new RtApiWasapi();
\r
165 #if defined(__WINDOWS_DS__)
\r
166 if ( api == WINDOWS_DS )
\r
167 rtapi_ = new RtApiDs();
\r
169 #if defined(__MACOSX_CORE__)
\r
170 if ( api == MACOSX_CORE )
\r
171 rtapi_ = new RtApiCore();
\r
173 #if defined(__RTAUDIO_DUMMY__)
\r
174 if ( api == RTAUDIO_DUMMY )
\r
175 rtapi_ = new RtApiDummy();
\r
179 RtAudio :: RtAudio( RtAudio::Api api )
\r
183 if ( api != UNSPECIFIED ) {
\r
184 // Attempt to open the specified API.
\r
186 if ( rtapi_ ) return;
\r
188 // No compiled support for specified API value. Issue a debug
\r
189 // warning and continue as if no API was specified.
\r
190 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
193 // Iterate through the compiled APIs and return as soon as we find
\r
194 // one with at least one device or we reach the end of the list.
\r
195 std::vector< RtAudio::Api > apis;
\r
196 getCompiledApi( apis );
\r
197 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
198 openRtApi( apis[i] );
\r
199 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
\r
202 if ( rtapi_ ) return;
\r
204 // It should not be possible to get here because the preprocessor
\r
205 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
206 // API-specific definitions are passed to the compiler. But just in
\r
207 // case something weird happens, we'll thow an error.
\r
208 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
209 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
212 RtAudio :: ~RtAudio() throw()
\r
218 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
219 RtAudio::StreamParameters *inputParameters,
\r
220 RtAudioFormat format, unsigned int sampleRate,
\r
221 unsigned int *bufferFrames,
\r
222 RtAudioCallback callback, void *userData,
\r
223 RtAudio::StreamOptions *options,
\r
224 RtAudioErrorCallback errorCallback )
\r
226 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
227 sampleRate, bufferFrames, callback,
\r
228 userData, options, errorCallback );
\r
231 // *************************************************** //
\r
233 // Public RtApi definitions (see end of file for
\r
234 // private or protected utility functions).
\r
236 // *************************************************** //
\r
240 stream_.state = STREAM_CLOSED;
\r
241 stream_.mode = UNINITIALIZED;
\r
242 stream_.apiHandle = 0;
\r
243 stream_.userBuffer[0] = 0;
\r
244 stream_.userBuffer[1] = 0;
\r
245 MUTEX_INITIALIZE( &stream_.mutex );
\r
246 showWarnings_ = true;
\r
247 firstErrorOccurred_ = false;
\r
252 MUTEX_DESTROY( &stream_.mutex );
\r
255 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
256 RtAudio::StreamParameters *iParams,
\r
257 RtAudioFormat format, unsigned int sampleRate,
\r
258 unsigned int *bufferFrames,
\r
259 RtAudioCallback callback, void *userData,
\r
260 RtAudio::StreamOptions *options,
\r
261 RtAudioErrorCallback errorCallback )
\r
263 if ( stream_.state != STREAM_CLOSED ) {
\r
264 errorText_ = "RtApi::openStream: a stream is already open!";
\r
265 error( RtAudioError::INVALID_USE );
\r
269 // Clear stream information potentially left from a previously open stream.
\r
272 if ( oParams && oParams->nChannels < 1 ) {
\r
273 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
274 error( RtAudioError::INVALID_USE );
\r
278 if ( iParams && iParams->nChannels < 1 ) {
\r
279 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
280 error( RtAudioError::INVALID_USE );
\r
284 if ( oParams == NULL && iParams == NULL ) {
\r
285 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
286 error( RtAudioError::INVALID_USE );
\r
290 if ( formatBytes(format) == 0 ) {
\r
291 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
292 error( RtAudioError::INVALID_USE );
\r
296 unsigned int nDevices = getDeviceCount();
\r
297 unsigned int oChannels = 0;
\r
299 oChannels = oParams->nChannels;
\r
300 if ( oParams->deviceId >= nDevices ) {
\r
301 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
302 error( RtAudioError::INVALID_USE );
\r
307 unsigned int iChannels = 0;
\r
309 iChannels = iParams->nChannels;
\r
310 if ( iParams->deviceId >= nDevices ) {
\r
311 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
312 error( RtAudioError::INVALID_USE );
\r
319 if ( oChannels > 0 ) {
\r
321 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
322 sampleRate, format, bufferFrames, options );
\r
323 if ( result == false ) {
\r
324 error( RtAudioError::SYSTEM_ERROR );
\r
329 if ( iChannels > 0 ) {
\r
331 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
332 sampleRate, format, bufferFrames, options );
\r
333 if ( result == false ) {
\r
334 if ( oChannels > 0 ) closeStream();
\r
335 error( RtAudioError::SYSTEM_ERROR );
\r
340 stream_.callbackInfo.callback = (void *) callback;
\r
341 stream_.callbackInfo.userData = userData;
\r
342 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
344 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
345 stream_.state = STREAM_STOPPED;
\r
348 unsigned int RtApi :: getDefaultInputDevice( void )
\r
350 // Should be implemented in subclasses if possible.
\r
354 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
356 // Should be implemented in subclasses if possible.
\r
360 void RtApi :: closeStream( void )
\r
362 // MUST be implemented in subclasses!
\r
366 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
367 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
368 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
369 RtAudio::StreamOptions * /*options*/ )
\r
371 // MUST be implemented in subclasses!
\r
375 void RtApi :: tickStreamTime( void )
\r
377 // Subclasses that do not provide their own implementation of
\r
378 // getStreamTime should call this function once per buffer I/O to
\r
379 // provide basic stream time support.
\r
381 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
383 #if defined( HAVE_GETTIMEOFDAY )
\r
384 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
388 long RtApi :: getStreamLatency( void )
\r
392 long totalLatency = 0;
\r
393 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
394 totalLatency = stream_.latency[0];
\r
395 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
396 totalLatency += stream_.latency[1];
\r
398 return totalLatency;
\r
401 double RtApi :: getStreamTime( void )
\r
405 #if defined( HAVE_GETTIMEOFDAY )
\r
406 // Return a very accurate estimate of the stream time by
\r
407 // adding in the elapsed time since the last tick.
\r
408 struct timeval then;
\r
409 struct timeval now;
\r
411 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
412 return stream_.streamTime;
\r
414 gettimeofday( &now, NULL );
\r
415 then = stream_.lastTickTimestamp;
\r
416 return stream_.streamTime +
\r
417 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
418 (then.tv_sec + 0.000001 * then.tv_usec));
\r
420 return stream_.streamTime;
\r
424 void RtApi :: setStreamTime( double time )
\r
429 stream_.streamTime = time;
\r
432 unsigned int RtApi :: getStreamSampleRate( void )
\r
436 return stream_.sampleRate;
\r
440 // *************************************************** //
\r
442 // OS/API-specific methods.
\r
444 // *************************************************** //
\r
446 #if defined(__MACOSX_CORE__)
\r
448 // The OS X CoreAudio API is designed to use a separate callback
\r
449 // procedure for each of its audio devices. A single RtAudio duplex
\r
450 // stream using two different devices is supported here, though it
\r
451 // cannot be guaranteed to always behave correctly because we cannot
\r
452 // synchronize these two callbacks.
\r
454 // A property listener is installed for over/underrun information.
\r
455 // However, no functionality is currently provided to allow property
\r
456 // listeners to trigger user handlers because it is unclear what could
\r
457 // be done if a critical stream parameter (buffer size, sample rate,
\r
458 // device disconnect) notification arrived. The listeners entail
\r
459 // quite a bit of extra code and most likely, a user program wouldn't
\r
460 // be prepared for the result anyway. However, we do provide a flag
\r
461 // to the client callback function to inform of an over/underrun.
\r
463 // A structure to hold various information related to the CoreAudio API
\r
465 struct CoreHandle {
\r
466 AudioDeviceID id[2]; // device ids
\r
467 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
468 AudioDeviceIOProcID procId[2];
\r
470 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
471 UInt32 nStreams[2]; // number of streams to use
\r
473 char *deviceBuffer;
\r
474 pthread_cond_t condition;
\r
475 int drainCounter; // Tracks callback counts when draining
\r
476 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
479 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
482 RtApiCore:: RtApiCore()
\r
484 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
485 // This is a largely undocumented but absolutely necessary
\r
486 // requirement starting with OS-X 10.6. If not called, queries and
\r
487 // updates to various audio device properties are not handled
\r
489 CFRunLoopRef theRunLoop = NULL;
\r
490 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
491 kAudioObjectPropertyScopeGlobal,
\r
492 kAudioObjectPropertyElementMaster };
\r
493 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
494 if ( result != noErr ) {
\r
495 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
496 error( RtAudioError::WARNING );
\r
501 RtApiCore :: ~RtApiCore()
\r
503 // The subclass destructor gets called before the base class
\r
504 // destructor, so close an existing stream before deallocating
\r
505 // apiDeviceId memory.
\r
506 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
509 unsigned int RtApiCore :: getDeviceCount( void )
\r
511 // Find out how many audio devices there are, if any.
\r
513 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
514 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
515 if ( result != noErr ) {
\r
516 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
517 error( RtAudioError::WARNING );
\r
521 return dataSize / sizeof( AudioDeviceID );
\r
524 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
526 unsigned int nDevices = getDeviceCount();
\r
527 if ( nDevices <= 1 ) return 0;
\r
530 UInt32 dataSize = sizeof( AudioDeviceID );
\r
531 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
532 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
533 if ( result != noErr ) {
\r
534 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
535 error( RtAudioError::WARNING );
\r
539 dataSize *= nDevices;
\r
540 AudioDeviceID deviceList[ nDevices ];
\r
541 property.mSelector = kAudioHardwarePropertyDevices;
\r
542 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
543 if ( result != noErr ) {
\r
544 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
545 error( RtAudioError::WARNING );
\r
549 for ( unsigned int i=0; i<nDevices; i++ )
\r
550 if ( id == deviceList[i] ) return i;
\r
552 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
553 error( RtAudioError::WARNING );
\r
557 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
559 unsigned int nDevices = getDeviceCount();
\r
560 if ( nDevices <= 1 ) return 0;
\r
563 UInt32 dataSize = sizeof( AudioDeviceID );
\r
564 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
565 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
566 if ( result != noErr ) {
\r
567 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
568 error( RtAudioError::WARNING );
\r
572 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
573 AudioDeviceID deviceList[ nDevices ];
\r
574 property.mSelector = kAudioHardwarePropertyDevices;
\r
575 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
576 if ( result != noErr ) {
\r
577 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
578 error( RtAudioError::WARNING );
\r
582 for ( unsigned int i=0; i<nDevices; i++ )
\r
583 if ( id == deviceList[i] ) return i;
\r
585 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
586 error( RtAudioError::WARNING );
\r
590 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
592 RtAudio::DeviceInfo info;
\r
593 info.probed = false;
\r
596 unsigned int nDevices = getDeviceCount();
\r
597 if ( nDevices == 0 ) {
\r
598 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
599 error( RtAudioError::INVALID_USE );
\r
603 if ( device >= nDevices ) {
\r
604 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
605 error( RtAudioError::INVALID_USE );
\r
609 AudioDeviceID deviceList[ nDevices ];
\r
610 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
611 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
612 kAudioObjectPropertyScopeGlobal,
\r
613 kAudioObjectPropertyElementMaster };
\r
614 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
615 0, NULL, &dataSize, (void *) &deviceList );
\r
616 if ( result != noErr ) {
\r
617 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
618 error( RtAudioError::WARNING );
\r
622 AudioDeviceID id = deviceList[ device ];
\r
624 // Get the device name.
\r
626 CFStringRef cfname;
\r
627 dataSize = sizeof( CFStringRef );
\r
628 property.mSelector = kAudioObjectPropertyManufacturer;
\r
629 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
630 if ( result != noErr ) {
\r
631 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
632 errorText_ = errorStream_.str();
\r
633 error( RtAudioError::WARNING );
\r
637 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
638 int length = CFStringGetLength(cfname);
\r
639 char *mname = (char *)malloc(length * 3 + 1);
\r
640 #if defined( UNICODE ) || defined( _UNICODE )
\r
641 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
643 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
645 info.name.append( (const char *)mname, strlen(mname) );
\r
646 info.name.append( ": " );
\r
647 CFRelease( cfname );
\r
650 property.mSelector = kAudioObjectPropertyName;
\r
651 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
652 if ( result != noErr ) {
\r
653 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
654 errorText_ = errorStream_.str();
\r
655 error( RtAudioError::WARNING );
\r
659 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
660 length = CFStringGetLength(cfname);
\r
661 char *name = (char *)malloc(length * 3 + 1);
\r
662 #if defined( UNICODE ) || defined( _UNICODE )
\r
663 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
665 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
667 info.name.append( (const char *)name, strlen(name) );
\r
668 CFRelease( cfname );
\r
671 // Get the output stream "configuration".
\r
672 AudioBufferList *bufferList = nil;
\r
673 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
674 property.mScope = kAudioDevicePropertyScopeOutput;
\r
675 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
677 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
678 if ( result != noErr || dataSize == 0 ) {
\r
679 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
680 errorText_ = errorStream_.str();
\r
681 error( RtAudioError::WARNING );
\r
685 // Allocate the AudioBufferList.
\r
686 bufferList = (AudioBufferList *) malloc( dataSize );
\r
687 if ( bufferList == NULL ) {
\r
688 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
689 error( RtAudioError::WARNING );
\r
693 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
694 if ( result != noErr || dataSize == 0 ) {
\r
695 free( bufferList );
\r
696 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
697 errorText_ = errorStream_.str();
\r
698 error( RtAudioError::WARNING );
\r
702 // Get output channel information.
\r
703 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
704 for ( i=0; i<nStreams; i++ )
\r
705 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
706 free( bufferList );
\r
708 // Get the input stream "configuration".
\r
709 property.mScope = kAudioDevicePropertyScopeInput;
\r
710 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
711 if ( result != noErr || dataSize == 0 ) {
\r
712 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
713 errorText_ = errorStream_.str();
\r
714 error( RtAudioError::WARNING );
\r
718 // Allocate the AudioBufferList.
\r
719 bufferList = (AudioBufferList *) malloc( dataSize );
\r
720 if ( bufferList == NULL ) {
\r
721 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
722 error( RtAudioError::WARNING );
\r
726 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
727 if (result != noErr || dataSize == 0) {
\r
728 free( bufferList );
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 // Get input channel information.
\r
736 nStreams = bufferList->mNumberBuffers;
\r
737 for ( i=0; i<nStreams; i++ )
\r
738 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
739 free( bufferList );
\r
741 // If device opens for both playback and capture, we determine the channels.
\r
742 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
743 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
745 // Probe the device sample rates.
\r
746 bool isInput = false;
\r
747 if ( info.outputChannels == 0 ) isInput = true;
\r
749 // Determine the supported sample rates.
\r
750 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
751 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
752 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
753 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
754 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
755 errorText_ = errorStream_.str();
\r
756 error( RtAudioError::WARNING );
\r
760 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
761 AudioValueRange rangeList[ nRanges ];
\r
762 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
763 if ( result != kAudioHardwareNoError ) {
\r
764 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
765 errorText_ = errorStream_.str();
\r
766 error( RtAudioError::WARNING );
\r
770 // The sample rate reporting mechanism is a bit of a mystery. It
\r
771 // seems that it can either return individual rates or a range of
\r
772 // rates. I assume that if the min / max range values are the same,
\r
773 // then that represents a single supported rate and if the min / max
\r
774 // range values are different, the device supports an arbitrary
\r
775 // range of values (though there might be multiple ranges, so we'll
\r
776 // use the most conservative range).
\r
777 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
778 bool haveValueRange = false;
\r
779 info.sampleRates.clear();
\r
780 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
781 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
\r
782 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
\r
783 info.sampleRates.push_back( tmpSr );
\r
785 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
\r
786 info.preferredSampleRate = tmpSr;
\r
789 haveValueRange = true;
\r
790 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
791 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
795 if ( haveValueRange ) {
\r
796 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
797 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
\r
798 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
800 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
801 info.preferredSampleRate = SAMPLE_RATES[k];
\r
806 // Sort and remove any redundant values
\r
807 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
808 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
810 if ( info.sampleRates.size() == 0 ) {
\r
811 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
812 errorText_ = errorStream_.str();
\r
813 error( RtAudioError::WARNING );
\r
817 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
818 // Thus, any other "physical" formats supported by the device are of
\r
819 // no interest to the client.
\r
820 info.nativeFormats = RTAUDIO_FLOAT32;
\r
822 if ( info.outputChannels > 0 )
\r
823 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
824 if ( info.inputChannels > 0 )
\r
825 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
827 info.probed = true;
\r
831 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
832 const AudioTimeStamp* /*inNow*/,
\r
833 const AudioBufferList* inInputData,
\r
834 const AudioTimeStamp* /*inInputTime*/,
\r
835 AudioBufferList* outOutputData,
\r
836 const AudioTimeStamp* /*inOutputTime*/,
\r
837 void* infoPointer )
\r
839 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
841 RtApiCore *object = (RtApiCore *) info->object;
\r
842 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
843 return kAudioHardwareUnspecifiedError;
\r
845 return kAudioHardwareNoError;
\r
848 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
850 const AudioObjectPropertyAddress properties[],
\r
851 void* handlePointer )
\r
853 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
854 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
855 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
856 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
857 handle->xrun[1] = true;
\r
859 handle->xrun[0] = true;
\r
863 return kAudioHardwareNoError;
\r
866 static OSStatus rateListener( AudioObjectID inDevice,
\r
867 UInt32 /*nAddresses*/,
\r
868 const AudioObjectPropertyAddress /*properties*/[],
\r
869 void* ratePointer )
\r
871 Float64 *rate = (Float64 *) ratePointer;
\r
872 UInt32 dataSize = sizeof( Float64 );
\r
873 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
874 kAudioObjectPropertyScopeGlobal,
\r
875 kAudioObjectPropertyElementMaster };
\r
876 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
877 return kAudioHardwareNoError;
\r
// RtApiCore::probeDeviceOpen -- open and configure one direction (OUTPUT or
// INPUT) of a stream on a CoreAudio device:
//   device       - index into the system device list (validated below)
//   mode         - OUTPUT or INPUT
//   channels     - number of user channels requested
//   firstChannel - channel offset into the device's channel map
//   sampleRate   - desired sample rate in Hz
//   format       - user sample format (device side is forced to FLOAT32)
//   bufferSize   - in/out: requested frames, clamped to the device range
//   options      - optional flags (MINIMIZE_LATENCY, HOG_DEVICE,
//                  NONINTERLEAVED are consulted in this body)
// On failure it sets errorText_ / errorStream_ and -- presumably, since the
// failure-return statements are missing from this capture -- returns FAILURE.
// NOTE(review): whitespace-mangled capture. Each line carries a stale source
// line number, and gaps in that numbering (890-892, 896-898, 908-910,
// 1116-1118, 1373-1377, 1393-1394 ...) show that braces, "return FAILURE;"
// lines, #else/#endif branches and the error-unwind label are missing.
// Do NOT edit logic from this text; recover the canonical RtAudio.cpp.
880 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
881 unsigned int firstChannel, unsigned int sampleRate,
\r
882 RtAudioFormat format, unsigned int *bufferSize,
\r
883 RtAudio::StreamOptions *options )
\r
886 unsigned int nDevices = getDeviceCount();
\r
887 if ( nDevices == 0 ) {
\r
888 // This should not happen because a check is made before this function is called.
\r
889 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
893 if ( device >= nDevices ) {
\r
894 // This should not happen because a check is made before this function is called.
\r
895 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
// Fetch the system device list and translate our index into an AudioDeviceID.
// NOTE(review): deviceList is a variable-length array (GCC/Clang extension),
// not standard C++.
899 AudioDeviceID deviceList[ nDevices ];
\r
900 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
901 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
902 kAudioObjectPropertyScopeGlobal,
\r
903 kAudioObjectPropertyElementMaster };
\r
904 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
905 0, NULL, &dataSize, (void *) &deviceList );
\r
906 if ( result != noErr ) {
\r
907 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
911 AudioDeviceID id = deviceList[ device ];
\r
913 // Setup for stream mode.
\r
914 bool isInput = false;
\r
915 if ( mode == INPUT ) {
\r
// NOTE(review): the "isInput = true;" line (916) and the "else {" line (919)
// between these two scope assignments are among the lines lost in the capture.
917 property.mScope = kAudioDevicePropertyScopeInput;
\r
920 property.mScope = kAudioDevicePropertyScopeOutput;
\r
922 // Get the stream "configuration".
\r
923 AudioBufferList *bufferList = nil;
\r
925 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
926 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
927 if ( result != noErr || dataSize == 0 ) {
\r
928 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
929 errorText_ = errorStream_.str();
\r
933 // Allocate the AudioBufferList.
\r
934 bufferList = (AudioBufferList *) malloc( dataSize );
\r
935 if ( bufferList == NULL ) {
\r
936 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
940 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
941 if (result != noErr || dataSize == 0) {
\r
942 free( bufferList );
\r
943 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
944 errorText_ = errorStream_.str();
\r
948 // Search for one or more streams that contain the desired number of
\r
949 // channels. CoreAudio devices can have an arbitrary number of
\r
950 // streams and each stream can have an arbitrary number of channels.
\r
951 // For each stream, a single buffer of interleaved samples is
\r
952 // provided. RtAudio prefers the use of one stream of interleaved
\r
953 // data or multiple consecutive single-channel streams. However, we
\r
954 // now support multiple consecutive multi-channel streams of
\r
955 // interleaved data as well.
\r
956 UInt32 iStream, offsetCounter = firstChannel;
\r
957 UInt32 nStreams = bufferList->mNumberBuffers;
\r
958 bool monoMode = false;
\r
959 bool foundStream = false;
\r
961 // First check that the device supports the requested number of
\r
963 UInt32 deviceChannels = 0;
\r
964 for ( iStream=0; iStream<nStreams; iStream++ )
\r
965 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
967 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
968 free( bufferList );
\r
969 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
970 errorText_ = errorStream_.str();
\r
974 // Look for a single stream meeting our needs.
\r
975 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
976 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
977 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
978 if ( streamChannels >= channels + offsetCounter ) {
\r
979 firstStream = iStream;
\r
980 channelOffset = offsetCounter;
\r
981 foundStream = true;
\r
// offsetCounter walks the requested firstChannel offset down across streams;
// once the remaining offset fits inside the current stream, stop scanning.
984 if ( streamChannels > offsetCounter ) break;
\r
985 offsetCounter -= streamChannels;
\r
988 // If we didn't find a single stream above, then we should be able
\r
989 // to meet the channel specification with multiple streams.
\r
990 if ( foundStream == false ) {
\r
992 offsetCounter = firstChannel;
\r
993 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
994 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
995 if ( streamChannels > offsetCounter ) break;
\r
996 offsetCounter -= streamChannels;
\r
999 firstStream = iStream;
\r
1000 channelOffset = offsetCounter;
\r
1001 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
// monoMode tracks whether every contributing stream is single-channel; the
// lines that initialize it true (around 1002) are missing from the capture.
1003 if ( streamChannels > 1 ) monoMode = false;
\r
1004 while ( channelCounter > 0 ) {
\r
1005 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
1006 if ( streamChannels > 1 ) monoMode = false;
\r
1007 channelCounter -= streamChannels;
\r
// NOTE(review): the streamCount computation (around 1008-1010) is missing
// from the capture; bufferList is done with after this point.
1012 free( bufferList );
\r
1014 // Determine the buffer size.
\r
1015 AudioValueRange bufferRange;
\r
1016 dataSize = sizeof( AudioValueRange );
\r
1017 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
1018 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
1020 if ( result != noErr ) {
\r
1021 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
1022 errorText_ = errorStream_.str();
\r
// Clamp the caller's requested frame count into the device-supported range;
// MINIMIZE_LATENCY forces the device minimum.
1026 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1027 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
1028 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1030 // Set the buffer size. For multiple streams, I'm assuming we only
\r
1031 // need to make this setting for the master channel.
\r
1032 UInt32 theSize = (UInt32) *bufferSize;
\r
1033 dataSize = sizeof( UInt32 );
\r
1034 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1035 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1037 if ( result != noErr ) {
\r
1038 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1039 errorText_ = errorStream_.str();
\r
1043 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1044 // MUST be the same in both directions!
\r
1045 *bufferSize = theSize;
\r
1046 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1047 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1048 errorText_ = errorStream_.str();
\r
1052 stream_.bufferSize = *bufferSize;
\r
1053 stream_.nBuffers = 1;
\r
1055 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1056 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
// NOTE(review): the pid_t hog_pid declaration (around line 1057) is missing
// from the capture.
1058 dataSize = sizeof( hog_pid );
\r
1059 property.mSelector = kAudioDevicePropertyHogMode;
\r
1060 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1061 if ( result != noErr ) {
\r
1062 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1063 errorText_ = errorStream_.str();
\r
// Only claim hog mode if some other process (or no process) holds it.
1067 if ( hog_pid != getpid() ) {
\r
1068 hog_pid = getpid();
\r
1069 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1070 if ( result != noErr ) {
\r
1071 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1072 errorText_ = errorStream_.str();
\r
1078 // Check and if necessary, change the sample rate for the device.
\r
1079 Float64 nominalRate;
\r
1080 dataSize = sizeof( Float64 );
\r
1081 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1082 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1083 if ( result != noErr ) {
\r
1084 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1085 errorText_ = errorStream_.str();
\r
1089 // Only change the sample rate if off by more than 1 Hz.
\r
1090 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1092 // Set a property listener for the sample rate change
\r
1093 Float64 reportedRate = 0.0;
\r
1094 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1095 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1096 if ( result != noErr ) {
\r
1097 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1098 errorText_ = errorStream_.str();
\r
1102 nominalRate = (Float64) sampleRate;
\r
1103 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1104 if ( result != noErr ) {
\r
1105 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1106 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1107 errorText_ = errorStream_.str();
\r
// Poll until rateListener reports the new rate, giving up after ~5 seconds
// (5000 us per iteration, 5,000,000 us cap). NOTE(review): the usleep call
// inside the loop (around line 1116) is missing from this capture.
1111 // Now wait until the reported nominal rate is what we just set.
\r
1112 UInt32 microCounter = 0;
\r
1113 while ( reportedRate != nominalRate ) {
\r
1114 microCounter += 5000;
\r
1115 if ( microCounter > 5000000 ) break;
\r
1119 // Remove the property listener.
\r
1120 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1122 if ( microCounter > 5000000 ) {
\r
1123 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1124 errorText_ = errorStream_.str();
\r
1129 // Now set the stream format for all streams. Also, check the
\r
1130 // physical format of the device and change that if necessary.
\r
1131 AudioStreamBasicDescription description;
\r
1132 dataSize = sizeof( AudioStreamBasicDescription );
\r
1133 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1134 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1135 if ( result != noErr ) {
\r
1136 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1137 errorText_ = errorStream_.str();
\r
1141 // Set the sample rate and data format id. However, only make the
\r
1142 // change if the sample rate is not within 1.0 of the desired
\r
1143 // rate and the format is not linear pcm.
\r
1144 bool updateFormat = false;
\r
1145 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1146 description.mSampleRate = (Float64) sampleRate;
\r
1147 updateFormat = true;
\r
1150 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1151 description.mFormatID = kAudioFormatLinearPCM;
\r
1152 updateFormat = true;
\r
1155 if ( updateFormat ) {
\r
1156 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1157 if ( result != noErr ) {
\r
1158 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1159 errorText_ = errorStream_.str();
\r
1164 // Now check the physical format.
\r
1165 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1166 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1167 if ( result != noErr ) {
\r
1168 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1169 errorText_ = errorStream_.str();
\r
1173 //std::cout << "Current physical stream format:" << std::endl;
\r
1174 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1175 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1176 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1177 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1179 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1180 description.mFormatID = kAudioFormatLinearPCM;
\r
1181 //description.mSampleRate = (Float64) sampleRate;
\r
1182 AudioStreamBasicDescription testDescription = description;
\r
1183 UInt32 formatFlags;
\r
// Candidate physical formats, tried best-first. The pair's .first encodes
// bit depth; fractional values 24.2 / 24.4 distinguish the two
// 24-bit-in-4-bytes variants (the code below casts .first to UInt32 for the
// bit-depth comparison).
// NOTE(review): the vector is declared pair<UInt32, UInt32> while the
// push_back calls build pair<Float32, UInt32>, so the 24.2/24.4 fractions are
// truncated to 24 on insertion -- looks like a defect; confirm against the
// canonical RtAudio source, which declares the vector with Float32 first.
1185 // We'll try higher bit rates first and then work our way down.
\r
1186 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1187 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1188 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1189 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1190 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1191 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1192 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1193 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1194 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1196 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1198 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1200 bool setPhysicalFormat = false;
\r
1201 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1202 testDescription = description;
\r
1203 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1204 testDescription.mFormatFlags = physicalFormats[i].second;
\r
// 24-bit unpacked samples occupy 4 bytes per channel; all other candidates
// use bits/8 bytes. NOTE(review): "~(x & flag)" is a bitwise NOT of the masked
// value, which is non-zero (true) for BOTH packed and unpacked 24-bit entries;
// "!" was almost certainly intended -- confirm against upstream.
1205 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1206 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1208 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1209 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
// First candidate the device accepts wins.
1210 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1211 if ( result == noErr ) {
\r
1212 setPhysicalFormat = true;
\r
1213 //std::cout << "Updated physical stream format:" << std::endl;
\r
1214 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1215 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1216 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1217 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1222 if ( !setPhysicalFormat ) {
\r
1223 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1224 errorText_ = errorStream_.str();
\r
1227 } // done setting virtual/physical formats.
\r
// NOTE(review): the UInt32 latency declaration (around line 1230) is missing
// from the capture; a failed latency read is only a WARNING, not fatal.
1229 // Get the stream / device latency.
\r
1231 dataSize = sizeof( UInt32 );
\r
1232 property.mSelector = kAudioDevicePropertyLatency;
\r
1233 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1234 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1235 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1237 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1238 errorText_ = errorStream_.str();
\r
1239 error( RtAudioError::WARNING );
\r
1243 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1244 // always be presented in native-endian format, so we should never
\r
1245 // need to byte swap.
\r
1246 stream_.doByteSwap[mode] = false;
\r
1248 // From the CoreAudio documentation, PCM data must be supplied as
\r
1250 stream_.userFormat = format;
\r
1251 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
// Device channel bookkeeping: one interleaved stream exposes its full frame
// width; multiple streams are treated as exactly the requested channel count.
1253 if ( streamCount == 1 )
\r
1254 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1255 else // multiple streams
\r
1256 stream_.nDeviceChannels[mode] = channels;
\r
1257 stream_.nUserChannels[mode] = channels;
\r
1258 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1259 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1260 else stream_.userInterleaved = true;
\r
1261 stream_.deviceInterleaved[mode] = true;
\r
1262 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1264 // Set flags for buffer conversion.
\r
1265 stream_.doConvertBuffer[mode] = false;
\r
1266 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1267 stream_.doConvertBuffer[mode] = true;
\r
1268 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1269 stream_.doConvertBuffer[mode] = true;
\r
1270 if ( streamCount == 1 ) {
\r
1271 if ( stream_.nUserChannels[mode] > 1 &&
\r
1272 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1273 stream_.doConvertBuffer[mode] = true;
\r
1275 else if ( monoMode && stream_.userInterleaved )
\r
1276 stream_.doConvertBuffer[mode] = true;
\r
1278 // Allocate our CoreHandle structure for the stream.
\r
1279 CoreHandle *handle = 0;
\r
1280 if ( stream_.apiHandle == 0 ) {
\r
// NOTE(review): the "try {" line (1281) pairing with the catch below is
// missing from the capture.
1282 handle = new CoreHandle;
\r
1284 catch ( std::bad_alloc& ) {
\r
1285 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1289 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1290 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1293 stream_.apiHandle = (void *) handle;
\r
// Second direction of a duplex stream: reuse the existing handle.
1296 handle = (CoreHandle *) stream_.apiHandle;
\r
1297 handle->iStream[mode] = firstStream;
\r
1298 handle->nStreams[mode] = streamCount;
\r
1299 handle->id[mode] = id;
\r
1301 // Allocate necessary internal buffers.
\r
1302 unsigned long bufferBytes;
\r
1303 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1304 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1305 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
// NOTE(review): memset before the NULL check dereferences a failed malloc;
// the check on the next line comes too late -- verify against upstream.
1306 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1307 if ( stream_.userBuffer[mode] == NULL ) {
\r
1308 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1312 // If possible, we will make use of the CoreAudio stream buffers as
\r
1313 // "device buffers". However, we can't do this if using multiple
\r
1315 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1317 bool makeBuffer = true;
\r
1318 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
// For duplex, reuse the output-side device buffer when it is already big
// enough for the input side.
1319 if ( mode == INPUT ) {
\r
1320 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1321 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1322 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1326 if ( makeBuffer ) {
\r
1327 bufferBytes *= *bufferSize;
\r
1328 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1329 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1330 if ( stream_.deviceBuffer == NULL ) {
\r
1331 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1337 stream_.sampleRate = sampleRate;
\r
1338 stream_.device[mode] = device;
\r
1339 stream_.state = STREAM_STOPPED;
\r
1340 stream_.callbackInfo.object = (void *) this;
\r
1342 // Setup the buffer conversion information structure.
\r
1343 if ( stream_.doConvertBuffer[mode] ) {
\r
1344 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1345 else setConvertInfo( mode, channelOffset );
\r
// Same physical device for both directions => one shared I/O proc.
1348 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1349 // Only one callback procedure per device.
\r
1350 stream_.mode = DUPLEX;
\r
1352 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1353 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
// NOTE(review): the "#else" (1354) and "#endif" (1357) lines of this
// conditional block are missing from the capture.
1355 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1356 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1358 if ( result != noErr ) {
\r
1359 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1360 errorText_ = errorStream_.str();
\r
1363 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1364 stream_.mode = DUPLEX;
\r
1366 stream_.mode = mode;
\r
1369 // Setup the device property listener for over/underload.
\r
1370 property.mSelector = kAudioDeviceProcessorOverload;
\r
1371 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1372 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
// Error unwind: release everything allocated above. NOTE(review): the
// "return SUCCESS;" and "error:" label lines (around 1373-1377) are missing
// from the capture.
1378 pthread_cond_destroy( &handle->condition );
\r
1380 stream_.apiHandle = 0;
\r
1383 for ( int i=0; i<2; i++ ) {
\r
1384 if ( stream_.userBuffer[i] ) {
\r
1385 free( stream_.userBuffer[i] );
\r
1386 stream_.userBuffer[i] = 0;
\r
1390 if ( stream_.deviceBuffer ) {
\r
1391 free( stream_.deviceBuffer );
\r
1392 stream_.deviceBuffer = 0;
\r
1395 stream_.state = STREAM_CLOSED;
\r
// RtApiCore::closeStream -- tear down the open stream: for each direction in
// use, remove the processor-overload (xrun) property listener, stop the
// device if still running, and destroy/remove the I/O proc; then free the
// user and device buffers, destroy the pthread condition variable, and reset
// bookkeeping to UNINITIALIZED / STREAM_CLOSED.
// NOTE(review): mangled capture -- stale line-number prefixes, and the gaps
// in that numbering show missing braces and the #else/#endif lines of the
// MAC_OS_X_VERSION_10_5 conditionals. Recover the canonical RtAudio.cpp
// before compiling.
1399 void RtApiCore :: closeStream( void )
\r
1401 if ( stream_.state == STREAM_CLOSED ) {
\r
1402 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1403 error( RtAudioError::WARNING );
\r
1407 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
// Output side (index 0) teardown.
1408 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1410 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
1411 kAudioObjectPropertyScopeGlobal,
\r
1412 kAudioObjectPropertyElementMaster };
\r
// Retarget the address at the overload listener registered in probeDeviceOpen.
1414 property.mSelector = kAudioDeviceProcessorOverload;
\r
1415 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1416 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
\r
1417 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
\r
1418 error( RtAudioError::WARNING );
\r
1421 if ( stream_.state == STREAM_RUNNING )
\r
1422 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1423 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1424 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1426 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1427 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
// Input side (index 1) teardown -- skipped for duplex on a single device,
// which shares the output-side I/O proc.
1431 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1433 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
1434 kAudioObjectPropertyScopeGlobal,
\r
1435 kAudioObjectPropertyElementMaster };
\r
1437 property.mSelector = kAudioDeviceProcessorOverload;
\r
1438 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1439 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
\r
1440 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
\r
1441 error( RtAudioError::WARNING );
\r
1444 if ( stream_.state == STREAM_RUNNING )
\r
1445 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1446 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1447 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1449 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1450 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
// Release buffers for both directions.
1454 for ( int i=0; i<2; i++ ) {
\r
1455 if ( stream_.userBuffer[i] ) {
\r
1456 free( stream_.userBuffer[i] );
\r
1457 stream_.userBuffer[i] = 0;
\r
1461 if ( stream_.deviceBuffer ) {
\r
1462 free( stream_.deviceBuffer );
\r
1463 stream_.deviceBuffer = 0;
\r
1466 // Destroy pthread condition variable.
\r
1467 pthread_cond_destroy( &handle->condition );
\r
1469 stream_.apiHandle = 0;
\r
1471 stream_.mode = UNINITIALIZED;
\r
1472 stream_.state = STREAM_CLOSED;
\r
// RtApiCore::startStream -- start the CoreAudio I/O proc(s) for the open
// stream: AudioDeviceStart on the output device (index 0) and, when the
// input direction uses a different device, on the input device (index 1).
// Resets the drain handshake state (drainCounter/internalDrain) and marks the
// stream STREAM_RUNNING; raises SYSTEM_ERROR if any start call failed.
// NOTE(review): mangled capture -- stale line-number prefixes; the numbering
// gaps show missing braces, "return;" after the WARNING, and the
// unlock/cleanup lines before the final error check. Recover the canonical
// RtAudio.cpp before compiling.
1475 void RtApiCore :: startStream( void )
\r
1478 if ( stream_.state == STREAM_RUNNING ) {
\r
1479 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1480 error( RtAudioError::WARNING );
\r
1484 OSStatus result = noErr;
\r
1485 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1486 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1488 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1489 if ( result != noErr ) {
\r
1490 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1491 errorText_ = errorStream_.str();
\r
// Separate input device only needs its own start when it differs from the
// output device (single-device duplex shares one I/O proc).
1496 if ( stream_.mode == INPUT ||
\r
1497 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1499 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1500 if ( result != noErr ) {
\r
1501 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1502 errorText_ = errorStream_.str();
\r
// Reset the drain handshake used by stopStream()/callbackEvent().
1507 handle->drainCounter = 0;
\r
1508 handle->internalDrain = false;
\r
1509 stream_.state = STREAM_RUNNING;
\r
1512 if ( result == noErr ) return;
\r
1513 error( RtAudioError::SYSTEM_ERROR );
\r
// RtApiCore::stopStream -- stop the running stream. For an output/duplex
// stream it first performs the drain handshake: setting drainCounter = 2
// tells callbackEvent() to write zeros and signal handle->condition once the
// output has drained, and pthread_cond_wait blocks here until that signal.
// It then calls AudioDeviceStop on the output device and, for a separate
// input device, on the input device as well; finally marks STREAM_STOPPED or
// raises SYSTEM_ERROR.
// NOTE(review): mangled capture -- stale line-number prefixes; numbering gaps
// show missing braces, "return;" after the WARNING, and the unlock label
// lines. Also, pthread_cond_wait requires stream_.mutex to be held by the
// caller -- the locking lines are not visible in this capture; confirm
// against the canonical RtAudio.cpp.
1516 void RtApiCore :: stopStream( void )
\r
1519 if ( stream_.state == STREAM_STOPPED ) {
\r
1520 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1521 error( RtAudioError::WARNING );
\r
1525 OSStatus result = noErr;
\r
1526 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1527 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// drainCounter == 0 means no drain in progress yet: request one and wait.
1529 if ( handle->drainCounter == 0 ) {
\r
1530 handle->drainCounter = 2;
\r
1531 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1534 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1535 if ( result != noErr ) {
\r
1536 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1537 errorText_ = errorStream_.str();
\r
// Separate input device needs its own stop call.
1542 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1544 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1545 if ( result != noErr ) {
\r
1546 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1547 errorText_ = errorStream_.str();
\r
1552 stream_.state = STREAM_STOPPED;
\r
1555 if ( result == noErr ) return;
\r
1556 error( RtAudioError::SYSTEM_ERROR );
\r
// RtApiCore::abortStream -- stop the stream immediately, discarding any
// pending output. Setting drainCounter = 2 before the stop skips the
// drain-and-wait handshake in stopStream() (callbackEvent() will output
// zeros instead of draining user data).
// NOTE(review): mangled capture -- the tail of this function (the call into
// stopStream() and the closing brace, around original lines 1570-1572) is
// missing from this text; recover the canonical RtAudio.cpp.
1559 void RtApiCore :: abortStream( void )
\r
1562 if ( stream_.state == STREAM_STOPPED ) {
\r
1563 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1564 error( RtAudioError::WARNING );
\r
1568 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1569 handle->drainCounter = 2;
\r
1574 // This function will be called by a spawned thread when the user
\r
1575 // callback function signals that the stream should be stopped or
\r
1576 // aborted. It is better to handle it this way because the
\r
1577 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1578 // function is called.
\r
// coreStopStream -- pthread entry point spawned by callbackEvent() when the
// user callback requests an internal stop (internalDrain): it calls
// RtApiCore::stopStream() from outside the CoreAudio callback thread (see
// the comment block above) and then exits the thread.
// NOTE(review): mangled capture -- the function's braces (original lines
// 1580/1586) are missing from this text.
1579 static void *coreStopStream( void *ptr )
\r
1581 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1582 RtApiCore *object = (RtApiCore *) info->object;
\r
1584 object->stopStream();
\r
1585 pthread_exit( NULL );
\r
1588 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1589 const AudioBufferList *inBufferList,
\r
1590 const AudioBufferList *outBufferList )
\r
1592 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1593 if ( stream_.state == STREAM_CLOSED ) {
\r
1594 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1595 error( RtAudioError::WARNING );
\r
1599 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1600 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1602 // Check if we were draining the stream and signal is finished.
\r
1603 if ( handle->drainCounter > 3 ) {
\r
1604 ThreadHandle threadId;
\r
1606 stream_.state = STREAM_STOPPING;
\r
1607 if ( handle->internalDrain == true )
\r
1608 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1609 else // external call to stopStream()
\r
1610 pthread_cond_signal( &handle->condition );
\r
1614 AudioDeviceID outputDevice = handle->id[0];
\r
1616 // Invoke user callback to get fresh output data UNLESS we are
\r
1617 // draining stream or duplex mode AND the input/output devices are
\r
1618 // different AND this function is called for the input device.
\r
1619 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1620 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1621 double streamTime = getStreamTime();
\r
1622 RtAudioStreamStatus status = 0;
\r
1623 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1624 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1625 handle->xrun[0] = false;
\r
1627 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1628 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1629 handle->xrun[1] = false;
\r
1632 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1633 stream_.bufferSize, streamTime, status, info->userData );
\r
1634 if ( cbReturnValue == 2 ) {
\r
1635 stream_.state = STREAM_STOPPING;
\r
1636 handle->drainCounter = 2;
\r
1640 else if ( cbReturnValue == 1 ) {
\r
1641 handle->drainCounter = 1;
\r
1642 handle->internalDrain = true;
\r
1646 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1648 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1650 if ( handle->nStreams[0] == 1 ) {
\r
1651 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1653 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1655 else { // fill multiple streams with zeros
\r
1656 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1657 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1659 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1663 else if ( handle->nStreams[0] == 1 ) {
\r
1664 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1665 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1666 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1668 else { // copy from user buffer
\r
1669 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1670 stream_.userBuffer[0],
\r
1671 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1674 else { // fill multiple streams
\r
1675 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1676 if ( stream_.doConvertBuffer[0] ) {
\r
1677 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1678 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1681 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1682 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1683 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1684 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1685 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1688 else { // fill multiple multi-channel streams with interleaved data
\r
1689 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1690 Float32 *out, *in;
\r
1692 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1693 UInt32 inChannels = stream_.nUserChannels[0];
\r
1694 if ( stream_.doConvertBuffer[0] ) {
\r
1695 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1696 inChannels = stream_.nDeviceChannels[0];
\r
1699 if ( inInterleaved ) inOffset = 1;
\r
1700 else inOffset = stream_.bufferSize;
\r
1702 channelsLeft = inChannels;
\r
1703 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1705 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1706 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1709 // Account for possible channel offset in first stream
\r
1710 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1711 streamChannels -= stream_.channelOffset[0];
\r
1712 outJump = stream_.channelOffset[0];
\r
1716 // Account for possible unfilled channels at end of the last stream
\r
1717 if ( streamChannels > channelsLeft ) {
\r
1718 outJump = streamChannels - channelsLeft;
\r
1719 streamChannels = channelsLeft;
\r
1722 // Determine input buffer offsets and skips
\r
1723 if ( inInterleaved ) {
\r
1724 inJump = inChannels;
\r
1725 in += inChannels - channelsLeft;
\r
1729 in += (inChannels - channelsLeft) * inOffset;
\r
1732 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1733 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1734 *out++ = in[j*inOffset];
\r
1739 channelsLeft -= streamChannels;
\r
1745 // Don't bother draining input
\r
1746 if ( handle->drainCounter ) {
\r
1747 handle->drainCounter++;
\r
1751 AudioDeviceID inputDevice;
\r
1752 inputDevice = handle->id[1];
\r
1753 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1755 if ( handle->nStreams[1] == 1 ) {
\r
1756 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1757 convertBuffer( stream_.userBuffer[1],
\r
1758 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1759 stream_.convertInfo[1] );
\r
1761 else { // copy to user buffer
\r
1762 memcpy( stream_.userBuffer[1],
\r
1763 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1764 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1767 else { // read from multiple streams
\r
1768 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1769 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1771 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1772 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1773 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1774 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1775 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1778 else { // read from multiple multi-channel streams
\r
1779 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1780 Float32 *out, *in;
\r
1782 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1783 UInt32 outChannels = stream_.nUserChannels[1];
\r
1784 if ( stream_.doConvertBuffer[1] ) {
\r
1785 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1786 outChannels = stream_.nDeviceChannels[1];
\r
1789 if ( outInterleaved ) outOffset = 1;
\r
1790 else outOffset = stream_.bufferSize;
\r
1792 channelsLeft = outChannels;
\r
1793 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1795 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1796 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1799 // Account for possible channel offset in first stream
\r
1800 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1801 streamChannels -= stream_.channelOffset[1];
\r
1802 inJump = stream_.channelOffset[1];
\r
1806 // Account for possible unread channels at end of the last stream
\r
1807 if ( streamChannels > channelsLeft ) {
\r
1808 inJump = streamChannels - channelsLeft;
\r
1809 streamChannels = channelsLeft;
\r
1812 // Determine output buffer offsets and skips
\r
1813 if ( outInterleaved ) {
\r
1814 outJump = outChannels;
\r
1815 out += outChannels - channelsLeft;
\r
1819 out += (outChannels - channelsLeft) * outOffset;
\r
1822 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1823 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1824 out[j*outOffset] = *in++;
\r
1829 channelsLeft -= streamChannels;
\r
1833 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1834 convertBuffer( stream_.userBuffer[1],
\r
1835 stream_.deviceBuffer,
\r
1836 stream_.convertInfo[1] );
\r
1842 //MUTEX_UNLOCK( &stream_.mutex );
\r
1844 RtApi::tickStreamTime();
\r
1848 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1852 case kAudioHardwareNotRunningError:
\r
1853 return "kAudioHardwareNotRunningError";
\r
1855 case kAudioHardwareUnspecifiedError:
\r
1856 return "kAudioHardwareUnspecifiedError";
\r
1858 case kAudioHardwareUnknownPropertyError:
\r
1859 return "kAudioHardwareUnknownPropertyError";
\r
1861 case kAudioHardwareBadPropertySizeError:
\r
1862 return "kAudioHardwareBadPropertySizeError";
\r
1864 case kAudioHardwareIllegalOperationError:
\r
1865 return "kAudioHardwareIllegalOperationError";
\r
1867 case kAudioHardwareBadObjectError:
\r
1868 return "kAudioHardwareBadObjectError";
\r
1870 case kAudioHardwareBadDeviceError:
\r
1871 return "kAudioHardwareBadDeviceError";
\r
1873 case kAudioHardwareBadStreamError:
\r
1874 return "kAudioHardwareBadStreamError";
\r
1876 case kAudioHardwareUnsupportedOperationError:
\r
1877 return "kAudioHardwareUnsupportedOperationError";
\r
1879 case kAudioDeviceUnsupportedFormatError:
\r
1880 return "kAudioDeviceUnsupportedFormatError";
\r
1882 case kAudioDevicePermissionsError:
\r
1883 return "kAudioDevicePermissionsError";
\r
1886 return "CoreAudio unknown error";
\r
1890 //******************** End of __MACOSX_CORE__ *********************//
\r
1893 #if defined(__UNIX_JACK__)
\r
1895 // JACK is a low-latency audio server, originally written for the
\r
1896 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1897 // connect a number of different applications to an audio device, as
\r
1898 // well as allowing them to share audio between themselves.
\r
1900 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1901 // have ports connected to the server. The JACK server is typically
\r
1902 // started in a terminal as follows:
\r
1904 // .jackd -d alsa -d hw:0
\r
1906 // or through an interface program such as qjackctl. Many of the
\r
1907 // parameters normally set for a stream are fixed by the JACK server
\r
1908 // and can be specified when the JACK server is started. In
\r
1911 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1913 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1914 // frames, and number of buffers = 4. Once the server is running, it
\r
1915 // is not possible to override these values. If the values are not
\r
1916 // specified in the command-line, the JACK server uses default values.
\r
1918 // The JACK server does not have to be running when an instance of
\r
1919 // RtApiJack is created, though the function getDeviceCount() will
\r
1920 // report 0 devices found until JACK has been started. When no
\r
1921 // devices are available (i.e., the JACK server is not running), a
\r
1922 // stream cannot be opened.
\r
1924 #include <jack/jack.h>
\r
1925 #include <unistd.h>
\r
1928 // A structure to hold various information related to the Jack API
\r
1929 // implementation.
\r
1930 struct JackHandle {
\r
1931 jack_client_t *client;
\r
1932 jack_port_t **ports[2];
\r
1933 std::string deviceName[2];
\r
1935 pthread_cond_t condition;
\r
1936 int drainCounter; // Tracks callback counts when draining
\r
1937 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1940 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
1943 static void jackSilentError( const char * ) {};
\r
1945 RtApiJack :: RtApiJack()
\r
1946 :shouldAutoconnect_(true) {
\r
1947 // Nothing to do here.
\r
1948 #if !defined(__RTAUDIO_DEBUG__)
\r
1949 // Turn off Jack's internal error reporting.
\r
1950 jack_set_error_function( &jackSilentError );
\r
1954 RtApiJack :: ~RtApiJack()
\r
1956 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1959 unsigned int RtApiJack :: getDeviceCount( void )
\r
1961 // See if we can become a jack client.
\r
1962 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1963 jack_status_t *status = NULL;
\r
1964 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1965 if ( client == 0 ) return 0;
\r
1967 const char **ports;
\r
1968 std::string port, previousPort;
\r
1969 unsigned int nChannels = 0, nDevices = 0;
\r
1970 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1972 // Parse the port names up to the first colon (:).
\r
1973 size_t iColon = 0;
\r
1975 port = (char *) ports[ nChannels ];
\r
1976 iColon = port.find(":");
\r
1977 if ( iColon != std::string::npos ) {
\r
1978 port = port.substr( 0, iColon + 1 );
\r
1979 if ( port != previousPort ) {
\r
1981 previousPort = port;
\r
1984 } while ( ports[++nChannels] );
\r
1988 jack_client_close( client );
\r
1992 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1994 RtAudio::DeviceInfo info;
\r
1995 info.probed = false;
\r
1997 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1998 jack_status_t *status = NULL;
\r
1999 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
2000 if ( client == 0 ) {
\r
2001 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
2002 error( RtAudioError::WARNING );
\r
2006 const char **ports;
\r
2007 std::string port, previousPort;
\r
2008 unsigned int nPorts = 0, nDevices = 0;
\r
2009 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2011 // Parse the port names up to the first colon (:).
\r
2012 size_t iColon = 0;
\r
2014 port = (char *) ports[ nPorts ];
\r
2015 iColon = port.find(":");
\r
2016 if ( iColon != std::string::npos ) {
\r
2017 port = port.substr( 0, iColon );
\r
2018 if ( port != previousPort ) {
\r
2019 if ( nDevices == device ) info.name = port;
\r
2021 previousPort = port;
\r
2024 } while ( ports[++nPorts] );
\r
2028 if ( device >= nDevices ) {
\r
2029 jack_client_close( client );
\r
2030 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
2031 error( RtAudioError::INVALID_USE );
\r
2035 // Get the current jack server sample rate.
\r
2036 info.sampleRates.clear();
\r
2038 info.preferredSampleRate = jack_get_sample_rate( client );
\r
2039 info.sampleRates.push_back( info.preferredSampleRate );
\r
2041 // Count the available ports containing the client name as device
\r
2042 // channels. Jack "input ports" equal RtAudio output channels.
\r
2043 unsigned int nChannels = 0;
\r
2044 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
2046 while ( ports[ nChannels ] ) nChannels++;
\r
2048 info.outputChannels = nChannels;
\r
2051 // Jack "output ports" equal RtAudio input channels.
\r
2053 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
2055 while ( ports[ nChannels ] ) nChannels++;
\r
2057 info.inputChannels = nChannels;
\r
2060 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2061 jack_client_close(client);
\r
2062 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2063 error( RtAudioError::WARNING );
\r
2067 // If device opens for both playback and capture, we determine the channels.
\r
2068 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2069 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2071 // Jack always uses 32-bit floats.
\r
2072 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2074 // Jack doesn't provide default devices so we'll use the first available one.
\r
2075 if ( device == 0 && info.outputChannels > 0 )
\r
2076 info.isDefaultOutput = true;
\r
2077 if ( device == 0 && info.inputChannels > 0 )
\r
2078 info.isDefaultInput = true;
\r
2080 jack_client_close(client);
\r
2081 info.probed = true;
\r
2085 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2087 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2089 RtApiJack *object = (RtApiJack *) info->object;
\r
2090 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2095 // This function will be called by a spawned thread when the Jack
\r
2096 // server signals that it is shutting down. It is necessary to handle
\r
2097 // it this way because the jackShutdown() function must return before
\r
2098 // the jack_deactivate() function (in closeStream()) will return.
\r
2099 static void *jackCloseStream( void *ptr )
\r
2101 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2102 RtApiJack *object = (RtApiJack *) info->object;
\r
2104 object->closeStream();
\r
2106 pthread_exit( NULL );
\r
2108 static void jackShutdown( void *infoPointer )
\r
2110 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2111 RtApiJack *object = (RtApiJack *) info->object;
\r
2113 // Check current stream state. If stopped, then we'll assume this
\r
2114 // was called as a result of a call to RtApiJack::stopStream (the
\r
2115 // deactivation of a client handle causes this function to be called).
\r
2116 // If not, we'll assume the Jack server is shutting down or some
\r
2117 // other problem occurred and we should close the stream.
\r
2118 if ( object->isStreamRunning() == false ) return;
\r
2120 ThreadHandle threadId;
\r
2121 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2122 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2125 static int jackXrun( void *infoPointer )
\r
2127 JackHandle *handle = (JackHandle *) infoPointer;
\r
2129 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2130 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2135 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2136 unsigned int firstChannel, unsigned int sampleRate,
\r
2137 RtAudioFormat format, unsigned int *bufferSize,
\r
2138 RtAudio::StreamOptions *options )
\r
2140 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2142 // Look for jack server and try to become a client (only do once per stream).
\r
2143 jack_client_t *client = 0;
\r
2144 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2145 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2146 jack_status_t *status = NULL;
\r
2147 if ( options && !options->streamName.empty() )
\r
2148 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2150 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2151 if ( client == 0 ) {
\r
2152 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2153 error( RtAudioError::WARNING );
\r
2158 // The handle must have been created on an earlier pass.
\r
2159 client = handle->client;
\r
2162 const char **ports;
\r
2163 std::string port, previousPort, deviceName;
\r
2164 unsigned int nPorts = 0, nDevices = 0;
\r
2165 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2167 // Parse the port names up to the first colon (:).
\r
2168 size_t iColon = 0;
\r
2170 port = (char *) ports[ nPorts ];
\r
2171 iColon = port.find(":");
\r
2172 if ( iColon != std::string::npos ) {
\r
2173 port = port.substr( 0, iColon );
\r
2174 if ( port != previousPort ) {
\r
2175 if ( nDevices == device ) deviceName = port;
\r
2177 previousPort = port;
\r
2180 } while ( ports[++nPorts] );
\r
2184 if ( device >= nDevices ) {
\r
2185 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2189 // Count the available ports containing the client name as device
\r
2190 // channels. Jack "input ports" equal RtAudio output channels.
\r
2191 unsigned int nChannels = 0;
\r
2192 unsigned long flag = JackPortIsInput;
\r
2193 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2194 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2196 while ( ports[ nChannels ] ) nChannels++;
\r
2200 // Compare the jack ports for specified client to the requested number of channels.
\r
2201 if ( nChannels < (channels + firstChannel) ) {
\r
2202 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2203 errorText_ = errorStream_.str();
\r
2207 // Check the jack server sample rate.
\r
2208 unsigned int jackRate = jack_get_sample_rate( client );
\r
2209 if ( sampleRate != jackRate ) {
\r
2210 jack_client_close( client );
\r
2211 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2212 errorText_ = errorStream_.str();
\r
2215 stream_.sampleRate = jackRate;
\r
2217 // Get the latency of the JACK port.
\r
2218 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2219 if ( ports[ firstChannel ] ) {
\r
2220 // Added by Ge Wang
\r
2221 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2222 // the range (usually the min and max are equal)
\r
2223 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2224 // get the latency range
\r
2225 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2226 // be optimistic, use the min!
\r
2227 stream_.latency[mode] = latrange.min;
\r
2228 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2232 // The jack server always uses 32-bit floating-point data.
\r
2233 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2234 stream_.userFormat = format;
\r
2236 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2237 else stream_.userInterleaved = true;
\r
2239 // Jack always uses non-interleaved buffers.
\r
2240 stream_.deviceInterleaved[mode] = false;
\r
2242 // Jack always provides host byte-ordered data.
\r
2243 stream_.doByteSwap[mode] = false;
\r
2245 // Get the buffer size. The buffer size and number of buffers
\r
2246 // (periods) is set when the jack server is started.
\r
2247 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2248 *bufferSize = stream_.bufferSize;
\r
2250 stream_.nDeviceChannels[mode] = channels;
\r
2251 stream_.nUserChannels[mode] = channels;
\r
2253 // Set flags for buffer conversion.
\r
2254 stream_.doConvertBuffer[mode] = false;
\r
2255 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2256 stream_.doConvertBuffer[mode] = true;
\r
2257 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2258 stream_.nUserChannels[mode] > 1 )
\r
2259 stream_.doConvertBuffer[mode] = true;
\r
2261 // Allocate our JackHandle structure for the stream.
\r
2262 if ( handle == 0 ) {
\r
2264 handle = new JackHandle;
\r
2266 catch ( std::bad_alloc& ) {
\r
2267 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2271 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2272 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2275 stream_.apiHandle = (void *) handle;
\r
2276 handle->client = client;
\r
2278 handle->deviceName[mode] = deviceName;
\r
2280 // Allocate necessary internal buffers.
\r
2281 unsigned long bufferBytes;
\r
2282 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2283 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2284 if ( stream_.userBuffer[mode] == NULL ) {
\r
2285 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2289 if ( stream_.doConvertBuffer[mode] ) {
\r
2291 bool makeBuffer = true;
\r
2292 if ( mode == OUTPUT )
\r
2293 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2294 else { // mode == INPUT
\r
2295 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2296 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2297 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2298 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2302 if ( makeBuffer ) {
\r
2303 bufferBytes *= *bufferSize;
\r
2304 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2305 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2306 if ( stream_.deviceBuffer == NULL ) {
\r
2307 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2313 // Allocate memory for the Jack ports (channels) identifiers.
\r
2314 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2315 if ( handle->ports[mode] == NULL ) {
\r
2316 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2320 stream_.device[mode] = device;
\r
2321 stream_.channelOffset[mode] = firstChannel;
\r
2322 stream_.state = STREAM_STOPPED;
\r
2323 stream_.callbackInfo.object = (void *) this;
\r
2325 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2326 // We had already set up the stream for output.
\r
2327 stream_.mode = DUPLEX;
\r
2329 stream_.mode = mode;
\r
2330 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2331 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2332 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2335 // Register our ports.
\r
2337 if ( mode == OUTPUT ) {
\r
2338 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2339 snprintf( label, 64, "outport %d", i );
\r
2340 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2341 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2345 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2346 snprintf( label, 64, "inport %d", i );
\r
2347 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2348 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2352 // Setup the buffer conversion information structure. We don't use
\r
2353 // buffers to do channel offsets, so we override that parameter
\r
2355 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2357 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
\r
2363 pthread_cond_destroy( &handle->condition );
\r
2364 jack_client_close( handle->client );
\r
2366 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2367 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2370 stream_.apiHandle = 0;
\r
2373 for ( int i=0; i<2; i++ ) {
\r
2374 if ( stream_.userBuffer[i] ) {
\r
2375 free( stream_.userBuffer[i] );
\r
2376 stream_.userBuffer[i] = 0;
\r
2380 if ( stream_.deviceBuffer ) {
\r
2381 free( stream_.deviceBuffer );
\r
2382 stream_.deviceBuffer = 0;
\r
2388 void RtApiJack :: closeStream( void )
\r
2390 if ( stream_.state == STREAM_CLOSED ) {
\r
2391 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2392 error( RtAudioError::WARNING );
\r
2396 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2399 if ( stream_.state == STREAM_RUNNING )
\r
2400 jack_deactivate( handle->client );
\r
2402 jack_client_close( handle->client );
\r
2406 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2407 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2408 pthread_cond_destroy( &handle->condition );
\r
2410 stream_.apiHandle = 0;
\r
2413 for ( int i=0; i<2; i++ ) {
\r
2414 if ( stream_.userBuffer[i] ) {
\r
2415 free( stream_.userBuffer[i] );
\r
2416 stream_.userBuffer[i] = 0;
\r
2420 if ( stream_.deviceBuffer ) {
\r
2421 free( stream_.deviceBuffer );
\r
2422 stream_.deviceBuffer = 0;
\r
2425 stream_.mode = UNINITIALIZED;
\r
2426 stream_.state = STREAM_CLOSED;
\r
2429 void RtApiJack :: startStream( void )
\r
2432 if ( stream_.state == STREAM_RUNNING ) {
\r
2433 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2434 error( RtAudioError::WARNING );
\r
2438 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2439 int result = jack_activate( handle->client );
\r
2441 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2445 const char **ports;
\r
2447 // Get the list of available ports.
\r
2448 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
\r
2450 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2451 if ( ports == NULL) {
\r
2452 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2456 // Now make the port connections. Since RtAudio wasn't designed to
\r
2457 // allow the user to select particular channels of a device, we'll
\r
2458 // just open the first "nChannels" ports with offset.
\r
2459 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2461 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2462 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2465 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2472 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
\r
2474 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2475 if ( ports == NULL) {
\r
2476 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2480 // Now make the port connections. See note above.
\r
2481 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2483 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2484 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2487 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2494 handle->drainCounter = 0;
\r
2495 handle->internalDrain = false;
\r
2496 stream_.state = STREAM_RUNNING;
\r
2499 if ( result == 0 ) return;
\r
2500 error( RtAudioError::SYSTEM_ERROR );
\r
2503 void RtApiJack :: stopStream( void )
\r
2506 if ( stream_.state == STREAM_STOPPED ) {
\r
2507 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2508 error( RtAudioError::WARNING );
\r
2512 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2513 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2515 if ( handle->drainCounter == 0 ) {
\r
2516 handle->drainCounter = 2;
\r
2517 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2521 jack_deactivate( handle->client );
\r
2522 stream_.state = STREAM_STOPPED;
\r
2525 void RtApiJack :: abortStream( void )
\r
2528 if ( stream_.state == STREAM_STOPPED ) {
\r
2529 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2530 error( RtAudioError::WARNING );
\r
2534 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2535 handle->drainCounter = 2;
\r
2540 // This function will be called by a spawned thread when the user
\r
2541 // callback function signals that the stream should be stopped or
\r
2542 // aborted. It is necessary to handle it this way because the
\r
2543 // callbackEvent() function must return before the jack_deactivate()
\r
2544 // function will return.
\r
2545 static void *jackStopStream( void *ptr )
\r
2547 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2548 RtApiJack *object = (RtApiJack *) info->object;
\r
2550 object->stopStream();
\r
2551 pthread_exit( NULL );
\r
2554 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2556 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2557 if ( stream_.state == STREAM_CLOSED ) {
\r
2558 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2559 error( RtAudioError::WARNING );
\r
2562 if ( stream_.bufferSize != nframes ) {
\r
2563 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2564 error( RtAudioError::WARNING );
\r
2568 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2569 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2571 // Check if we were draining the stream and signal is finished.
\r
2572 if ( handle->drainCounter > 3 ) {
\r
2573 ThreadHandle threadId;
\r
2575 stream_.state = STREAM_STOPPING;
\r
2576 if ( handle->internalDrain == true )
\r
2577 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2579 pthread_cond_signal( &handle->condition );
\r
2583 // Invoke user callback first, to get fresh output data.
\r
2584 if ( handle->drainCounter == 0 ) {
\r
2585 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2586 double streamTime = getStreamTime();
\r
2587 RtAudioStreamStatus status = 0;
\r
2588 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2589 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2590 handle->xrun[0] = false;
\r
2592 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2593 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2594 handle->xrun[1] = false;
\r
2596 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2597 stream_.bufferSize, streamTime, status, info->userData );
\r
2598 if ( cbReturnValue == 2 ) {
\r
2599 stream_.state = STREAM_STOPPING;
\r
2600 handle->drainCounter = 2;
\r
2602 pthread_create( &id, NULL, jackStopStream, info );
\r
2605 else if ( cbReturnValue == 1 ) {
\r
2606 handle->drainCounter = 1;
\r
2607 handle->internalDrain = true;
\r
2611 jack_default_audio_sample_t *jackbuffer;
\r
2612 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2613 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2615 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2617 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2618 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2619 memset( jackbuffer, 0, bufferBytes );
\r
2623 else if ( stream_.doConvertBuffer[0] ) {
\r
2625 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2627 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2628 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2629 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2632 else { // no buffer conversion
\r
2633 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2634 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2635 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2640 // Don't bother draining input
\r
2641 if ( handle->drainCounter ) {
\r
2642 handle->drainCounter++;
\r
2646 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2648 if ( stream_.doConvertBuffer[1] ) {
\r
2649 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2650 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2651 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2653 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2655 else { // no buffer conversion
\r
2656 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2657 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2658 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2664 RtApi::tickStreamTime();
\r
2667 //******************** End of __UNIX_JACK__ *********************//
\r
2670 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2672 // The ASIO API is designed around a callback scheme, so this
\r
2673 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2674 // Jack. The primary constraint with ASIO is that it only allows
\r
2675 // access to a single driver at a time. Thus, it is not possible to
\r
2676 // have more than one simultaneous RtAudio stream.
\r
2678 // This implementation also requires a number of external ASIO files
\r
2679 // and a few global variables. The ASIO callback scheme does not
\r
2680 // allow for the passing of user data, so we must create a global
\r
2681 // pointer to our callbackInfo structure.
\r
2683 // On unix systems, we make use of a pthread condition variable.
\r
2684 // Since there is no equivalent in Windows, I hacked something based
\r
2685 // on information found in
\r
2686 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
#include "asiosys.h"
#include "asio.h"
#include "iasiothiscallresolver.h"
#include "asiodrivers.h"
\r
2694 static AsioDrivers drivers;
\r
2695 static ASIOCallbacks asioCallbacks;
\r
2696 static ASIODriverInfo driverInfo;
\r
2697 static CallbackInfo *asioCallbackInfo;
\r
2698 static bool asioXRun;
\r
2700 struct AsioHandle {
\r
2701 int drainCounter; // Tracks callback counts when draining
\r
2702 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2703 ASIOBufferInfo *bufferInfos;
\r
2707 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2710 // Function declarations (definitions at end of section)
\r
2711 static const char* getAsioErrorString( ASIOError result );
\r
2712 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2713 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2715 RtApiAsio :: RtApiAsio()
\r
2717 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2718 // CoInitialize beforehand, but it must be for appartment threading
\r
2719 // (in which case, CoInitilialize will return S_FALSE here).
\r
2720 coInitialized_ = false;
\r
2721 HRESULT hr = CoInitialize( NULL );
\r
2722 if ( FAILED(hr) ) {
\r
2723 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2724 error( RtAudioError::WARNING );
\r
2726 coInitialized_ = true;
\r
2728 drivers.removeCurrentDriver();
\r
2729 driverInfo.asioVersion = 2;
\r
2731 // See note in DirectSound implementation about GetDesktopWindow().
\r
2732 driverInfo.sysRef = GetForegroundWindow();
\r
2735 RtApiAsio :: ~RtApiAsio()
\r
2737 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2738 if ( coInitialized_ ) CoUninitialize();
\r
2741 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2743 return (unsigned int) drivers.asioGetNumDev();
\r
2746 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2748 RtAudio::DeviceInfo info;
\r
2749 info.probed = false;
\r
2752 unsigned int nDevices = getDeviceCount();
\r
2753 if ( nDevices == 0 ) {
\r
2754 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2755 error( RtAudioError::INVALID_USE );
\r
2759 if ( device >= nDevices ) {
\r
2760 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2761 error( RtAudioError::INVALID_USE );
\r
2765 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2766 if ( stream_.state != STREAM_CLOSED ) {
\r
2767 if ( device >= devices_.size() ) {
\r
2768 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2769 error( RtAudioError::WARNING );
\r
2772 return devices_[ device ];
\r
2775 char driverName[32];
\r
2776 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2777 if ( result != ASE_OK ) {
\r
2778 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2779 errorText_ = errorStream_.str();
\r
2780 error( RtAudioError::WARNING );
\r
2784 info.name = driverName;
\r
2786 if ( !drivers.loadDriver( driverName ) ) {
\r
2787 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2788 errorText_ = errorStream_.str();
\r
2789 error( RtAudioError::WARNING );
\r
2793 result = ASIOInit( &driverInfo );
\r
2794 if ( result != ASE_OK ) {
\r
2795 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2796 errorText_ = errorStream_.str();
\r
2797 error( RtAudioError::WARNING );
\r
2801 // Determine the device channel information.
\r
2802 long inputChannels, outputChannels;
\r
2803 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2804 if ( result != ASE_OK ) {
\r
2805 drivers.removeCurrentDriver();
\r
2806 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2807 errorText_ = errorStream_.str();
\r
2808 error( RtAudioError::WARNING );
\r
2812 info.outputChannels = outputChannels;
\r
2813 info.inputChannels = inputChannels;
\r
2814 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2815 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2817 // Determine the supported sample rates.
\r
2818 info.sampleRates.clear();
\r
2819 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2820 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2821 if ( result == ASE_OK ) {
\r
2822 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2824 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
2825 info.preferredSampleRate = SAMPLE_RATES[i];
\r
2829 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2830 ASIOChannelInfo channelInfo;
\r
2831 channelInfo.channel = 0;
\r
2832 channelInfo.isInput = true;
\r
2833 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2834 result = ASIOGetChannelInfo( &channelInfo );
\r
2835 if ( result != ASE_OK ) {
\r
2836 drivers.removeCurrentDriver();
\r
2837 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2838 errorText_ = errorStream_.str();
\r
2839 error( RtAudioError::WARNING );
\r
2843 info.nativeFormats = 0;
\r
2844 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2845 info.nativeFormats |= RTAUDIO_SINT16;
\r
2846 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2847 info.nativeFormats |= RTAUDIO_SINT32;
\r
2848 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2849 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2850 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2851 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2852 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2853 info.nativeFormats |= RTAUDIO_SINT24;
\r
2855 if ( info.outputChannels > 0 )
\r
2856 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2857 if ( info.inputChannels > 0 )
\r
2858 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2860 info.probed = true;
\r
2861 drivers.removeCurrentDriver();
\r
2865 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2867 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2868 object->callbackEvent( index );
\r
2871 void RtApiAsio :: saveDeviceInfo( void )
\r
2875 unsigned int nDevices = getDeviceCount();
\r
2876 devices_.resize( nDevices );
\r
2877 for ( unsigned int i=0; i<nDevices; i++ )
\r
2878 devices_[i] = getDeviceInfo( i );
\r
2881 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2882 unsigned int firstChannel, unsigned int sampleRate,
\r
2883 RtAudioFormat format, unsigned int *bufferSize,
\r
2884 RtAudio::StreamOptions *options )
\r
2885 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
2887 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
\r
2889 // For ASIO, a duplex stream MUST use the same driver.
\r
2890 if ( isDuplexInput && stream_.device[0] != device ) {
\r
2891 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2895 char driverName[32];
\r
2896 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2897 if ( result != ASE_OK ) {
\r
2898 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2899 errorText_ = errorStream_.str();
\r
2903 // Only load the driver once for duplex stream.
\r
2904 if ( !isDuplexInput ) {
\r
2905 // The getDeviceInfo() function will not work when a stream is open
\r
2906 // because ASIO does not allow multiple devices to run at the same
\r
2907 // time. Thus, we'll probe the system before opening a stream and
\r
2908 // save the results for use by getDeviceInfo().
\r
2909 this->saveDeviceInfo();
\r
2911 if ( !drivers.loadDriver( driverName ) ) {
\r
2912 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2913 errorText_ = errorStream_.str();
\r
2917 result = ASIOInit( &driverInfo );
\r
2918 if ( result != ASE_OK ) {
\r
2919 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2920 errorText_ = errorStream_.str();
\r
2925 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
\r
2926 bool buffersAllocated = false;
\r
2927 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2928 unsigned int nChannels;
\r
2931 // Check the device channel count.
\r
2932 long inputChannels, outputChannels;
\r
2933 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2934 if ( result != ASE_OK ) {
\r
2935 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2936 errorText_ = errorStream_.str();
\r
2940 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2941 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2942 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2943 errorText_ = errorStream_.str();
\r
2946 stream_.nDeviceChannels[mode] = channels;
\r
2947 stream_.nUserChannels[mode] = channels;
\r
2948 stream_.channelOffset[mode] = firstChannel;
\r
2950 // Verify the sample rate is supported.
\r
2951 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2952 if ( result != ASE_OK ) {
\r
2953 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2954 errorText_ = errorStream_.str();
\r
2958 // Get the current sample rate
\r
2959 ASIOSampleRate currentRate;
\r
2960 result = ASIOGetSampleRate( ¤tRate );
\r
2961 if ( result != ASE_OK ) {
\r
2962 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2963 errorText_ = errorStream_.str();
\r
2967 // Set the sample rate only if necessary
\r
2968 if ( currentRate != sampleRate ) {
\r
2969 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2970 if ( result != ASE_OK ) {
\r
2971 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2972 errorText_ = errorStream_.str();
\r
2977 // Determine the driver data type.
\r
2978 ASIOChannelInfo channelInfo;
\r
2979 channelInfo.channel = 0;
\r
2980 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2981 else channelInfo.isInput = true;
\r
2982 result = ASIOGetChannelInfo( &channelInfo );
\r
2983 if ( result != ASE_OK ) {
\r
2984 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2985 errorText_ = errorStream_.str();
\r
2989 // Assuming WINDOWS host is always little-endian.
\r
2990 stream_.doByteSwap[mode] = false;
\r
2991 stream_.userFormat = format;
\r
2992 stream_.deviceFormat[mode] = 0;
\r
2993 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2994 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2995 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2997 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2998 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2999 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
3001 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
3002 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
3003 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
3005 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
3006 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
3007 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
3009 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
3010 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
3011 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
3014 if ( stream_.deviceFormat[mode] == 0 ) {
\r
3015 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
3016 errorText_ = errorStream_.str();
\r
3020 // Set the buffer size. For a duplex stream, this will end up
\r
3021 // setting the buffer size based on the input constraints, which
\r
3023 long minSize, maxSize, preferSize, granularity;
\r
3024 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
3025 if ( result != ASE_OK ) {
\r
3026 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
3027 errorText_ = errorStream_.str();
\r
3031 if ( isDuplexInput ) {
\r
3032 // When this is the duplex input (output was opened before), then we have to use the same
\r
3033 // buffersize as the output, because it might use the preferred buffer size, which most
\r
3034 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
\r
3035 // So instead of throwing an error, make them equal. The caller uses the reference
\r
3036 // to the "bufferSize" param as usual to set up processing buffers.
\r
3038 *bufferSize = stream_.bufferSize;
\r
3041 if ( *bufferSize == 0 ) *bufferSize = preferSize;
\r
3042 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3043 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3044 else if ( granularity == -1 ) {
\r
3045 // Make sure bufferSize is a power of two.
\r
3046 int log2_of_min_size = 0;
\r
3047 int log2_of_max_size = 0;
\r
3049 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
3050 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
3051 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
3054 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
3055 int min_delta_num = log2_of_min_size;
\r
3057 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
3058 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
3059 if (current_delta < min_delta) {
\r
3060 min_delta = current_delta;
\r
3061 min_delta_num = i;
\r
3065 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
3066 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3067 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3069 else if ( granularity != 0 ) {
\r
3070 // Set to an even multiple of granularity, rounding up.
\r
3071 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
3076 // we don't use it anymore, see above!
\r
3077 // Just left it here for the case...
\r
3078 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
\r
3079 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3084 stream_.bufferSize = *bufferSize;
\r
3085 stream_.nBuffers = 2;
\r
3087 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3088 else stream_.userInterleaved = true;
\r
3090 // ASIO always uses non-interleaved buffers.
\r
3091 stream_.deviceInterleaved[mode] = false;
\r
3093 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3094 if ( handle == 0 ) {
\r
3096 handle = new AsioHandle;
\r
3098 catch ( std::bad_alloc& ) {
\r
3099 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3102 handle->bufferInfos = 0;
\r
3104 // Create a manual-reset event.
\r
3105 handle->condition = CreateEvent( NULL, // no security
\r
3106 TRUE, // manual-reset
\r
3107 FALSE, // non-signaled initially
\r
3108 NULL ); // unnamed
\r
3109 stream_.apiHandle = (void *) handle;
\r
3112 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3113 // and output separately, we'll have to dispose of previously
\r
3114 // created output buffers for a duplex stream.
\r
3115 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3116 ASIODisposeBuffers();
\r
3117 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3120 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3122 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3123 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3124 if ( handle->bufferInfos == NULL ) {
\r
3125 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3126 errorText_ = errorStream_.str();
\r
3130 ASIOBufferInfo *infos;
\r
3131 infos = handle->bufferInfos;
\r
3132 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3133 infos->isInput = ASIOFalse;
\r
3134 infos->channelNum = i + stream_.channelOffset[0];
\r
3135 infos->buffers[0] = infos->buffers[1] = 0;
\r
3137 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3138 infos->isInput = ASIOTrue;
\r
3139 infos->channelNum = i + stream_.channelOffset[1];
\r
3140 infos->buffers[0] = infos->buffers[1] = 0;
\r
3143 // prepare for callbacks
\r
3144 stream_.sampleRate = sampleRate;
\r
3145 stream_.device[mode] = device;
\r
3146 stream_.mode = isDuplexInput ? DUPLEX : mode;
\r
3148 // store this class instance before registering callbacks, that are going to use it
\r
3149 asioCallbackInfo = &stream_.callbackInfo;
\r
3150 stream_.callbackInfo.object = (void *) this;
\r
3152 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3153 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3154 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3155 asioCallbacks.asioMessage = &asioMessages;
\r
3156 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3157 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3158 if ( result != ASE_OK ) {
\r
3159 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
\r
3160 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
\r
3161 // in that case, let's be naïve and try that instead
\r
3162 *bufferSize = preferSize;
\r
3163 stream_.bufferSize = *bufferSize;
\r
3164 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3167 if ( result != ASE_OK ) {
\r
3168 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3169 errorText_ = errorStream_.str();
\r
3172 buffersAllocated = true;
\r
3173 stream_.state = STREAM_STOPPED;
\r
3175 // Set flags for buffer conversion.
\r
3176 stream_.doConvertBuffer[mode] = false;
\r
3177 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3178 stream_.doConvertBuffer[mode] = true;
\r
3179 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3180 stream_.nUserChannels[mode] > 1 )
\r
3181 stream_.doConvertBuffer[mode] = true;
\r
3183 // Allocate necessary internal buffers
\r
3184 unsigned long bufferBytes;
\r
3185 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3186 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3187 if ( stream_.userBuffer[mode] == NULL ) {
\r
3188 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3192 if ( stream_.doConvertBuffer[mode] ) {
\r
3194 bool makeBuffer = true;
\r
3195 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3196 if ( isDuplexInput && stream_.deviceBuffer ) {
\r
3197 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3198 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3201 if ( makeBuffer ) {
\r
3202 bufferBytes *= *bufferSize;
\r
3203 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3204 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3205 if ( stream_.deviceBuffer == NULL ) {
\r
3206 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3212 // Determine device latencies
\r
3213 long inputLatency, outputLatency;
\r
3214 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3215 if ( result != ASE_OK ) {
\r
3216 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3217 errorText_ = errorStream_.str();
\r
3218 error( RtAudioError::WARNING); // warn but don't fail
\r
3221 stream_.latency[0] = outputLatency;
\r
3222 stream_.latency[1] = inputLatency;
\r
3225 // Setup the buffer conversion information structure. We don't use
\r
3226 // buffers to do channel offsets, so we override that parameter
\r
3228 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3233 if ( !isDuplexInput ) {
\r
3234 // the cleanup for error in the duplex input, is done by RtApi::openStream
\r
3235 // So we clean up for single channel only
\r
3237 if ( buffersAllocated )
\r
3238 ASIODisposeBuffers();
\r
3240 drivers.removeCurrentDriver();
\r
3243 CloseHandle( handle->condition );
\r
3244 if ( handle->bufferInfos )
\r
3245 free( handle->bufferInfos );
\r
3248 stream_.apiHandle = 0;
\r
3252 if ( stream_.userBuffer[mode] ) {
\r
3253 free( stream_.userBuffer[mode] );
\r
3254 stream_.userBuffer[mode] = 0;
\r
3257 if ( stream_.deviceBuffer ) {
\r
3258 free( stream_.deviceBuffer );
\r
3259 stream_.deviceBuffer = 0;
\r
3264 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
3266 void RtApiAsio :: closeStream()
\r
3268 if ( stream_.state == STREAM_CLOSED ) {
\r
3269 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3270 error( RtAudioError::WARNING );
\r
3274 if ( stream_.state == STREAM_RUNNING ) {
\r
3275 stream_.state = STREAM_STOPPED;
\r
3278 ASIODisposeBuffers();
\r
3279 drivers.removeCurrentDriver();
\r
3281 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3283 CloseHandle( handle->condition );
\r
3284 if ( handle->bufferInfos )
\r
3285 free( handle->bufferInfos );
\r
3287 stream_.apiHandle = 0;
\r
3290 for ( int i=0; i<2; i++ ) {
\r
3291 if ( stream_.userBuffer[i] ) {
\r
3292 free( stream_.userBuffer[i] );
\r
3293 stream_.userBuffer[i] = 0;
\r
3297 if ( stream_.deviceBuffer ) {
\r
3298 free( stream_.deviceBuffer );
\r
3299 stream_.deviceBuffer = 0;
\r
3302 stream_.mode = UNINITIALIZED;
\r
3303 stream_.state = STREAM_CLOSED;
\r
// Guard so only one stop-stream thread is spawned per run; cleared in
// startStream().
bool stopThreadCalled = false;
\r
3308 void RtApiAsio :: startStream()
\r
3311 if ( stream_.state == STREAM_RUNNING ) {
\r
3312 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3313 error( RtAudioError::WARNING );
\r
3317 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3318 ASIOError result = ASIOStart();
\r
3319 if ( result != ASE_OK ) {
\r
3320 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3321 errorText_ = errorStream_.str();
\r
3325 handle->drainCounter = 0;
\r
3326 handle->internalDrain = false;
\r
3327 ResetEvent( handle->condition );
\r
3328 stream_.state = STREAM_RUNNING;
\r
3332 stopThreadCalled = false;
\r
3334 if ( result == ASE_OK ) return;
\r
3335 error( RtAudioError::SYSTEM_ERROR );
\r
3338 void RtApiAsio :: stopStream()
\r
3341 if ( stream_.state == STREAM_STOPPED ) {
\r
3342 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3343 error( RtAudioError::WARNING );
\r
3347 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3348 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3349 if ( handle->drainCounter == 0 ) {
\r
3350 handle->drainCounter = 2;
\r
3351 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3355 stream_.state = STREAM_STOPPED;
\r
3357 ASIOError result = ASIOStop();
\r
3358 if ( result != ASE_OK ) {
\r
3359 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3360 errorText_ = errorStream_.str();
\r
3363 if ( result == ASE_OK ) return;
\r
3364 error( RtAudioError::SYSTEM_ERROR );
\r
3367 void RtApiAsio :: abortStream()
\r
3370 if ( stream_.state == STREAM_STOPPED ) {
\r
3371 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3372 error( RtAudioError::WARNING );
\r
3376 // The following lines were commented-out because some behavior was
\r
3377 // noted where the device buffers need to be zeroed to avoid
\r
3378 // continuing sound, even when the device buffers are completely
\r
3379 // disposed. So now, calling abort is the same as calling stop.
\r
3380 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3381 // handle->drainCounter = 2;
\r
3385 // This function will be called by a spawned thread when the user
\r
3386 // callback function signals that the stream should be stopped or
\r
3387 // aborted. It is necessary to handle it this way because the
\r
3388 // callbackEvent() function must return before the ASIOStop()
\r
3389 // function will return.
\r
3390 static unsigned __stdcall asioStopStream( void *ptr )
\r
3392 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3393 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3395 object->stopStream();
\r
3396 _endthreadex( 0 );
\r
3400 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3402 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3403 if ( stream_.state == STREAM_CLOSED ) {
\r
3404 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3405 error( RtAudioError::WARNING );
\r
3409 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3410 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3412 // Check if we were draining the stream and signal if finished.
\r
3413 if ( handle->drainCounter > 3 ) {
\r
3415 stream_.state = STREAM_STOPPING;
\r
3416 if ( handle->internalDrain == false )
\r
3417 SetEvent( handle->condition );
\r
3418 else { // spawn a thread to stop the stream
\r
3419 unsigned threadId;
\r
3420 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3421 &stream_.callbackInfo, 0, &threadId );
\r
3426 // Invoke user callback to get fresh output data UNLESS we are
\r
3427 // draining stream.
\r
3428 if ( handle->drainCounter == 0 ) {
\r
3429 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3430 double streamTime = getStreamTime();
\r
3431 RtAudioStreamStatus status = 0;
\r
3432 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3433 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3436 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3437 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3440 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3441 stream_.bufferSize, streamTime, status, info->userData );
\r
3442 if ( cbReturnValue == 2 ) {
\r
3443 stream_.state = STREAM_STOPPING;
\r
3444 handle->drainCounter = 2;
\r
3445 unsigned threadId;
\r
3446 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3447 &stream_.callbackInfo, 0, &threadId );
\r
3450 else if ( cbReturnValue == 1 ) {
\r
3451 handle->drainCounter = 1;
\r
3452 handle->internalDrain = true;
\r
3456 unsigned int nChannels, bufferBytes, i, j;
\r
3457 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3458 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3460 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3462 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3464 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3465 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3466 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3470 else if ( stream_.doConvertBuffer[0] ) {
\r
3472 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3473 if ( stream_.doByteSwap[0] )
\r
3474 byteSwapBuffer( stream_.deviceBuffer,
\r
3475 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3476 stream_.deviceFormat[0] );
\r
3478 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3479 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3480 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3481 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3487 if ( stream_.doByteSwap[0] )
\r
3488 byteSwapBuffer( stream_.userBuffer[0],
\r
3489 stream_.bufferSize * stream_.nUserChannels[0],
\r
3490 stream_.userFormat );
\r
3492 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3493 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3494 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3495 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3501 // Don't bother draining input
\r
3502 if ( handle->drainCounter ) {
\r
3503 handle->drainCounter++;
\r
3507 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3509 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3511 if (stream_.doConvertBuffer[1]) {
\r
3513 // Always interleave ASIO input data.
\r
3514 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3515 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3516 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3517 handle->bufferInfos[i].buffers[bufferIndex],
\r
3521 if ( stream_.doByteSwap[1] )
\r
3522 byteSwapBuffer( stream_.deviceBuffer,
\r
3523 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3524 stream_.deviceFormat[1] );
\r
3525 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3529 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3530 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3531 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3532 handle->bufferInfos[i].buffers[bufferIndex],
\r
3537 if ( stream_.doByteSwap[1] )
\r
3538 byteSwapBuffer( stream_.userBuffer[1],
\r
3539 stream_.bufferSize * stream_.nUserChannels[1],
\r
3540 stream_.userFormat );
\r
3545 // The following call was suggested by Malte Clasen. While the API
\r
3546 // documentation indicates it should not be required, some device
\r
3547 // drivers apparently do not function correctly without it.
\r
3548 ASIOOutputReady();
\r
3550 RtApi::tickStreamTime();
\r
3554 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3556 // The ASIO documentation says that this usually only happens during
\r
3557 // external sync. Audio processing is not stopped by the driver,
\r
3558 // actual sample rate might not have even changed, maybe only the
\r
3559 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3562 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3564 object->stopStream();
\r
3566 catch ( RtAudioError &exception ) {
\r
3567 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3571 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3574 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3578 switch( selector ) {
\r
3579 case kAsioSelectorSupported:
\r
3580 if ( value == kAsioResetRequest
\r
3581 || value == kAsioEngineVersion
\r
3582 || value == kAsioResyncRequest
\r
3583 || value == kAsioLatenciesChanged
\r
3584 // The following three were added for ASIO 2.0, you don't
\r
3585 // necessarily have to support them.
\r
3586 || value == kAsioSupportsTimeInfo
\r
3587 || value == kAsioSupportsTimeCode
\r
3588 || value == kAsioSupportsInputMonitor)
\r
3591 case kAsioResetRequest:
\r
3592 // Defer the task and perform the reset of the driver during the
\r
3593 // next "safe" situation. You cannot reset the driver right now,
\r
3594 // as this code is called from the driver. Reset the driver is
\r
3595 // done by completely destruct is. I.e. ASIOStop(),
\r
3596 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3598 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3601 case kAsioResyncRequest:
\r
3602 // This informs the application that the driver encountered some
\r
3603 // non-fatal data loss. It is used for synchronization purposes
\r
3604 // of different media. Added mainly to work around the Win16Mutex
\r
3605 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3606 // which could lose data because the Mutex was held too long by
\r
3607 // another thread. However a driver can issue it in other
\r
3608 // situations, too.
\r
3609 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3613 case kAsioLatenciesChanged:
\r
3614 // This will inform the host application that the drivers were
\r
3615 // latencies changed. Beware, it this does not mean that the
\r
3616 // buffer sizes have changed! You might need to update internal
\r
3618 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3621 case kAsioEngineVersion:
\r
3622 // Return the supported ASIO version of the host application. If
\r
3623 // a host application does not implement this selector, ASIO 1.0
\r
3624 // is assumed by the driver.
\r
3627 case kAsioSupportsTimeInfo:
\r
3628 // Informs the driver whether the
\r
3629 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3630 // For compatibility with ASIO 1.0 drivers the host application
\r
3631 // should always support the "old" bufferSwitch method, too.
\r
3634 case kAsioSupportsTimeCode:
\r
3635 // Informs the driver whether application is interested in time
\r
3636 // code info. If an application does not need to know about time
\r
3637 // code, the driver has less work to do.
\r
3644 static const char* getAsioErrorString( ASIOError result )
\r
3649 const char*message;
\r
3652 static const Messages m[] =
\r
3654 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3655 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3656 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3657 { ASE_InvalidMode, "Invalid mode." },
\r
3658 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3659 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3660 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3663 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3664 if ( m[i].value == result ) return m[i].message;
\r
3666 return "Unknown error.";
\r
3669 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3673 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3675 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3676 // - Introduces support for the Windows WASAPI API
\r
3677 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3678 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3679 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3684 #include <audioclient.h>
\r
3686 #include <mmdeviceapi.h>
\r
3687 #include <functiondiscoverykeys_devpkey.h>
\r
3689 //=============================================================================
\r
// Release a COM interface pointer and reset it to NULL.  The null
// guard is essential: the cleanup paths below invoke this macro on
// pointers that may never have been assigned (e.g. capture-only or
// render-only streams), and calling Release() through NULL would crash.
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
\r
3698 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3700 //-----------------------------------------------------------------------------
\r
3702 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3703 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3704 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3705 // provide intermediate storage for read / write synchronization.
\r
3706 class WasapiBuffer
\r
3710 : buffer_( NULL ),
\r
3719 // sets the length of the internal ring buffer
\r
3720 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3723 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3725 bufferSize_ = bufferSize;
\r
3730 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3731 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3733 if ( !buffer || // incoming buffer is NULL
\r
3734 bufferSize == 0 || // incoming buffer has no data
\r
3735 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3740 unsigned int relOutIndex = outIndex_;
\r
3741 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3742 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3743 relOutIndex += bufferSize_;
\r
3746 // "in" index can end on the "out" index but cannot begin at it
\r
3747 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3748 return false; // not enough space between "in" index and "out" index
\r
3751 // copy buffer from external to internal
\r
3752 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3753 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3754 int fromInSize = bufferSize - fromZeroSize;
\r
3758 case RTAUDIO_SINT8:
\r
3759 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3760 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3762 case RTAUDIO_SINT16:
\r
3763 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3764 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3766 case RTAUDIO_SINT24:
\r
3767 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3768 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3770 case RTAUDIO_SINT32:
\r
3771 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3772 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3774 case RTAUDIO_FLOAT32:
\r
3775 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3776 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3778 case RTAUDIO_FLOAT64:
\r
3779 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3780 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3784 // update "in" index
\r
3785 inIndex_ += bufferSize;
\r
3786 inIndex_ %= bufferSize_;
\r
3791 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3792 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3794 if ( !buffer || // incoming buffer is NULL
\r
3795 bufferSize == 0 || // incoming buffer has no data
\r
3796 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3801 unsigned int relInIndex = inIndex_;
\r
3802 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3803 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3804 relInIndex += bufferSize_;
\r
3807 // "out" index can begin at and end on the "in" index
\r
3808 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3809 return false; // not enough space between "out" index and "in" index
\r
3812 // copy buffer from internal to external
\r
3813 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3814 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3815 int fromOutSize = bufferSize - fromZeroSize;
\r
3819 case RTAUDIO_SINT8:
\r
3820 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3821 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3823 case RTAUDIO_SINT16:
\r
3824 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3825 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3827 case RTAUDIO_SINT24:
\r
3828 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3829 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3831 case RTAUDIO_SINT32:
\r
3832 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3833 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3835 case RTAUDIO_FLOAT32:
\r
3836 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3837 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3839 case RTAUDIO_FLOAT64:
\r
3840 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3841 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3845 // update "out" index
\r
3846 outIndex_ += bufferSize;
\r
3847 outIndex_ %= bufferSize_;
\r
3854 unsigned int bufferSize_;
\r
3855 unsigned int inIndex_;
\r
3856 unsigned int outIndex_;
\r
3859 //-----------------------------------------------------------------------------
\r
3861 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3862 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3863 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3864 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3865 // one rate and its multiple.
\r
3866 void convertBufferWasapi( char* outBuffer,
\r
3867 const char* inBuffer,
\r
3868 const unsigned int& channelCount,
\r
3869 const unsigned int& inSampleRate,
\r
3870 const unsigned int& outSampleRate,
\r
3871 const unsigned int& inSampleCount,
\r
3872 unsigned int& outSampleCount,
\r
3873 const RtAudioFormat& format )
\r
3875 // calculate the new outSampleCount and relative sampleStep
\r
3876 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3877 float sampleStep = 1.0f / sampleRatio;
\r
3878 float inSampleFraction = 0.0f;
\r
3880 outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );
\r
3882 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3883 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3885 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3889 case RTAUDIO_SINT8:
\r
3890 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3892 case RTAUDIO_SINT16:
\r
3893 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3895 case RTAUDIO_SINT24:
\r
3896 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3898 case RTAUDIO_SINT32:
\r
3899 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3901 case RTAUDIO_FLOAT32:
\r
3902 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3904 case RTAUDIO_FLOAT64:
\r
3905 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3909 // jump to next in sample
\r
3910 inSampleFraction += sampleStep;
\r
3914 //-----------------------------------------------------------------------------
\r
3916 // A structure to hold various information related to the WASAPI implementation.
\r
3917 struct WasapiHandle
\r
3919 IAudioClient* captureAudioClient;
\r
3920 IAudioClient* renderAudioClient;
\r
3921 IAudioCaptureClient* captureClient;
\r
3922 IAudioRenderClient* renderClient;
\r
3923 HANDLE captureEvent;
\r
3924 HANDLE renderEvent;
\r
3927 : captureAudioClient( NULL ),
\r
3928 renderAudioClient( NULL ),
\r
3929 captureClient( NULL ),
\r
3930 renderClient( NULL ),
\r
3931 captureEvent( NULL ),
\r
3932 renderEvent( NULL ) {}
\r
3935 //=============================================================================
\r
3937 RtApiWasapi::RtApiWasapi()
\r
3938 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3940 // WASAPI can run either apartment or multi-threaded
\r
3941 HRESULT hr = CoInitialize( NULL );
\r
3942 if ( !FAILED( hr ) )
\r
3943 coInitialized_ = true;
\r
3945 // Instantiate device enumerator
\r
3946 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3947 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3948 ( void** ) &deviceEnumerator_ );
\r
3950 if ( FAILED( hr ) ) {
\r
3951 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3952 error( RtAudioError::DRIVER_ERROR );
\r
3956 //-----------------------------------------------------------------------------
\r
3958 RtApiWasapi::~RtApiWasapi()
\r
3960 if ( stream_.state != STREAM_CLOSED )
\r
3963 SAFE_RELEASE( deviceEnumerator_ );
\r
3965 // If this object previously called CoInitialize()
\r
3966 if ( coInitialized_ )
\r
3970 //=============================================================================
\r
3972 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3974 unsigned int captureDeviceCount = 0;
\r
3975 unsigned int renderDeviceCount = 0;
\r
3977 IMMDeviceCollection* captureDevices = NULL;
\r
3978 IMMDeviceCollection* renderDevices = NULL;
\r
3980 // Count capture devices
\r
3981 errorText_.clear();
\r
3982 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3983 if ( FAILED( hr ) ) {
\r
3984 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3988 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3989 if ( FAILED( hr ) ) {
\r
3990 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3994 // Count render devices
\r
3995 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3996 if ( FAILED( hr ) ) {
\r
3997 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
4001 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4002 if ( FAILED( hr ) ) {
\r
4003 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
4008 // release all references
\r
4009 SAFE_RELEASE( captureDevices );
\r
4010 SAFE_RELEASE( renderDevices );
\r
4012 if ( errorText_.empty() )
\r
4013 return captureDeviceCount + renderDeviceCount;
\r
4015 error( RtAudioError::DRIVER_ERROR );
\r
4019 //-----------------------------------------------------------------------------
\r
4021 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
4023 RtAudio::DeviceInfo info;
\r
4024 unsigned int captureDeviceCount = 0;
\r
4025 unsigned int renderDeviceCount = 0;
\r
4026 std::string defaultDeviceName;
\r
4027 bool isCaptureDevice = false;
\r
4029 PROPVARIANT deviceNameProp;
\r
4030 PROPVARIANT defaultDeviceNameProp;
\r
4032 IMMDeviceCollection* captureDevices = NULL;
\r
4033 IMMDeviceCollection* renderDevices = NULL;
\r
4034 IMMDevice* devicePtr = NULL;
\r
4035 IMMDevice* defaultDevicePtr = NULL;
\r
4036 IAudioClient* audioClient = NULL;
\r
4037 IPropertyStore* devicePropStore = NULL;
\r
4038 IPropertyStore* defaultDevicePropStore = NULL;
\r
4040 WAVEFORMATEX* deviceFormat = NULL;
\r
4041 WAVEFORMATEX* closestMatchFormat = NULL;
\r
4044 info.probed = false;
\r
4046 // Count capture devices
\r
4047 errorText_.clear();
\r
4048 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4049 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4050 if ( FAILED( hr ) ) {
\r
4051 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
4055 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4056 if ( FAILED( hr ) ) {
\r
4057 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
4061 // Count render devices
\r
4062 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4063 if ( FAILED( hr ) ) {
\r
4064 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
4068 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4069 if ( FAILED( hr ) ) {
\r
4070 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
4074 // validate device index
\r
4075 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4076 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
4077 errorType = RtAudioError::INVALID_USE;
\r
4081 // determine whether index falls within capture or render devices
\r
4082 if ( device >= renderDeviceCount ) {
\r
4083 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4084 if ( FAILED( hr ) ) {
\r
4085 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4088 isCaptureDevice = true;
\r
4091 hr = renderDevices->Item( device, &devicePtr );
\r
4092 if ( FAILED( hr ) ) {
\r
4093 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4096 isCaptureDevice = false;
\r
4099 // get default device name
\r
4100 if ( isCaptureDevice ) {
\r
4101 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4102 if ( FAILED( hr ) ) {
\r
4103 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4108 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4109 if ( FAILED( hr ) ) {
\r
4110 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4115 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4116 if ( FAILED( hr ) ) {
\r
4117 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4120 PropVariantInit( &defaultDeviceNameProp );
\r
4122 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4123 if ( FAILED( hr ) ) {
\r
4124 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4128 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
\r
4131 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4132 if ( FAILED( hr ) ) {
\r
4133 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4137 PropVariantInit( &deviceNameProp );
\r
4139 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4140 if ( FAILED( hr ) ) {
\r
4141 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4145 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
\r
4148 if ( isCaptureDevice ) {
\r
4149 info.isDefaultInput = info.name == defaultDeviceName;
\r
4150 info.isDefaultOutput = false;
\r
4153 info.isDefaultInput = false;
\r
4154 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4158 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4159 if ( FAILED( hr ) ) {
\r
4160 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4164 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4165 if ( FAILED( hr ) ) {
\r
4166 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4170 if ( isCaptureDevice ) {
\r
4171 info.inputChannels = deviceFormat->nChannels;
\r
4172 info.outputChannels = 0;
\r
4173 info.duplexChannels = 0;
\r
4176 info.inputChannels = 0;
\r
4177 info.outputChannels = deviceFormat->nChannels;
\r
4178 info.duplexChannels = 0;
\r
4182 info.sampleRates.clear();
\r
4184 // allow support for all sample rates as we have a built-in sample rate converter
\r
4185 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4186 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4188 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
\r
4191 info.nativeFormats = 0;
\r
4193 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4194 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4195 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4197 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4198 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4200 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4201 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4204 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4205 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4206 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4208 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4209 info.nativeFormats |= RTAUDIO_SINT8;
\r
4211 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4212 info.nativeFormats |= RTAUDIO_SINT16;
\r
4214 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4215 info.nativeFormats |= RTAUDIO_SINT24;
\r
4217 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4218 info.nativeFormats |= RTAUDIO_SINT32;
\r
4223 info.probed = true;
\r
4226 // release all references
\r
4227 PropVariantClear( &deviceNameProp );
\r
4228 PropVariantClear( &defaultDeviceNameProp );
\r
4230 SAFE_RELEASE( captureDevices );
\r
4231 SAFE_RELEASE( renderDevices );
\r
4232 SAFE_RELEASE( devicePtr );
\r
4233 SAFE_RELEASE( defaultDevicePtr );
\r
4234 SAFE_RELEASE( audioClient );
\r
4235 SAFE_RELEASE( devicePropStore );
\r
4236 SAFE_RELEASE( defaultDevicePropStore );
\r
4238 CoTaskMemFree( deviceFormat );
\r
4239 CoTaskMemFree( closestMatchFormat );
\r
4241 if ( !errorText_.empty() )
\r
4242 error( errorType );
\r
4246 //-----------------------------------------------------------------------------
\r
4248 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4250 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4251 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4259 //-----------------------------------------------------------------------------
\r
4261 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4263 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4264 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4272 //-----------------------------------------------------------------------------
\r
4274 void RtApiWasapi::closeStream( void )
\r
4276 if ( stream_.state == STREAM_CLOSED ) {
\r
4277 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4278 error( RtAudioError::WARNING );
\r
4282 if ( stream_.state != STREAM_STOPPED )
\r
4285 // clean up stream memory
\r
4286 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4287 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4289 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4290 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4292 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4293 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4295 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4296 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4298 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4299 stream_.apiHandle = NULL;
\r
4301 for ( int i = 0; i < 2; i++ ) {
\r
4302 if ( stream_.userBuffer[i] ) {
\r
4303 free( stream_.userBuffer[i] );
\r
4304 stream_.userBuffer[i] = 0;
\r
4308 if ( stream_.deviceBuffer ) {
\r
4309 free( stream_.deviceBuffer );
\r
4310 stream_.deviceBuffer = 0;
\r
4313 // update stream state
\r
4314 stream_.state = STREAM_CLOSED;
\r
4317 //-----------------------------------------------------------------------------
\r
4319 void RtApiWasapi::startStream( void )
\r
4323 if ( stream_.state == STREAM_RUNNING ) {
\r
4324 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4325 error( RtAudioError::WARNING );
\r
4329 // update stream state
\r
4330 stream_.state = STREAM_RUNNING;
\r
4332 // create WASAPI stream thread
\r
4333 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4335 if ( !stream_.callbackInfo.thread ) {
\r
4336 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4337 error( RtAudioError::THREAD_ERROR );
\r
4340 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4341 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4345 //-----------------------------------------------------------------------------
\r
4347 void RtApiWasapi::stopStream( void )
\r
4351 if ( stream_.state == STREAM_STOPPED ) {
\r
4352 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4353 error( RtAudioError::WARNING );
\r
4357 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4358 stream_.state = STREAM_STOPPING;
\r
4360 // wait until stream thread is stopped
\r
4361 while( stream_.state != STREAM_STOPPED ) {
\r
4365 // Wait for the last buffer to play before stopping.
\r
4366 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4368 // stop capture client if applicable
\r
4369 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4370 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4371 if ( FAILED( hr ) ) {
\r
4372 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4373 error( RtAudioError::DRIVER_ERROR );
\r
4378 // stop render client if applicable
\r
4379 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4380 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4381 if ( FAILED( hr ) ) {
\r
4382 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4383 error( RtAudioError::DRIVER_ERROR );
\r
4388 // close thread handle
\r
4389 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4390 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4391 error( RtAudioError::THREAD_ERROR );
\r
4395 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4398 //-----------------------------------------------------------------------------
\r
4400 void RtApiWasapi::abortStream( void )
\r
4404 if ( stream_.state == STREAM_STOPPED ) {
\r
4405 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4406 error( RtAudioError::WARNING );
\r
4410 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4411 stream_.state = STREAM_STOPPING;
\r
4413 // wait until stream thread is stopped
\r
4414 while ( stream_.state != STREAM_STOPPED ) {
\r
4418 // stop capture client if applicable
\r
4419 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4420 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4421 if ( FAILED( hr ) ) {
\r
4422 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4423 error( RtAudioError::DRIVER_ERROR );
\r
4428 // stop render client if applicable
\r
4429 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4430 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4431 if ( FAILED( hr ) ) {
\r
4432 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4433 error( RtAudioError::DRIVER_ERROR );
\r
4438 // close thread handle
\r
4439 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4440 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4441 error( RtAudioError::THREAD_ERROR );
\r
4445 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4448 //-----------------------------------------------------------------------------
\r
4450 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4451 unsigned int firstChannel, unsigned int sampleRate,
\r
4452 RtAudioFormat format, unsigned int* bufferSize,
\r
4453 RtAudio::StreamOptions* options )
\r
4455 bool methodResult = FAILURE;
\r
4456 unsigned int captureDeviceCount = 0;
\r
4457 unsigned int renderDeviceCount = 0;
\r
4459 IMMDeviceCollection* captureDevices = NULL;
\r
4460 IMMDeviceCollection* renderDevices = NULL;
\r
4461 IMMDevice* devicePtr = NULL;
\r
4462 WAVEFORMATEX* deviceFormat = NULL;
\r
4463 unsigned int bufferBytes;
\r
4464 stream_.state = STREAM_STOPPED;
\r
4466 // create API Handle if not already created
\r
4467 if ( !stream_.apiHandle )
\r
4468 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4470 // Count capture devices
\r
4471 errorText_.clear();
\r
4472 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4473 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4474 if ( FAILED( hr ) ) {
\r
4475 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4479 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4480 if ( FAILED( hr ) ) {
\r
4481 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4485 // Count render devices
\r
4486 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4487 if ( FAILED( hr ) ) {
\r
4488 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4492 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4493 if ( FAILED( hr ) ) {
\r
4494 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4498 // validate device index
\r
4499 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4500 errorType = RtAudioError::INVALID_USE;
\r
4501 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4505 // determine whether index falls within capture or render devices
\r
4506 if ( device >= renderDeviceCount ) {
\r
4507 if ( mode != INPUT ) {
\r
4508 errorType = RtAudioError::INVALID_USE;
\r
4509 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4513 // retrieve captureAudioClient from devicePtr
\r
4514 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4516 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4517 if ( FAILED( hr ) ) {
\r
4518 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4522 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4523 NULL, ( void** ) &captureAudioClient );
\r
4524 if ( FAILED( hr ) ) {
\r
4525 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4529 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4530 if ( FAILED( hr ) ) {
\r
4531 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4535 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4536 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4539 if ( mode != OUTPUT ) {
\r
4540 errorType = RtAudioError::INVALID_USE;
\r
4541 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4545 // retrieve renderAudioClient from devicePtr
\r
4546 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4548 hr = renderDevices->Item( device, &devicePtr );
\r
4549 if ( FAILED( hr ) ) {
\r
4550 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4554 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4555 NULL, ( void** ) &renderAudioClient );
\r
4556 if ( FAILED( hr ) ) {
\r
4557 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4561 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4562 if ( FAILED( hr ) ) {
\r
4563 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4567 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4568 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4571 // fill stream data
\r
4572 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4573 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4574 stream_.mode = DUPLEX;
\r
4577 stream_.mode = mode;
\r
4580 stream_.device[mode] = device;
\r
4581 stream_.doByteSwap[mode] = false;
\r
4582 stream_.sampleRate = sampleRate;
\r
4583 stream_.bufferSize = *bufferSize;
\r
4584 stream_.nBuffers = 1;
\r
4585 stream_.nUserChannels[mode] = channels;
\r
4586 stream_.channelOffset[mode] = firstChannel;
\r
4587 stream_.userFormat = format;
\r
4588 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4590 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4591 stream_.userInterleaved = false;
\r
4593 stream_.userInterleaved = true;
\r
4594 stream_.deviceInterleaved[mode] = true;
\r
4596 // Set flags for buffer conversion.
\r
4597 stream_.doConvertBuffer[mode] = false;
\r
4598 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
\r
4599 stream_.nUserChannels != stream_.nDeviceChannels )
\r
4600 stream_.doConvertBuffer[mode] = true;
\r
4601 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4602 stream_.nUserChannels[mode] > 1 )
\r
4603 stream_.doConvertBuffer[mode] = true;
\r
4605 if ( stream_.doConvertBuffer[mode] )
\r
4606 setConvertInfo( mode, 0 );
\r
4608 // Allocate necessary internal buffers
\r
4609 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4611 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4612 if ( !stream_.userBuffer[mode] ) {
\r
4613 errorType = RtAudioError::MEMORY_ERROR;
\r
4614 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4618 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4619 stream_.callbackInfo.priority = 15;
\r
4621 stream_.callbackInfo.priority = 0;
\r
4623 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4624 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4626 methodResult = SUCCESS;
\r
4630 SAFE_RELEASE( captureDevices );
\r
4631 SAFE_RELEASE( renderDevices );
\r
4632 SAFE_RELEASE( devicePtr );
\r
4633 CoTaskMemFree( deviceFormat );
\r
4635 // if method failed, close the stream
\r
4636 if ( methodResult == FAILURE )
\r
4639 if ( !errorText_.empty() )
\r
4640 error( errorType );
\r
4641 return methodResult;
\r
4644 //=============================================================================
\r
4646 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4649 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4654 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4657 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4662 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4665 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4670 //-----------------------------------------------------------------------------
\r
4672 void RtApiWasapi::wasapiThread()
\r
4674 // as this is a new thread, we must CoInitialize it
\r
4675 CoInitialize( NULL );
\r
4679 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4680 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4681 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
\r
4682 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
\r
4683 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
\r
4684 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
\r
4686 WAVEFORMATEX* captureFormat = NULL;
\r
4687 WAVEFORMATEX* renderFormat = NULL;
\r
4688 float captureSrRatio = 0.0f;
\r
4689 float renderSrRatio = 0.0f;
\r
4690 WasapiBuffer captureBuffer;
\r
4691 WasapiBuffer renderBuffer;
\r
4693 // declare local stream variables
\r
4694 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
\r
4695 BYTE* streamBuffer = NULL;
\r
4696 unsigned long captureFlags = 0;
\r
4697 unsigned int bufferFrameCount = 0;
\r
4698 unsigned int numFramesPadding = 0;
\r
4699 unsigned int convBufferSize = 0;
\r
4700 bool callbackPushed = false;
\r
4701 bool callbackPulled = false;
\r
4702 bool callbackStopped = false;
\r
4703 int callbackResult = 0;
\r
4705 // convBuffer is used to store converted buffers between WASAPI and the user
\r
4706 char* convBuffer = NULL;
\r
4707 unsigned int convBuffSize = 0;
\r
4708 unsigned int deviceBuffSize = 0;
\r
4710 errorText_.clear();
\r
4711 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4713 // Attempt to assign "Pro Audio" characteristic to thread
\r
4714 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
\r
4716 DWORD taskIndex = 0;
\r
4717 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
\r
4718 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
\r
4719 FreeLibrary( AvrtDll );
\r
4722 // start capture stream if applicable
\r
4723 if ( captureAudioClient ) {
\r
4724 hr = captureAudioClient->GetMixFormat( &captureFormat );
\r
4725 if ( FAILED( hr ) ) {
\r
4726 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4730 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
\r
4732 // initialize capture stream according to desire buffer size
\r
4733 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
\r
4734 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
\r
4736 if ( !captureClient ) {
\r
4737 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4738 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4739 desiredBufferPeriod,
\r
4740 desiredBufferPeriod,
\r
4743 if ( FAILED( hr ) ) {
\r
4744 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
\r
4748 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
\r
4749 ( void** ) &captureClient );
\r
4750 if ( FAILED( hr ) ) {
\r
4751 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
\r
4755 // configure captureEvent to trigger on every available capture buffer
\r
4756 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4757 if ( !captureEvent ) {
\r
4758 errorType = RtAudioError::SYSTEM_ERROR;
\r
4759 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
\r
4763 hr = captureAudioClient->SetEventHandle( captureEvent );
\r
4764 if ( FAILED( hr ) ) {
\r
4765 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
\r
4769 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
\r
4770 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
\r
4773 unsigned int inBufferSize = 0;
\r
4774 hr = captureAudioClient->GetBufferSize( &inBufferSize );
\r
4775 if ( FAILED( hr ) ) {
\r
4776 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
\r
4780 // scale outBufferSize according to stream->user sample rate ratio
\r
4781 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
\r
4782 inBufferSize *= stream_.nDeviceChannels[INPUT];
\r
4784 // set captureBuffer size
\r
4785 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
\r
4787 // reset the capture stream
\r
4788 hr = captureAudioClient->Reset();
\r
4789 if ( FAILED( hr ) ) {
\r
4790 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
\r
4794 // start the capture stream
\r
4795 hr = captureAudioClient->Start();
\r
4796 if ( FAILED( hr ) ) {
\r
4797 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
\r
4802 // start render stream if applicable
\r
4803 if ( renderAudioClient ) {
\r
4804 hr = renderAudioClient->GetMixFormat( &renderFormat );
\r
4805 if ( FAILED( hr ) ) {
\r
4806 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4810 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
\r
4812 // initialize render stream according to desire buffer size
\r
4813 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
\r
4814 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
\r
4816 if ( !renderClient ) {
\r
4817 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4818 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4819 desiredBufferPeriod,
\r
4820 desiredBufferPeriod,
\r
4823 if ( FAILED( hr ) ) {
\r
4824 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
\r
4828 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
\r
4829 ( void** ) &renderClient );
\r
4830 if ( FAILED( hr ) ) {
\r
4831 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
\r
4835 // configure renderEvent to trigger on every available render buffer
\r
4836 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4837 if ( !renderEvent ) {
\r
4838 errorType = RtAudioError::SYSTEM_ERROR;
\r
4839 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
\r
4843 hr = renderAudioClient->SetEventHandle( renderEvent );
\r
4844 if ( FAILED( hr ) ) {
\r
4845 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
\r
4849 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
\r
4850 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
\r
4853 unsigned int outBufferSize = 0;
\r
4854 hr = renderAudioClient->GetBufferSize( &outBufferSize );
\r
4855 if ( FAILED( hr ) ) {
\r
4856 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
\r
4860 // scale inBufferSize according to user->stream sample rate ratio
\r
4861 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
\r
4862 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
\r
4864 // set renderBuffer size
\r
4865 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4867 // reset the render stream
\r
4868 hr = renderAudioClient->Reset();
\r
4869 if ( FAILED( hr ) ) {
\r
4870 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
\r
4874 // start the render stream
\r
4875 hr = renderAudioClient->Start();
\r
4876 if ( FAILED( hr ) ) {
\r
4877 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
\r
4882 if ( stream_.mode == INPUT ) {
\r
4883 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4884 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4886 else if ( stream_.mode == OUTPUT ) {
\r
4887 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4888 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4890 else if ( stream_.mode == DUPLEX ) {
\r
4891 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4892 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4893 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4894 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4897 convBuffer = ( char* ) malloc( convBuffSize );
\r
4898 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
\r
4899 if ( !convBuffer || !stream_.deviceBuffer ) {
\r
4900 errorType = RtAudioError::MEMORY_ERROR;
\r
4901 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
\r
4905 // stream process loop
\r
4906 while ( stream_.state != STREAM_STOPPING ) {
\r
4907 if ( !callbackPulled ) {
\r
4910 // 1. Pull callback buffer from inputBuffer
\r
4911 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
\r
4912 // Convert callback buffer to user format
\r
4914 if ( captureAudioClient ) {
\r
4915 // Pull callback buffer from inputBuffer
\r
4916 callbackPulled = captureBuffer.pullBuffer( convBuffer,
\r
4917 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
\r
4918 stream_.deviceFormat[INPUT] );
\r
4920 if ( callbackPulled ) {
\r
4921 // Convert callback buffer to user sample rate
\r
4922 convertBufferWasapi( stream_.deviceBuffer,
\r
4924 stream_.nDeviceChannels[INPUT],
\r
4925 captureFormat->nSamplesPerSec,
\r
4926 stream_.sampleRate,
\r
4927 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
\r
4929 stream_.deviceFormat[INPUT] );
\r
4931 if ( stream_.doConvertBuffer[INPUT] ) {
\r
4932 // Convert callback buffer to user format
\r
4933 convertBuffer( stream_.userBuffer[INPUT],
\r
4934 stream_.deviceBuffer,
\r
4935 stream_.convertInfo[INPUT] );
\r
4938 // no further conversion, simple copy deviceBuffer to userBuffer
\r
4939 memcpy( stream_.userBuffer[INPUT],
\r
4940 stream_.deviceBuffer,
\r
4941 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
\r
4946 // if there is no capture stream, set callbackPulled flag
\r
4947 callbackPulled = true;
\r
4950 // Execute Callback
\r
4951 // ================
\r
4952 // 1. Execute user callback method
\r
4953 // 2. Handle return value from callback
\r
4955 // if callback has not requested the stream to stop
\r
4956 if ( callbackPulled && !callbackStopped ) {
\r
4957 // Execute user callback method
\r
4958 callbackResult = callback( stream_.userBuffer[OUTPUT],
\r
4959 stream_.userBuffer[INPUT],
\r
4960 stream_.bufferSize,
\r
4962 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
\r
4963 stream_.callbackInfo.userData );
\r
4965 // Handle return value from callback
\r
4966 if ( callbackResult == 1 ) {
\r
4967 // instantiate a thread to stop this thread
\r
4968 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
\r
4969 if ( !threadHandle ) {
\r
4970 errorType = RtAudioError::THREAD_ERROR;
\r
4971 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
\r
4974 else if ( !CloseHandle( threadHandle ) ) {
\r
4975 errorType = RtAudioError::THREAD_ERROR;
\r
4976 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
\r
4980 callbackStopped = true;
\r
4982 else if ( callbackResult == 2 ) {
\r
4983 // instantiate a thread to stop this thread
\r
4984 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
\r
4985 if ( !threadHandle ) {
\r
4986 errorType = RtAudioError::THREAD_ERROR;
\r
4987 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
\r
4990 else if ( !CloseHandle( threadHandle ) ) {
\r
4991 errorType = RtAudioError::THREAD_ERROR;
\r
4992 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
\r
4996 callbackStopped = true;
\r
5001 // Callback Output
\r
5002 // ===============
\r
5003 // 1. Convert callback buffer to stream format
\r
5004 // 2. Convert callback buffer to stream sample rate and channel count
\r
5005 // 3. Push callback buffer into outputBuffer
\r
5007 if ( renderAudioClient && callbackPulled ) {
\r
5008 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
5009 // Convert callback buffer to stream format
\r
5010 convertBuffer( stream_.deviceBuffer,
\r
5011 stream_.userBuffer[OUTPUT],
\r
5012 stream_.convertInfo[OUTPUT] );
\r
5016 // Convert callback buffer to stream sample rate
\r
5017 convertBufferWasapi( convBuffer,
\r
5018 stream_.deviceBuffer,
\r
5019 stream_.nDeviceChannels[OUTPUT],
\r
5020 stream_.sampleRate,
\r
5021 renderFormat->nSamplesPerSec,
\r
5022 stream_.bufferSize,
\r
5024 stream_.deviceFormat[OUTPUT] );
\r
5026 // Push callback buffer into outputBuffer
\r
5027 callbackPushed = renderBuffer.pushBuffer( convBuffer,
\r
5028 convBufferSize * stream_.nDeviceChannels[OUTPUT],
\r
5029 stream_.deviceFormat[OUTPUT] );
\r
5032 // if there is no render stream, set callbackPushed flag
\r
5033 callbackPushed = true;
\r
5038 // 1. Get capture buffer from stream
\r
5039 // 2. Push capture buffer into inputBuffer
\r
5040 // 3. If 2. was successful: Release capture buffer
\r
5042 if ( captureAudioClient ) {
\r
5043 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
\r
5044 if ( !callbackPulled ) {
\r
5045 WaitForSingleObject( captureEvent, INFINITE );
\r
5048 // Get capture buffer from stream
\r
5049 hr = captureClient->GetBuffer( &streamBuffer,
\r
5050 &bufferFrameCount,
\r
5051 &captureFlags, NULL, NULL );
\r
5052 if ( FAILED( hr ) ) {
\r
5053 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
\r
5057 if ( bufferFrameCount != 0 ) {
\r
5058 // Push capture buffer into inputBuffer
\r
5059 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
\r
5060 bufferFrameCount * stream_.nDeviceChannels[INPUT],
\r
5061 stream_.deviceFormat[INPUT] ) )
\r
5063 // Release capture buffer
\r
5064 hr = captureClient->ReleaseBuffer( bufferFrameCount );
\r
5065 if ( FAILED( hr ) ) {
\r
5066 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5072 // Inform WASAPI that capture was unsuccessful
\r
5073 hr = captureClient->ReleaseBuffer( 0 );
\r
5074 if ( FAILED( hr ) ) {
\r
5075 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5082 // Inform WASAPI that capture was unsuccessful
\r
5083 hr = captureClient->ReleaseBuffer( 0 );
\r
5084 if ( FAILED( hr ) ) {
\r
5085 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5093 // 1. Get render buffer from stream
\r
5094 // 2. Pull next buffer from outputBuffer
\r
5095 // 3. If 2. was successful: Fill render buffer with next buffer
\r
5096 // Release render buffer
\r
5098 if ( renderAudioClient ) {
\r
5099 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
\r
5100 if ( callbackPulled && !callbackPushed ) {
\r
5101 WaitForSingleObject( renderEvent, INFINITE );
\r
5104 // Get render buffer from stream
\r
5105 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
\r
5106 if ( FAILED( hr ) ) {
\r
5107 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
\r
5111 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
\r
5112 if ( FAILED( hr ) ) {
\r
5113 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
\r
5117 bufferFrameCount -= numFramesPadding;
\r
5119 if ( bufferFrameCount != 0 ) {
\r
5120 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
\r
5121 if ( FAILED( hr ) ) {
\r
5122 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
\r
5126 // Pull next buffer from outputBuffer
\r
5127 // Fill render buffer with next buffer
\r
5128 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
\r
5129 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
\r
5130 stream_.deviceFormat[OUTPUT] ) )
\r
5132 // Release render buffer
\r
5133 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
\r
5134 if ( FAILED( hr ) ) {
\r
5135 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5141 // Inform WASAPI that render was unsuccessful
\r
5142 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5143 if ( FAILED( hr ) ) {
\r
5144 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5151 // Inform WASAPI that render was unsuccessful
\r
5152 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5153 if ( FAILED( hr ) ) {
\r
5154 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5160 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
\r
5161 if ( callbackPushed ) {
\r
5162 callbackPulled = false;
\r
5163 // tick stream time
\r
5164 RtApi::tickStreamTime();
\r
5171 CoTaskMemFree( captureFormat );
\r
5172 CoTaskMemFree( renderFormat );
\r
5174 free ( convBuffer );
\r
5178 // update stream state
\r
5179 stream_.state = STREAM_STOPPED;
\r
5181 if ( errorText_.empty() )
\r
5184 error( errorType );
\r
5187 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5191 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5193 // Modified by Robin Davies, October 2005
\r
5194 // - Improvements to DirectX pointer chasing.
\r
5195 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5196 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5197 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5198 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5200 #include <dsound.h>
\r
5201 #include <assert.h>
\r
5202 #include <algorithm>
\r
5204 #if defined(__MINGW32__)
\r
5205 // missing from latest mingw winapi
\r
5206 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5207 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5208 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5209 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5212 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5214 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5215 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5218 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5220 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5221 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5222 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5223 return pointer >= earlierPointer && pointer < laterPointer;
\r
5226 // A structure to hold various information related to the DirectSound
\r
5227 // API implementation.
\r
5229 unsigned int drainCounter; // Tracks callback counts when draining
\r
5230 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
5234 UINT bufferPointer[2];
\r
5235 DWORD dsBufferSize[2];
\r
5236 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
5240 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5243 // Declarations for utility functions, callbacks, and structures
\r
5244 // specific to the DirectSound implementation.
\r
5245 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5246 LPCTSTR description,
\r
5248 LPVOID lpContext );
\r
5250 static const char* getErrorString( int code );
\r
5252 static unsigned __stdcall callbackHandler( void *ptr );
\r
5261 : found(false) { validId[0] = false; validId[1] = false; }
\r
5264 struct DsProbeData {
\r
5266 std::vector<struct DsDevice>* dsDevices;
\r
5269 RtApiDs :: RtApiDs()
\r
5271 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5272 // accept whatever the mainline chose for a threading model.
\r
5273 coInitialized_ = false;
\r
5274 HRESULT hr = CoInitialize( NULL );
\r
5275 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5278 RtApiDs :: ~RtApiDs()
\r
5280 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5281 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5284 // The DirectSound default output is always the first device.
\r
5285 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5290 // The DirectSound default input is always the first input device,
\r
5291 // which is the first capture device enumerated.
\r
5292 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
5297 unsigned int RtApiDs :: getDeviceCount( void )
\r
5299 // Set query flag for previously found devices to false, so that we
\r
5300 // can check for any devices that have disappeared.
\r
5301 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5302 dsDevices[i].found = false;
\r
5304 // Query DirectSound devices.
\r
5305 struct DsProbeData probeInfo;
\r
5306 probeInfo.isInput = false;
\r
5307 probeInfo.dsDevices = &dsDevices;
\r
5308 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5309 if ( FAILED( result ) ) {
\r
5310 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5311 errorText_ = errorStream_.str();
\r
5312 error( RtAudioError::WARNING );
\r
5315 // Query DirectSoundCapture devices.
\r
5316 probeInfo.isInput = true;
\r
5317 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5318 if ( FAILED( result ) ) {
\r
5319 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5320 errorText_ = errorStream_.str();
\r
5321 error( RtAudioError::WARNING );
\r
5324 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
\r
5325 for ( unsigned int i=0; i<dsDevices.size(); ) {
\r
5326 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
\r
5330 return static_cast<unsigned int>(dsDevices.size());
\r
5333 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5335 RtAudio::DeviceInfo info;
\r
5336 info.probed = false;
\r
5338 if ( dsDevices.size() == 0 ) {
\r
5339 // Force a query of all devices
\r
5341 if ( dsDevices.size() == 0 ) {
\r
5342 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5343 error( RtAudioError::INVALID_USE );
\r
5348 if ( device >= dsDevices.size() ) {
\r
5349 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5350 error( RtAudioError::INVALID_USE );
\r
5355 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5357 LPDIRECTSOUND output;
\r
5359 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5360 if ( FAILED( result ) ) {
\r
5361 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5362 errorText_ = errorStream_.str();
\r
5363 error( RtAudioError::WARNING );
\r
5367 outCaps.dwSize = sizeof( outCaps );
\r
5368 result = output->GetCaps( &outCaps );
\r
5369 if ( FAILED( result ) ) {
\r
5370 output->Release();
\r
5371 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5372 errorText_ = errorStream_.str();
\r
5373 error( RtAudioError::WARNING );
\r
5377 // Get output channel information.
\r
5378 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5380 // Get sample rate information.
\r
5381 info.sampleRates.clear();
\r
5382 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5383 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5384 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
\r
5385 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5387 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
5388 info.preferredSampleRate = SAMPLE_RATES[k];
\r
5392 // Get format information.
\r
5393 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5394 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5396 output->Release();
\r
5398 if ( getDefaultOutputDevice() == device )
\r
5399 info.isDefaultOutput = true;
\r
5401 if ( dsDevices[ device ].validId[1] == false ) {
\r
5402 info.name = dsDevices[ device ].name;
\r
5403 info.probed = true;
\r
5409 LPDIRECTSOUNDCAPTURE input;
\r
5410 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5411 if ( FAILED( result ) ) {
\r
5412 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5413 errorText_ = errorStream_.str();
\r
5414 error( RtAudioError::WARNING );
\r
5419 inCaps.dwSize = sizeof( inCaps );
\r
5420 result = input->GetCaps( &inCaps );
\r
5421 if ( FAILED( result ) ) {
\r
5423 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5424 errorText_ = errorStream_.str();
\r
5425 error( RtAudioError::WARNING );
\r
5429 // Get input channel information.
\r
5430 info.inputChannels = inCaps.dwChannels;
\r
5432 // Get sample rate and format information.
\r
5433 std::vector<unsigned int> rates;
\r
5434 if ( inCaps.dwChannels >= 2 ) {
\r
5435 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5436 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5437 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5438 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5439 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5440 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5441 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5442 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5444 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5445 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5446 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5447 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5448 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5450 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5451 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5452 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5453 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5454 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5457 else if ( inCaps.dwChannels == 1 ) {
\r
5458 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5459 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5460 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5461 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5462 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5463 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5464 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5465 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5467 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5468 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5469 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5470 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5471 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5473 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5474 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5475 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5476 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5477 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5480 else info.inputChannels = 0; // technically, this would be an error
\r
5484 if ( info.inputChannels == 0 ) return info;
\r
5486 // Copy the supported rates to the info structure but avoid duplication.
\r
5488 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5490 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5491 if ( rates[i] == info.sampleRates[j] ) {
\r
5496 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5498 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5500 // If device opens for both playback and capture, we determine the channels.
\r
5501 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5502 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5504 if ( device == 0 ) info.isDefaultInput = true;
\r
5506 // Copy name and return.
\r
5507 info.name = dsDevices[ device ].name;
\r
5508 info.probed = true;
\r
// Opens DirectSound device `device` for the requested mode (OUTPUT or INPUT):
// validates the device/channel request, negotiates an 8- or 16-bit PCM wave
// format, creates the DS playback (primary + secondary) or capture buffer,
// allocates the stream_ user/device conversion buffers, and starts the
// callback thread.  Returns false on failure with errorText_ set.
// NOTE(review): this excerpt is perforated -- interleaved original lines
// (braces, `return FAILURE;`, some declarations, the error label) are
// missing between the numbered statements, so commentary below describes
// only the statements that are visible.
5512 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5513 unsigned int firstChannel, unsigned int sampleRate,
\r
5514 RtAudioFormat format, unsigned int *bufferSize,
\r
5515 RtAudio::StreamOptions *options )
\r
// DirectSound supports at most stereo per device; reject anything wider.
5517 if ( channels + firstChannel > 2 ) {
\r
5518 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
5522 size_t nDevices = dsDevices.size();
\r
5523 if ( nDevices == 0 ) {
\r
5524 // This should not happen because a check is made before this function is called.
\r
5525 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
5529 if ( device >= nDevices ) {
\r
5530 // This should not happen because a check is made before this function is called.
\r
5531 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
// validId[0] == playback capability, validId[1] == capture capability.
5535 if ( mode == OUTPUT ) {
\r
5536 if ( dsDevices[ device ].validId[0] == false ) {
\r
5537 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
5538 errorText_ = errorStream_.str();
\r
5542 else { // mode == INPUT
\r
5543 if ( dsDevices[ device ].validId[1] == false ) {
\r
5544 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
5545 errorText_ = errorStream_.str();
\r
5550 // According to a note in PortAudio, using GetDesktopWindow()
\r
5551 // instead of GetForegroundWindow() is supposed to avoid problems
\r
5552 // that occur when the application's window is not the foreground
\r
5553 // window. Also, if the application window closes before the
\r
5554 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
5555 // problems when using GetDesktopWindow() but it seems fine now
\r
5556 // (January 2010). I'll leave it commented here.
\r
5557 // HWND hWnd = GetForegroundWindow();
\r
5558 HWND hWnd = GetDesktopWindow();
\r
5560 // Check the numberOfBuffers parameter and limit the lowest value to
\r
5561 // two. This is a judgement call and a value of two is probably too
\r
5562 // low for capture, but it should work for playback.
\r
5564 if ( options ) nBuffers = options->numberOfBuffers;
\r
5565 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
5566 if ( nBuffers < 2 ) nBuffers = 3;
\r
5568 // Check the lower range of the user-specified buffer size and set
\r
5569 // (arbitrarily) to a lower bound of 32.
\r
5570 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
5572 // Create the wave format structure. The data format setting will
\r
5573 // be determined later.
\r
5574 WAVEFORMATEX waveFormat;
\r
5575 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
5576 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
5577 waveFormat.nChannels = channels + firstChannel;
\r
5578 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
5580 // Determine the device buffer size. By default, we'll use the value
\r
5581 // defined above (32K), but we will grow it to make allowances for
\r
5582 // very large software buffer sizes.
\r
5583 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
5584 DWORD dsPointerLeadTime = 0;
\r
// ohandle/bhandle temporarily carry the DS object and its buffer so they
// can be stashed in the DsHandle near the end of the function.
5586 void *ohandle = 0, *bhandle = 0;
\r
// ---- Playback (OUTPUT) setup: IDirectSound + primary/secondary buffers ----
5588 if ( mode == OUTPUT ) {
\r
5590 LPDIRECTSOUND output;
\r
5591 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5592 if ( FAILED( result ) ) {
\r
5593 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5594 errorText_ = errorStream_.str();
\r
5599 outCaps.dwSize = sizeof( outCaps );
\r
5600 result = output->GetCaps( &outCaps );
\r
5601 if ( FAILED( result ) ) {
\r
5602 output->Release();
\r
5603 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
5604 errorText_ = errorStream_.str();
\r
5608 // Check channel information.
\r
5609 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
5610 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
5611 errorText_ = errorStream_.str();
\r
5615 // Check format information. Use 16-bit format unless not
\r
5616 // supported or user requests 8-bit.
\r
5617 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
5618 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
5619 waveFormat.wBitsPerSample = 16;
\r
5620 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5623 waveFormat.wBitsPerSample = 8;
\r
5624 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5626 stream_.userFormat = format;
\r
5628 // Update wave format structure and buffer information.
\r
5629 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5630 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
// Lead time = total bytes the writer may stay ahead of the play cursor.
5631 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5633 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5634 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5635 dsBufferSize *= 2;
\r
5637 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
5638 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
5639 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
5640 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
5641 if ( FAILED( result ) ) {
\r
5642 output->Release();
\r
5643 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
5644 errorText_ = errorStream_.str();
\r
5648 // Even though we will write to the secondary buffer, we need to
\r
5649 // access the primary buffer to set the correct output format
\r
5650 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
5651 // buffer description.
\r
5652 DSBUFFERDESC bufferDescription;
\r
5653 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5654 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5655 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
5657 // Obtain the primary buffer
\r
5658 LPDIRECTSOUNDBUFFER buffer;
\r
5659 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5660 if ( FAILED( result ) ) {
\r
5661 output->Release();
\r
5662 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
5663 errorText_ = errorStream_.str();
\r
5667 // Set the primary DS buffer sound format.
\r
5668 result = buffer->SetFormat( &waveFormat );
\r
5669 if ( FAILED( result ) ) {
\r
5670 output->Release();
\r
5671 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
5672 errorText_ = errorStream_.str();
\r
5676 // Setup the secondary DS buffer description.
\r
5677 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5678 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5679 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5680 DSBCAPS_GLOBALFOCUS |
\r
5681 DSBCAPS_GETCURRENTPOSITION2 |
\r
5682 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
5683 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5684 bufferDescription.lpwfxFormat = &waveFormat;
\r
5686 // Try to create the secondary DS buffer. If that doesn't work,
\r
5687 // try to use software mixing. Otherwise, there's a problem.
\r
5688 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5689 if ( FAILED( result ) ) {
\r
// Hardware mixing unavailable -- retry the same description with
// software mixing before giving up.
5690 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5691 DSBCAPS_GLOBALFOCUS |
\r
5692 DSBCAPS_GETCURRENTPOSITION2 |
\r
5693 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
5694 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5695 if ( FAILED( result ) ) {
\r
5696 output->Release();
\r
5697 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
5698 errorText_ = errorStream_.str();
\r
5703 // Get the buffer size ... might be different from what we specified.
\r
5705 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
5706 result = buffer->GetCaps( &dsbcaps );
\r
5707 if ( FAILED( result ) ) {
\r
5708 output->Release();
\r
5709 buffer->Release();
\r
5710 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5711 errorText_ = errorStream_.str();
\r
5715 dsBufferSize = dsbcaps.dwBufferBytes;
\r
5717 // Lock the DS buffer
\r
5720 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5721 if ( FAILED( result ) ) {
\r
5722 output->Release();
\r
5723 buffer->Release();
\r
5724 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
5725 errorText_ = errorStream_.str();
\r
5729 // Zero the DS buffer
\r
5730 ZeroMemory( audioPtr, dataLen );
\r
5732 // Unlock the DS buffer
\r
5733 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5734 if ( FAILED( result ) ) {
\r
5735 output->Release();
\r
5736 buffer->Release();
\r
5737 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
5738 errorText_ = errorStream_.str();
\r
5742 ohandle = (void *) output;
\r
5743 bhandle = (void *) buffer;
\r
// ---- Capture (INPUT) setup: IDirectSoundCapture + capture buffer ----
5746 if ( mode == INPUT ) {
\r
5748 LPDIRECTSOUNDCAPTURE input;
\r
5749 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5750 if ( FAILED( result ) ) {
\r
5751 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5752 errorText_ = errorStream_.str();
\r
5757 inCaps.dwSize = sizeof( inCaps );
\r
5758 result = input->GetCaps( &inCaps );
\r
5759 if ( FAILED( result ) ) {
\r
5761 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
5762 errorText_ = errorStream_.str();
\r
5766 // Check channel information.
\r
5767 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
5768 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
5772 // Check format information. Use 16-bit format unless user
\r
5773 // requests 8-bit.
\r
5774 DWORD deviceFormats;
\r
// Stereo capture: pick 8-bit only when requested AND advertised.
5775 if ( channels + firstChannel == 2 ) {
\r
5776 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
5777 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5778 waveFormat.wBitsPerSample = 8;
\r
5779 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5781 else { // assume 16-bit is supported
\r
5782 waveFormat.wBitsPerSample = 16;
\r
5783 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5786 else { // channel == 1
\r
5787 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
5788 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5789 waveFormat.wBitsPerSample = 8;
\r
5790 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5792 else { // assume 16-bit is supported
\r
5793 waveFormat.wBitsPerSample = 16;
\r
5794 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5797 stream_.userFormat = format;
\r
5799 // Update wave format structure and buffer information.
\r
5800 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5801 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5802 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5804 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5805 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5806 dsBufferSize *= 2;
\r
5808 // Setup the secondary DS buffer description.
\r
5809 DSCBUFFERDESC bufferDescription;
\r
5810 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
5811 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
5812 bufferDescription.dwFlags = 0;
\r
5813 bufferDescription.dwReserved = 0;
\r
5814 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5815 bufferDescription.lpwfxFormat = &waveFormat;
\r
5817 // Create the capture buffer.
\r
5818 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
5819 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
5820 if ( FAILED( result ) ) {
\r
5822 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
5823 errorText_ = errorStream_.str();
\r
5827 // Get the buffer size ... might be different from what we specified.
\r
5828 DSCBCAPS dscbcaps;
\r
5829 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
5830 result = buffer->GetCaps( &dscbcaps );
\r
5831 if ( FAILED( result ) ) {
\r
5833 buffer->Release();
\r
5834 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5835 errorText_ = errorStream_.str();
\r
5839 dsBufferSize = dscbcaps.dwBufferBytes;
\r
5841 // NOTE: We could have a problem here if this is a duplex stream
\r
5842 // and the play and capture hardware buffer sizes are different
\r
5843 // (I'm actually not sure if that is a problem or not).
\r
5844 // Currently, we are not verifying that.
\r
5846 // Lock the capture buffer
\r
5849 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5850 if ( FAILED( result ) ) {
\r
5852 buffer->Release();
\r
5853 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
5854 errorText_ = errorStream_.str();
\r
5858 // Zero the buffer
\r
5859 ZeroMemory( audioPtr, dataLen );
\r
5861 // Unlock the buffer
\r
5862 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5863 if ( FAILED( result ) ) {
\r
5865 buffer->Release();
\r
5866 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
5867 errorText_ = errorStream_.str();
\r
5871 ohandle = (void *) input;
\r
5872 bhandle = (void *) buffer;
\r
5875 // Set various stream parameters
\r
5876 DsHandle *handle = 0;
\r
5877 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
5878 stream_.nUserChannels[mode] = channels;
\r
5879 stream_.bufferSize = *bufferSize;
\r
5880 stream_.channelOffset[mode] = firstChannel;
\r
5881 stream_.deviceInterleaved[mode] = true;
\r
5882 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
5883 else stream_.userInterleaved = true;
\r
5885 // Set flag for buffer conversion
\r
// Conversion is needed when user and device differ in channel count,
// sample format, or (for multichannel) interleaving.
5886 stream_.doConvertBuffer[mode] = false;
\r
5887 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
5888 stream_.doConvertBuffer[mode] = true;
\r
5889 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
5890 stream_.doConvertBuffer[mode] = true;
\r
5891 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5892 stream_.nUserChannels[mode] > 1 )
\r
5893 stream_.doConvertBuffer[mode] = true;
\r
5895 // Allocate necessary internal buffers
\r
5896 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5897 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5898 if ( stream_.userBuffer[mode] == NULL ) {
\r
5899 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
5903 if ( stream_.doConvertBuffer[mode] ) {
\r
5905 bool makeBuffer = true;
\r
5906 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
// For duplex: reuse the existing (output) device buffer when it is
// already large enough for the input side.
5907 if ( mode == INPUT ) {
\r
5908 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5909 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5910 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
5914 if ( makeBuffer ) {
\r
5915 bufferBytes *= *bufferSize;
\r
5916 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5917 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5918 if ( stream_.deviceBuffer == NULL ) {
\r
5919 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
5925 // Allocate our DsHandle structures for the stream.
\r
5926 if ( stream_.apiHandle == 0 ) {
\r
5928 handle = new DsHandle;
\r
5930 catch ( std::bad_alloc& ) {
\r
// NOTE(review): message says "AsioHandle" but this allocates a DsHandle --
// looks like a copy/paste slip from the ASIO backend; confirm upstream.
5931 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
5935 // Create a manual-reset event.
\r
5936 handle->condition = CreateEvent( NULL, // no security
\r
5937 TRUE, // manual-reset
\r
5938 FALSE, // non-signaled initially
\r
5939 NULL ); // unnamed
\r
5940 stream_.apiHandle = (void *) handle;
\r
5943 handle = (DsHandle *) stream_.apiHandle;
\r
5944 handle->id[mode] = ohandle;
\r
5945 handle->buffer[mode] = bhandle;
\r
5946 handle->dsBufferSize[mode] = dsBufferSize;
\r
5947 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
5949 stream_.device[mode] = device;
\r
5950 stream_.state = STREAM_STOPPED;
\r
5951 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
5952 // We had already set up an output stream.
\r
5953 stream_.mode = DUPLEX;
\r
5955 stream_.mode = mode;
\r
5956 stream_.nBuffers = nBuffers;
\r
5957 stream_.sampleRate = sampleRate;
\r
5959 // Setup the buffer conversion information structure.
\r
5960 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5962 // Setup the callback thread.
\r
5963 if ( stream_.callbackInfo.isRunning == false ) {
\r
5964 unsigned threadId;
\r
5965 stream_.callbackInfo.isRunning = true;
\r
5966 stream_.callbackInfo.object = (void *) this;
\r
5967 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
5968 &stream_.callbackInfo, 0, &threadId );
\r
5969 if ( stream_.callbackInfo.thread == 0 ) {
\r
5970 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
5974 // Boost DS thread priority
\r
5975 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
// NOTE(review): the cleanup below presumably follows an error label that
// is not visible in this excerpt; it releases whichever DS objects were
// created above and frees the stream buffers before marking the stream
// closed.
5981 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5982 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5983 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5984 if ( buffer ) buffer->Release();
\r
5985 object->Release();
\r
5987 if ( handle->buffer[1] ) {
\r
5988 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5989 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5990 if ( buffer ) buffer->Release();
\r
5991 object->Release();
\r
5993 CloseHandle( handle->condition );
\r
5995 stream_.apiHandle = 0;
\r
5998 for ( int i=0; i<2; i++ ) {
\r
5999 if ( stream_.userBuffer[i] ) {
\r
6000 free( stream_.userBuffer[i] );
\r
6001 stream_.userBuffer[i] = 0;
\r
6005 if ( stream_.deviceBuffer ) {
\r
6006 free( stream_.deviceBuffer );
\r
6007 stream_.deviceBuffer = 0;
\r
6010 stream_.state = STREAM_CLOSED;
\r
// Closes an open stream: stops and joins the callback thread, releases the
// DirectSound playback/capture objects and buffers held in the DsHandle,
// frees the internal user/device buffers, and resets the stream state.
// Warns (and, per the missing lines, presumably returns early) when no
// stream is open.
// NOTE(review): excerpt is perforated -- braces/early-return lines are
// missing; code text below is preserved byte-for-byte.
6014 void RtApiDs :: closeStream()
\r
6016 if ( stream_.state == STREAM_CLOSED ) {
\r
6017 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
6018 error( RtAudioError::WARNING );
\r
6022 // Stop the callback thread.
\r
// Clearing isRunning signals the thread loop to exit; then join and close it.
6023 stream_.callbackInfo.isRunning = false;
\r
6024 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
6025 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
6027 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
// Release playback-side COM objects (index 0 = output).
6029 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
6030 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
6031 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6034 buffer->Release();
\r
6036 object->Release();
\r
// Release capture-side COM objects (index 1 = input).
6038 if ( handle->buffer[1] ) {
\r
6039 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
6040 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6043 buffer->Release();
\r
6045 object->Release();
\r
6047 CloseHandle( handle->condition );
\r
6049 stream_.apiHandle = 0;
\r
// Free per-mode user buffers and the shared device conversion buffer.
6052 for ( int i=0; i<2; i++ ) {
\r
6053 if ( stream_.userBuffer[i] ) {
\r
6054 free( stream_.userBuffer[i] );
\r
6055 stream_.userBuffer[i] = 0;
\r
6059 if ( stream_.deviceBuffer ) {
\r
6060 free( stream_.deviceBuffer );
\r
6061 stream_.deviceBuffer = 0;
\r
6064 stream_.mode = UNINITIALIZED;
\r
6065 stream_.state = STREAM_CLOSED;
\r
// Starts a stopped stream: raises timer resolution, arms the duplex preroll,
// starts the DS playback buffer (looping) and/or the capture buffer, resets
// the drain bookkeeping and marks the stream running.  Warns if already
// running; raises SYSTEM_ERROR if a DS call failed.
// NOTE(review): excerpt is perforated -- braces/early-return/goto lines are
// missing; code text below is preserved byte-for-byte.
6068 void RtApiDs :: startStream()
\r
6071 if ( stream_.state == STREAM_RUNNING ) {
\r
6072 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
6073 error( RtAudioError::WARNING );
\r
6077 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6079 // Increase scheduler frequency on lesser windows (a side-effect of
\r
6080 // increasing timer accuracy). On greater windows (Win2K or later),
\r
6081 // this is already in effect.
\r
// Matching timeEndPeriod( 1 ) is issued in stopStream().
6082 timeBeginPeriod( 1 );
\r
6084 buffersRolling = false;
\r
6085 duplexPrerollBytes = 0;
\r
6087 if ( stream_.mode == DUPLEX ) {
\r
6088 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
6089 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
6092 HRESULT result = 0;
\r
// Kick off looping playback on the output secondary buffer.
6093 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6095 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6096 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
6097 if ( FAILED( result ) ) {
\r
6098 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
6099 errorText_ = errorStream_.str();
\r
// Kick off looping capture on the input buffer.
6104 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6106 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6107 result = buffer->Start( DSCBSTART_LOOPING );
\r
6108 if ( FAILED( result ) ) {
\r
6109 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
6110 errorText_ = errorStream_.str();
\r
// Reset drain bookkeeping and the condition event used by stopStream().
6115 handle->drainCounter = 0;
\r
6116 handle->internalDrain = false;
\r
6117 ResetEvent( handle->condition );
\r
6118 stream_.state = STREAM_RUNNING;
\r
6121 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Stops a running stream.  For playback, first waits (via the manual-reset
// condition event) for the callback to drain queued output, then stops the
// DS buffer and zeroes it so a restart plays silence, not stale data; the
// capture side is stopped and zeroed the same way.  Restores the scheduler
// period and raises SYSTEM_ERROR if any DS call failed.
// NOTE(review): excerpt is perforated -- braces/early-return/`goto unlock`
// lines are missing; code text below is preserved byte-for-byte.
6124 void RtApiDs :: stopStream()
\r
6127 if ( stream_.state == STREAM_STOPPED ) {
\r
6128 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
6129 error( RtAudioError::WARNING );
\r
6133 HRESULT result = 0;
\r
6136 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6137 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// drainCounter == 0 means no drain in progress: request one (2) and
// block until the callback signals the condition event.
6138 if ( handle->drainCounter == 0 ) {
\r
6139 handle->drainCounter = 2;
\r
6140 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
6143 stream_.state = STREAM_STOPPED;
\r
6145 MUTEX_LOCK( &stream_.mutex );
\r
6147 // Stop the buffer and clear memory
\r
6148 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6149 result = buffer->Stop();
\r
6150 if ( FAILED( result ) ) {
\r
6151 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
6152 errorText_ = errorStream_.str();
\r
6156 // Lock the buffer and clear it so that if we start to play again,
\r
6157 // we won't have old data playing.
\r
6158 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6159 if ( FAILED( result ) ) {
\r
6160 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
6161 errorText_ = errorStream_.str();
\r
6165 // Zero the DS buffer
\r
6166 ZeroMemory( audioPtr, dataLen );
\r
6168 // Unlock the DS buffer
\r
6169 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6170 if ( FAILED( result ) ) {
\r
6171 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
6172 errorText_ = errorStream_.str();
\r
6176 // If we start playing again, we must begin at beginning of buffer.
\r
6177 handle->bufferPointer[0] = 0;
\r
6180 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6181 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6185 stream_.state = STREAM_STOPPED;
\r
// In DUPLEX mode the mutex was already taken in the output branch above.
6187 if ( stream_.mode != DUPLEX )
\r
6188 MUTEX_LOCK( &stream_.mutex );
\r
6190 result = buffer->Stop();
\r
6191 if ( FAILED( result ) ) {
\r
6192 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
6193 errorText_ = errorStream_.str();
\r
6197 // Lock the buffer and clear it so that if we start to play again,
\r
6198 // we won't have old data playing.
\r
6199 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6200 if ( FAILED( result ) ) {
\r
6201 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
6202 errorText_ = errorStream_.str();
\r
6206 // Zero the DS buffer
\r
6207 ZeroMemory( audioPtr, dataLen );
\r
6209 // Unlock the DS buffer
\r
6210 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6211 if ( FAILED( result ) ) {
\r
6212 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
6213 errorText_ = errorStream_.str();
\r
6217 // If we start recording again, we must begin at beginning of buffer.
\r
6218 handle->bufferPointer[1] = 0;
\r
6222 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
6223 MUTEX_UNLOCK( &stream_.mutex );
\r
6225 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Aborts a running stream without draining pending output: setting
// drainCounter to 2 tells the callback/stop machinery to skip the normal
// output drain.  NOTE(review): the remainder of this function (presumably a
// stopStream() call and closing brace) falls outside this perforated
// excerpt -- confirm against the full source.
6228 void RtApiDs :: abortStream()
\r
6231 if ( stream_.state == STREAM_STOPPED ) {
\r
6232 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
6233 error( RtAudioError::WARNING );
\r
6237 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6238 handle->drainCounter = 2;
\r
6243 void RtApiDs :: callbackEvent()
\r
6245 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6246 Sleep( 50 ); // sleep 50 milliseconds
\r
6250 if ( stream_.state == STREAM_CLOSED ) {
\r
6251 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6252 error( RtAudioError::WARNING );
\r
6256 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6257 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6259 // Check if we were draining the stream and signal is finished.
\r
6260 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6262 stream_.state = STREAM_STOPPING;
\r
6263 if ( handle->internalDrain == false )
\r
6264 SetEvent( handle->condition );
\r
6270 // Invoke user callback to get fresh output data UNLESS we are
\r
6271 // draining stream.
\r
6272 if ( handle->drainCounter == 0 ) {
\r
6273 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6274 double streamTime = getStreamTime();
\r
6275 RtAudioStreamStatus status = 0;
\r
6276 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6277 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6278 handle->xrun[0] = false;
\r
6280 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6281 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6282 handle->xrun[1] = false;
\r
6284 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6285 stream_.bufferSize, streamTime, status, info->userData );
\r
6286 if ( cbReturnValue == 2 ) {
\r
6287 stream_.state = STREAM_STOPPING;
\r
6288 handle->drainCounter = 2;
\r
6292 else if ( cbReturnValue == 1 ) {
\r
6293 handle->drainCounter = 1;
\r
6294 handle->internalDrain = true;
\r
6299 DWORD currentWritePointer, safeWritePointer;
\r
6300 DWORD currentReadPointer, safeReadPointer;
\r
6301 UINT nextWritePointer;
\r
6303 LPVOID buffer1 = NULL;
\r
6304 LPVOID buffer2 = NULL;
\r
6305 DWORD bufferSize1 = 0;
\r
6306 DWORD bufferSize2 = 0;
\r
6311 MUTEX_LOCK( &stream_.mutex );
\r
6312 if ( stream_.state == STREAM_STOPPED ) {
\r
6313 MUTEX_UNLOCK( &stream_.mutex );
\r
6317 if ( buffersRolling == false ) {
\r
6318 if ( stream_.mode == DUPLEX ) {
\r
6319 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6321 // It takes a while for the devices to get rolling. As a result,
\r
6322 // there's no guarantee that the capture and write device pointers
\r
6323 // will move in lockstep. Wait here for both devices to start
\r
6324 // rolling, and then set our buffer pointers accordingly.
\r
6325 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6326 // bytes later than the write buffer.
\r
6328 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6329 // take place between the two GetCurrentPosition calls... but I'm
\r
6330 // really not sure how to solve the problem. Temporarily boost to
\r
6331 // Realtime priority, maybe; but I'm not sure what priority the
\r
6332 // DirectSound service threads run at. We *should* be roughly
\r
6333 // within a ms or so of correct.
\r
6335 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6336 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6338 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6340 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6341 if ( FAILED( result ) ) {
\r
6342 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6343 errorText_ = errorStream_.str();
\r
6344 MUTEX_UNLOCK( &stream_.mutex );
\r
6345 error( RtAudioError::SYSTEM_ERROR );
\r
6348 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6349 if ( FAILED( result ) ) {
\r
6350 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6351 errorText_ = errorStream_.str();
\r
6352 MUTEX_UNLOCK( &stream_.mutex );
\r
6353 error( RtAudioError::SYSTEM_ERROR );
\r
6357 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6358 if ( FAILED( result ) ) {
\r
6359 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6360 errorText_ = errorStream_.str();
\r
6361 MUTEX_UNLOCK( &stream_.mutex );
\r
6362 error( RtAudioError::SYSTEM_ERROR );
\r
6365 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6366 if ( FAILED( result ) ) {
\r
6367 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6368 errorText_ = errorStream_.str();
\r
6369 MUTEX_UNLOCK( &stream_.mutex );
\r
6370 error( RtAudioError::SYSTEM_ERROR );
\r
6373 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6377 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6379 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6380 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6381 handle->bufferPointer[1] = safeReadPointer;
\r
6383 else if ( stream_.mode == OUTPUT ) {
\r
6385 // Set the proper nextWritePosition after initial startup.
\r
6386 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6387     result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
\r
6388 if ( FAILED( result ) ) {
\r
6389 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6390 errorText_ = errorStream_.str();
\r
6391 MUTEX_UNLOCK( &stream_.mutex );
\r
6392 error( RtAudioError::SYSTEM_ERROR );
\r
6395 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6396 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6399 buffersRolling = true;
\r
6402 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6404 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6406 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6407 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6408 bufferBytes *= formatBytes( stream_.userFormat );
\r
6409 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6412 // Setup parameters and do buffer conversion if necessary.
\r
6413 if ( stream_.doConvertBuffer[0] ) {
\r
6414 buffer = stream_.deviceBuffer;
\r
6415 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6416 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6417 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6420 buffer = stream_.userBuffer[0];
\r
6421 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6422 bufferBytes *= formatBytes( stream_.userFormat );
\r
6425 // No byte swapping necessary in DirectSound implementation.
\r
6427 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6428 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6430 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6431 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6433 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6434 nextWritePointer = handle->bufferPointer[0];
\r
6436 DWORD endWrite, leadPointer;
\r
6438 // Find out where the read and "safe write" pointers are.
\r
6439     result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
\r
6440 if ( FAILED( result ) ) {
\r
6441 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6442 errorText_ = errorStream_.str();
\r
6443 MUTEX_UNLOCK( &stream_.mutex );
\r
6444 error( RtAudioError::SYSTEM_ERROR );
\r
6448 // We will copy our output buffer into the region between
\r
6449 // safeWritePointer and leadPointer. If leadPointer is not
\r
6450 // beyond the next endWrite position, wait until it is.
\r
6451 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6452 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6453 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6454 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6455 endWrite = nextWritePointer + bufferBytes;
\r
6457 // Check whether the entire write region is behind the play pointer.
\r
6458 if ( leadPointer >= endWrite ) break;
\r
6460 // If we are here, then we must wait until the leadPointer advances
\r
6461 // beyond the end of our next write region. We use the
\r
6462 // Sleep() function to suspend operation until that happens.
\r
6463 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6464 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6465 if ( millis < 1.0 ) millis = 1.0;
\r
6466 Sleep( (DWORD) millis );
\r
6469 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6470 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6471 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6472 handle->xrun[0] = true;
\r
6473 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6474 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6475 handle->bufferPointer[0] = nextWritePointer;
\r
6476 endWrite = nextWritePointer + bufferBytes;
\r
6479 // Lock free space in the buffer
\r
6480 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6481 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6482 if ( FAILED( result ) ) {
\r
6483 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6484 errorText_ = errorStream_.str();
\r
6485 MUTEX_UNLOCK( &stream_.mutex );
\r
6486 error( RtAudioError::SYSTEM_ERROR );
\r
6490 // Copy our buffer into the DS buffer
\r
6491 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6492 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6494 // Update our buffer offset and unlock sound buffer
\r
6495 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6496 if ( FAILED( result ) ) {
\r
6497 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6498 errorText_ = errorStream_.str();
\r
6499 MUTEX_UNLOCK( &stream_.mutex );
\r
6500 error( RtAudioError::SYSTEM_ERROR );
\r
6503 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6504 handle->bufferPointer[0] = nextWritePointer;
\r
6507 // Don't bother draining input
\r
6508 if ( handle->drainCounter ) {
\r
6509 handle->drainCounter++;
\r
6513 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6515 // Setup parameters.
\r
6516 if ( stream_.doConvertBuffer[1] ) {
\r
6517 buffer = stream_.deviceBuffer;
\r
6518 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6519 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6522 buffer = stream_.userBuffer[1];
\r
6523 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6524 bufferBytes *= formatBytes( stream_.userFormat );
\r
6527 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6528 long nextReadPointer = handle->bufferPointer[1];
\r
6529 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6531 // Find out where the write and "safe read" pointers are.
\r
6532     result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6533 if ( FAILED( result ) ) {
\r
6534 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6535 errorText_ = errorStream_.str();
\r
6536 MUTEX_UNLOCK( &stream_.mutex );
\r
6537 error( RtAudioError::SYSTEM_ERROR );
\r
6541 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6542 DWORD endRead = nextReadPointer + bufferBytes;
\r
6544 // Handling depends on whether we are INPUT or DUPLEX.
\r
6545 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6546 // then a wait here will drag the write pointers into the forbidden zone.
\r
6548 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6549 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6550 // practical way to sync up the read and write pointers reliably, given the
\r
6551 // the very complex relationship between phase and increment of the read and write
\r
6554 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6555 // provide a pre-roll period of 0.5 seconds in which we return
\r
6556 // zeros from the read buffer while the pointers sync up.
\r
6558 if ( stream_.mode == DUPLEX ) {
\r
6559 if ( safeReadPointer < endRead ) {
\r
6560 if ( duplexPrerollBytes <= 0 ) {
\r
6561           // Pre-roll time over. Be more aggressive.
\r
6562 int adjustment = endRead-safeReadPointer;
\r
6564 handle->xrun[1] = true;
\r
6566 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6567 // and perform fine adjustments later.
\r
6568 // - small adjustments: back off by twice as much.
\r
6569 if ( adjustment >= 2*bufferBytes )
\r
6570 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6572 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6574 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6578         // In pre-roll time. Just do it.
\r
6579 nextReadPointer = safeReadPointer - bufferBytes;
\r
6580 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6582 endRead = nextReadPointer + bufferBytes;
\r
6585 else { // mode == INPUT
\r
6586 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6587 // See comments for playback.
\r
6588 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6589 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6590 if ( millis < 1.0 ) millis = 1.0;
\r
6591 Sleep( (DWORD) millis );
\r
6593 // Wake up and find out where we are now.
\r
6594         result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6595 if ( FAILED( result ) ) {
\r
6596 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6597 errorText_ = errorStream_.str();
\r
6598 MUTEX_UNLOCK( &stream_.mutex );
\r
6599 error( RtAudioError::SYSTEM_ERROR );
\r
6603 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6607 // Lock free space in the buffer
\r
6608 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6609 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6610 if ( FAILED( result ) ) {
\r
6611 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6612 errorText_ = errorStream_.str();
\r
6613 MUTEX_UNLOCK( &stream_.mutex );
\r
6614 error( RtAudioError::SYSTEM_ERROR );
\r
6618 if ( duplexPrerollBytes <= 0 ) {
\r
6619 // Copy our buffer into the DS buffer
\r
6620 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6621 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6624 memset( buffer, 0, bufferSize1 );
\r
6625 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6626 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6629 // Update our buffer offset and unlock sound buffer
\r
6630 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6631 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6632 if ( FAILED( result ) ) {
\r
6633 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6634 errorText_ = errorStream_.str();
\r
6635 MUTEX_UNLOCK( &stream_.mutex );
\r
6636 error( RtAudioError::SYSTEM_ERROR );
\r
6639 handle->bufferPointer[1] = nextReadPointer;
\r
6641 // No byte swapping necessary in DirectSound implementation.
\r
6643 // If necessary, convert 8-bit data from unsigned to signed.
\r
6644 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6645 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6647 // Do buffer conversion if necessary.
\r
6648 if ( stream_.doConvertBuffer[1] )
\r
6649 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6653 MUTEX_UNLOCK( &stream_.mutex );
\r
6654 RtApi::tickStreamTime();
\r
6657 // Definitions for utility functions and callbacks
\r
6658 // specific to the DirectSound implementation.
\r
6660 static unsigned __stdcall callbackHandler( void *ptr )
\r
6662 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6663 RtApiDs *object = (RtApiDs *) info->object;
\r
6664 bool* isRunning = &info->isRunning;
\r
6666 while ( *isRunning == true ) {
\r
6667 object->callbackEvent();
\r
6670 _endthreadex( 0 );
\r
6674 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6675 LPCTSTR description,
\r
6676 LPCTSTR /*module*/,
\r
6677 LPVOID lpContext )
\r
6679 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6680 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6683 bool validDevice = false;
\r
6684 if ( probeInfo.isInput == true ) {
\r
6686 LPDIRECTSOUNDCAPTURE object;
\r
6688 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6689 if ( hr != DS_OK ) return TRUE;
\r
6691 caps.dwSize = sizeof(caps);
\r
6692 hr = object->GetCaps( &caps );
\r
6693 if ( hr == DS_OK ) {
\r
6694 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6695 validDevice = true;
\r
6697 object->Release();
\r
6701 LPDIRECTSOUND object;
\r
6702 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6703 if ( hr != DS_OK ) return TRUE;
\r
6705 caps.dwSize = sizeof(caps);
\r
6706 hr = object->GetCaps( &caps );
\r
6707 if ( hr == DS_OK ) {
\r
6708 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6709 validDevice = true;
\r
6711 object->Release();
\r
6714 // If good device, then save its name and guid.
\r
6715 std::string name = convertCharPointerToStdString( description );
\r
6716 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6717 if ( lpguid == NULL )
\r
6718 name = "Default Device";
\r
6719 if ( validDevice ) {
\r
6720 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6721 if ( dsDevices[i].name == name ) {
\r
6722 dsDevices[i].found = true;
\r
6723 if ( probeInfo.isInput ) {
\r
6724 dsDevices[i].id[1] = lpguid;
\r
6725 dsDevices[i].validId[1] = true;
\r
6728 dsDevices[i].id[0] = lpguid;
\r
6729 dsDevices[i].validId[0] = true;
\r
6736 device.name = name;
\r
6737 device.found = true;
\r
6738 if ( probeInfo.isInput ) {
\r
6739 device.id[1] = lpguid;
\r
6740 device.validId[1] = true;
\r
6743 device.id[0] = lpguid;
\r
6744 device.validId[0] = true;
\r
6746 dsDevices.push_back( device );
\r
6752 static const char* getErrorString( int code )
\r
6756 case DSERR_ALLOCATED:
\r
6757 return "Already allocated";
\r
6759 case DSERR_CONTROLUNAVAIL:
\r
6760 return "Control unavailable";
\r
6762 case DSERR_INVALIDPARAM:
\r
6763 return "Invalid parameter";
\r
6765 case DSERR_INVALIDCALL:
\r
6766 return "Invalid call";
\r
6768 case DSERR_GENERIC:
\r
6769 return "Generic error";
\r
6771 case DSERR_PRIOLEVELNEEDED:
\r
6772 return "Priority level needed";
\r
6774 case DSERR_OUTOFMEMORY:
\r
6775 return "Out of memory";
\r
6777 case DSERR_BADFORMAT:
\r
6778 return "The sample rate or the channel format is not supported";
\r
6780 case DSERR_UNSUPPORTED:
\r
6781 return "Not supported";
\r
6783 case DSERR_NODRIVER:
\r
6784 return "No driver";
\r
6786 case DSERR_ALREADYINITIALIZED:
\r
6787 return "Already initialized";
\r
6789 case DSERR_NOAGGREGATION:
\r
6790 return "No aggregation";
\r
6792 case DSERR_BUFFERLOST:
\r
6793 return "Buffer lost";
\r
6795 case DSERR_OTHERAPPHASPRIO:
\r
6796 return "Another application already has priority";
\r
6798 case DSERR_UNINITIALIZED:
\r
6799 return "Uninitialized";
\r
6802 return "DirectSound unknown error";
\r
6805 //******************** End of __WINDOWS_DS__ *********************//
\r
6809 #if defined(__LINUX_ALSA__)
\r
6811 #include <alsa/asoundlib.h>
\r
6812 #include <unistd.h>
\r
6814 // A structure to hold various information related to the ALSA API
\r
6815 // implementation.
\r
6816 struct AlsaHandle {
\r
6817 snd_pcm_t *handles[2];
\r
6818 bool synchronized;
\r
6820 pthread_cond_t runnable_cv;
\r
6824 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6827 static void *alsaCallbackHandler( void * ptr );
\r
6829 RtApiAlsa :: RtApiAlsa()
\r
6831 // Nothing to do here.
\r
6834 RtApiAlsa :: ~RtApiAlsa()
\r
6836 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6839 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6841 unsigned nDevices = 0;
\r
6842 int result, subdevice, card;
\r
6844 snd_ctl_t *handle;
\r
6846 // Count cards and devices
\r
6848 snd_card_next( &card );
\r
6849 while ( card >= 0 ) {
\r
6850 sprintf( name, "hw:%d", card );
\r
6851 result = snd_ctl_open( &handle, name, 0 );
\r
6852 if ( result < 0 ) {
\r
6853 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6854 errorText_ = errorStream_.str();
\r
6855 error( RtAudioError::WARNING );
\r
6860 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6861 if ( result < 0 ) {
\r
6862 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6863 errorText_ = errorStream_.str();
\r
6864 error( RtAudioError::WARNING );
\r
6867 if ( subdevice < 0 )
\r
6872 snd_ctl_close( handle );
\r
6873 snd_card_next( &card );
\r
6876 result = snd_ctl_open( &handle, "default", 0 );
\r
6877 if (result == 0) {
\r
6879 snd_ctl_close( handle );
\r
6885 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6887 RtAudio::DeviceInfo info;
\r
6888 info.probed = false;
\r
6890 unsigned nDevices = 0;
\r
6891 int result, subdevice, card;
\r
6893 snd_ctl_t *chandle;
\r
6895 // Count cards and devices
\r
6898 snd_card_next( &card );
\r
6899 while ( card >= 0 ) {
\r
6900 sprintf( name, "hw:%d", card );
\r
6901 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6902 if ( result < 0 ) {
\r
6903 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6904 errorText_ = errorStream_.str();
\r
6905 error( RtAudioError::WARNING );
\r
6910 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6911 if ( result < 0 ) {
\r
6912 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6913 errorText_ = errorStream_.str();
\r
6914 error( RtAudioError::WARNING );
\r
6917 if ( subdevice < 0 ) break;
\r
6918 if ( nDevices == device ) {
\r
6919 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6925 snd_ctl_close( chandle );
\r
6926 snd_card_next( &card );
\r
6929 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6930 if ( result == 0 ) {
\r
6931 if ( nDevices == device ) {
\r
6932 strcpy( name, "default" );
\r
6938 if ( nDevices == 0 ) {
\r
6939 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6940 error( RtAudioError::INVALID_USE );
\r
6944 if ( device >= nDevices ) {
\r
6945 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6946 error( RtAudioError::INVALID_USE );
\r
6952 // If a stream is already open, we cannot probe the stream devices.
\r
6953 // Thus, use the saved results.
\r
6954 if ( stream_.state != STREAM_CLOSED &&
\r
6955 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6956 snd_ctl_close( chandle );
\r
6957 if ( device >= devices_.size() ) {
\r
6958 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6959 error( RtAudioError::WARNING );
\r
6962 return devices_[ device ];
\r
6965 int openMode = SND_PCM_ASYNC;
\r
6966 snd_pcm_stream_t stream;
\r
6967 snd_pcm_info_t *pcminfo;
\r
6968 snd_pcm_info_alloca( &pcminfo );
\r
6969 snd_pcm_t *phandle;
\r
6970 snd_pcm_hw_params_t *params;
\r
6971 snd_pcm_hw_params_alloca( ¶ms );
\r
6973 // First try for playback unless default device (which has subdev -1)
\r
6974 stream = SND_PCM_STREAM_PLAYBACK;
\r
6975 snd_pcm_info_set_stream( pcminfo, stream );
\r
6976 if ( subdevice != -1 ) {
\r
6977 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6978 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6980 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6981 if ( result < 0 ) {
\r
6982 // Device probably doesn't support playback.
\r
6983 goto captureProbe;
\r
6987 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6988 if ( result < 0 ) {
\r
6989 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6990 errorText_ = errorStream_.str();
\r
6991 error( RtAudioError::WARNING );
\r
6992 goto captureProbe;
\r
6995 // The device is open ... fill the parameter structure.
\r
6996 result = snd_pcm_hw_params_any( phandle, params );
\r
6997 if ( result < 0 ) {
\r
6998 snd_pcm_close( phandle );
\r
6999 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7000 errorText_ = errorStream_.str();
\r
7001 error( RtAudioError::WARNING );
\r
7002 goto captureProbe;
\r
7005 // Get output channel information.
\r
7006 unsigned int value;
\r
7007 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7008 if ( result < 0 ) {
\r
7009 snd_pcm_close( phandle );
\r
7010 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
7011 errorText_ = errorStream_.str();
\r
7012 error( RtAudioError::WARNING );
\r
7013 goto captureProbe;
\r
7015 info.outputChannels = value;
\r
7016 snd_pcm_close( phandle );
\r
7019 stream = SND_PCM_STREAM_CAPTURE;
\r
7020 snd_pcm_info_set_stream( pcminfo, stream );
\r
7022 // Now try for capture unless default device (with subdev = -1)
\r
7023 if ( subdevice != -1 ) {
\r
7024 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
7025 snd_ctl_close( chandle );
\r
7026 if ( result < 0 ) {
\r
7027 // Device probably doesn't support capture.
\r
7028 if ( info.outputChannels == 0 ) return info;
\r
7029 goto probeParameters;
\r
7033 snd_ctl_close( chandle );
\r
7035 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7036 if ( result < 0 ) {
\r
7037 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7038 errorText_ = errorStream_.str();
\r
7039 error( RtAudioError::WARNING );
\r
7040 if ( info.outputChannels == 0 ) return info;
\r
7041 goto probeParameters;
\r
7044 // The device is open ... fill the parameter structure.
\r
7045 result = snd_pcm_hw_params_any( phandle, params );
\r
7046 if ( result < 0 ) {
\r
7047 snd_pcm_close( phandle );
\r
7048 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7049 errorText_ = errorStream_.str();
\r
7050 error( RtAudioError::WARNING );
\r
7051 if ( info.outputChannels == 0 ) return info;
\r
7052 goto probeParameters;
\r
7055 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7056 if ( result < 0 ) {
\r
7057 snd_pcm_close( phandle );
\r
7058 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
7059 errorText_ = errorStream_.str();
\r
7060 error( RtAudioError::WARNING );
\r
7061 if ( info.outputChannels == 0 ) return info;
\r
7062 goto probeParameters;
\r
7064 info.inputChannels = value;
\r
7065 snd_pcm_close( phandle );
\r
7067 // If device opens for both playback and capture, we determine the channels.
\r
7068 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
7069 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
7071 // ALSA doesn't provide default devices so we'll use the first available one.
\r
7072 if ( device == 0 && info.outputChannels > 0 )
\r
7073 info.isDefaultOutput = true;
\r
7074 if ( device == 0 && info.inputChannels > 0 )
\r
7075 info.isDefaultInput = true;
\r
7078 // At this point, we just need to figure out the supported data
\r
7079 // formats and sample rates. We'll proceed by opening the device in
\r
7080 // the direction with the maximum number of channels, or playback if
\r
7081 // they are equal. This might limit our sample rate options, but so
\r
7084 if ( info.outputChannels >= info.inputChannels )
\r
7085 stream = SND_PCM_STREAM_PLAYBACK;
\r
7087 stream = SND_PCM_STREAM_CAPTURE;
\r
7088 snd_pcm_info_set_stream( pcminfo, stream );
\r
7090 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7091 if ( result < 0 ) {
\r
7092 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7093 errorText_ = errorStream_.str();
\r
7094 error( RtAudioError::WARNING );
\r
7098 // The device is open ... fill the parameter structure.
\r
7099 result = snd_pcm_hw_params_any( phandle, params );
\r
7100 if ( result < 0 ) {
\r
7101 snd_pcm_close( phandle );
\r
7102 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7103 errorText_ = errorStream_.str();
\r
7104 error( RtAudioError::WARNING );
\r
7108 // Test our discrete set of sample rate values.
\r
7109 info.sampleRates.clear();
\r
7110 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7111 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
\r
7112 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7114 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
7115 info.preferredSampleRate = SAMPLE_RATES[i];
\r
7118 if ( info.sampleRates.size() == 0 ) {
\r
7119 snd_pcm_close( phandle );
\r
7120 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7121 errorText_ = errorStream_.str();
\r
7122 error( RtAudioError::WARNING );
\r
7126 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7127 snd_pcm_format_t format;
\r
7128 info.nativeFormats = 0;
\r
7129 format = SND_PCM_FORMAT_S8;
\r
7130 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7131 info.nativeFormats |= RTAUDIO_SINT8;
\r
7132 format = SND_PCM_FORMAT_S16;
\r
7133 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7134 info.nativeFormats |= RTAUDIO_SINT16;
\r
7135 format = SND_PCM_FORMAT_S24;
\r
7136 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7137 info.nativeFormats |= RTAUDIO_SINT24;
\r
7138 format = SND_PCM_FORMAT_S32;
\r
7139 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7140 info.nativeFormats |= RTAUDIO_SINT32;
\r
7141 format = SND_PCM_FORMAT_FLOAT;
\r
7142 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7143 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7144 format = SND_PCM_FORMAT_FLOAT64;
\r
7145 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7146 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7148 // Check that we have at least one supported format
\r
7149 if ( info.nativeFormats == 0 ) {
\r
7150 snd_pcm_close( phandle );
\r
7151 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7152 errorText_ = errorStream_.str();
\r
7153 error( RtAudioError::WARNING );
\r
7157 // Get the device name
\r
7159 result = snd_card_get_name( card, &cardname );
\r
7160 if ( result >= 0 ) {
\r
7161 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7166 // That's all ... close the device and return
\r
7167 snd_pcm_close( phandle );
\r
7168 info.probed = true;
\r
7172 void RtApiAlsa :: saveDeviceInfo( void )
\r
7176 unsigned int nDevices = getDeviceCount();
\r
7177 devices_.resize( nDevices );
\r
7178 for ( unsigned int i=0; i<nDevices; i++ )
\r
7179 devices_[i] = getDeviceInfo( i );
\r
// Open and configure one direction (OUTPUT or INPUT) of an ALSA stream:
// resolve the device index to an ALSA "hw:card,subdevice" (or "default")
// name, open the PCM handle, negotiate access mode / sample format / rate /
// channels / period size, install hw and sw params, allocate the AlsaHandle
// and conversion buffers, and finally spawn the callback thread.
// Returns true (SUCCESS) on success; on failure sets errorText_ and
// presumably returns FAILURE (the returns are among the dropped lines).
// NOTE(review): this extraction dropped many original lines (the embedded
// line numbers jump, e.g. 7211 -> 7216); the missing lines are mostly
// closing braces, "return FAILURE;", "goto ..." and "else" tokens --
// restore from upstream before compiling.
7182 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7183 unsigned int firstChannel, unsigned int sampleRate,
\r
7184 RtAudioFormat format, unsigned int *bufferSize,
\r
7185 RtAudio::StreamOptions *options )
\r
7188 #if defined(__RTAUDIO_DEBUG__)
\r
7189 snd_output_t *out;
\r
7190 snd_output_stdio_attach(&out, stderr, 0);
\r
7193 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7195 unsigned nDevices = 0;
\r
7196 int result, subdevice, card;
\r
7198 snd_ctl_t *chandle;
\r
// Honor an explicit request to use the ALSA "default" PCM.
// NOTE(review): the declaration of the char buffer `name` is among the
// dropped lines (between 7196 and 7200).
7200 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7201 snprintf(name, sizeof(name), "%s", "default");
\r
7203 // Count cards and devices
\r
// Walk cards/subdevices until the running count matches the requested
// device index, capturing its "hw:%d,%d" name.
7205 snd_card_next( &card );
\r
7206 while ( card >= 0 ) {
\r
7207 sprintf( name, "hw:%d", card );
\r
7208 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7209 if ( result < 0 ) {
\r
7210 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7211 errorText_ = errorStream_.str();
\r
// NOTE(review): lines 7212-7215 (error return and the inner subdevice
// loop header) are missing from this extraction.
7216 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7217 if ( result < 0 ) break;
\r
7218 if ( subdevice < 0 ) break;
\r
7219 if ( nDevices == device ) {
\r
7220 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7221 snd_ctl_close( chandle );
\r
7226 snd_ctl_close( chandle );
\r
7227 snd_card_next( &card );
\r
// The "default" PCM is appended as the last enumerated device.
7230 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7231 if ( result == 0 ) {
\r
7232 if ( nDevices == device ) {
\r
7233 strcpy( name, "default" );
\r
7239 if ( nDevices == 0 ) {
\r
7240 // This should not happen because a check is made before this function is called.
\r
7241 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7245 if ( device >= nDevices ) {
\r
7246 // This should not happen because a check is made before this function is called.
\r
7247 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7254 // The getDeviceInfo() function will not work for a device that is
\r
7255 // already open. Thus, we'll probe the system before opening a
\r
7256 // stream and save the results for use by getDeviceInfo().
\r
7257 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7258 this->saveDeviceInfo();
\r
// Map the RtAudio stream direction onto the ALSA stream type.
7260 snd_pcm_stream_t stream;
\r
7261 if ( mode == OUTPUT )
\r
7262 stream = SND_PCM_STREAM_PLAYBACK;
\r
7264 stream = SND_PCM_STREAM_CAPTURE;
\r
7266 snd_pcm_t *phandle;
\r
7267 int openMode = SND_PCM_ASYNC;
\r
7268 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7269 if ( result < 0 ) {
\r
7270 if ( mode == OUTPUT )
\r
7271 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7273 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7274 errorText_ = errorStream_.str();
\r
7278 // Fill the parameter structure.
\r
7279 snd_pcm_hw_params_t *hw_params;
\r
7280 snd_pcm_hw_params_alloca( &hw_params );
\r
7281 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7282 if ( result < 0 ) {
\r
7283 snd_pcm_close( phandle );
\r
7284 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7285 errorText_ = errorStream_.str();
\r
7289 #if defined(__RTAUDIO_DEBUG__)
\r
7290 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7291 snd_pcm_hw_params_dump( hw_params, out );
\r
7294 // Set access ... check user preference.
\r
// Try the user's preferred interleaving first; fall back to the other
// access mode and record what the device actually accepted in
// stream_.deviceInterleaved[mode] (buffer conversion reconciles later).
7295 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7296 stream_.userInterleaved = false;
\r
7297 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7298 if ( result < 0 ) {
\r
7299 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7300 stream_.deviceInterleaved[mode] = true;
\r
7303 stream_.deviceInterleaved[mode] = false;
\r
7306 stream_.userInterleaved = true;
\r
7307 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7308 if ( result < 0 ) {
\r
7309 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7310 stream_.deviceInterleaved[mode] = false;
\r
7313 stream_.deviceInterleaved[mode] = true;
\r
7316 if ( result < 0 ) {
\r
7317 snd_pcm_close( phandle );
\r
7318 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7319 errorText_ = errorStream_.str();
\r
7323 // Determine how to set the device format.
\r
7324 stream_.userFormat = format;
\r
7325 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
// Direct mapping from RtAudio format to the equivalent ALSA format.
7327 if ( format == RTAUDIO_SINT8 )
\r
7328 deviceFormat = SND_PCM_FORMAT_S8;
\r
7329 else if ( format == RTAUDIO_SINT16 )
\r
7330 deviceFormat = SND_PCM_FORMAT_S16;
\r
7331 else if ( format == RTAUDIO_SINT24 )
\r
7332 deviceFormat = SND_PCM_FORMAT_S24;
\r
7333 else if ( format == RTAUDIO_SINT32 )
\r
7334 deviceFormat = SND_PCM_FORMAT_S32;
\r
7335 else if ( format == RTAUDIO_FLOAT32 )
\r
7336 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7337 else if ( format == RTAUDIO_FLOAT64 )
\r
7338 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7340 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7341 stream_.deviceFormat[mode] = format;
\r
7345 // The user requested format is not natively supported by the device.
\r
// Fall back through candidate formats from widest to narrowest; the
// conversion layer will translate to/from the user format.
7346 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7347 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7348 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7352 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7353 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7354 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7358 deviceFormat = SND_PCM_FORMAT_S32;
\r
7359 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7360 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7364 deviceFormat = SND_PCM_FORMAT_S24;
\r
7365 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7366 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7370 deviceFormat = SND_PCM_FORMAT_S16;
\r
7371 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7372 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7376 deviceFormat = SND_PCM_FORMAT_S8;
\r
7377 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7378 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7382 // If we get here, no supported format was found.
\r
7383 snd_pcm_close( phandle );
\r
7384 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7385 errorText_ = errorStream_.str();
\r
7389 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7390 if ( result < 0 ) {
\r
7391 snd_pcm_close( phandle );
\r
7392 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7393 errorText_ = errorStream_.str();
\r
7397 // Determine whether byte-swaping is necessary.
\r
// snd_pcm_format_cpu_endian() returns 1 if the format matches CPU
// endianness, 0 if it does not (swap needed), negative on error.
7398 stream_.doByteSwap[mode] = false;
\r
7399 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7400 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7401 if ( result == 0 )
\r
7402 stream_.doByteSwap[mode] = true;
\r
7403 else if (result < 0) {
\r
7404 snd_pcm_close( phandle );
\r
7405 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7406 errorText_ = errorStream_.str();
\r
7411 // Set the sample rate.
\r
// _near variant: sampleRate may be adjusted to the closest supported rate.
7412 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7413 if ( result < 0 ) {
\r
7414 snd_pcm_close( phandle );
\r
7415 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7416 errorText_ = errorStream_.str();
\r
7420 // Determine the number of channels for this device. We support a possible
\r
7421 // minimum device channel number > than the value requested by the user.
\r
7422 stream_.nUserChannels[mode] = channels;
\r
7423 unsigned int value;
\r
7424 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7425 unsigned int deviceChannels = value;
\r
7426 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7427 snd_pcm_close( phandle );
\r
7428 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7429 errorText_ = errorStream_.str();
\r
7433 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7434 if ( result < 0 ) {
\r
7435 snd_pcm_close( phandle );
\r
7436 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7437 errorText_ = errorStream_.str();
\r
// Open at least channels + firstChannel so the requested offset fits.
7440 deviceChannels = value;
\r
7441 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7442 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7444 // Set the device channels.
\r
7445 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7446 if ( result < 0 ) {
\r
7447 snd_pcm_close( phandle );
\r
7448 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7449 errorText_ = errorStream_.str();
\r
7453 // Set the buffer (or period) size.
\r
// NOTE(review): the declaration of `dir` (used below) is among the
// dropped lines (original line 7454).
7455 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7456 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7457 if ( result < 0 ) {
\r
7458 snd_pcm_close( phandle );
\r
7459 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7460 errorText_ = errorStream_.str();
\r
// Report the period size the device actually granted back to the caller.
7463 *bufferSize = periodSize;
\r
7465 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7466 unsigned int periods = 0;
\r
7467 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7468 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7469 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7470 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7471 if ( result < 0 ) {
\r
7472 snd_pcm_close( phandle );
\r
7473 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7474 errorText_ = errorStream_.str();
\r
7478 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7479 // MUST be the same in both directions!
\r
7480 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7481 snd_pcm_close( phandle );
\r
7482 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7483 errorText_ = errorStream_.str();
\r
7487 stream_.bufferSize = *bufferSize;
\r
7489 // Install the hardware configuration
\r
7490 result = snd_pcm_hw_params( phandle, hw_params );
\r
7491 if ( result < 0 ) {
\r
7492 snd_pcm_close( phandle );
\r
7493 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7494 errorText_ = errorStream_.str();
\r
7498 #if defined(__RTAUDIO_DEBUG__)
\r
7499 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7500 snd_pcm_hw_params_dump( hw_params, out );
\r
7503 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7504 snd_pcm_sw_params_t *sw_params = NULL;
\r
7505 snd_pcm_sw_params_alloca( &sw_params );
\r
7506 snd_pcm_sw_params_current( phandle, sw_params );
\r
7507 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
// ULONG_MAX stop threshold keeps the device from auto-stopping on xrun.
7508 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7509 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7511 // The following two settings were suggested by Theo Veenker
\r
7512 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7513 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7515 // here are two options for a fix
\r
7516 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
// Silence the whole ring buffer (boundary value) so xruns play zeros.
7517 snd_pcm_uframes_t val;
\r
7518 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7519 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7521 result = snd_pcm_sw_params( phandle, sw_params );
\r
7522 if ( result < 0 ) {
\r
7523 snd_pcm_close( phandle );
\r
7524 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7525 errorText_ = errorStream_.str();
\r
7529 #if defined(__RTAUDIO_DEBUG__)
\r
7530 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7531 snd_pcm_sw_params_dump( sw_params, out );
\r
7534 // Set flags for buffer conversion
\r
// Conversion is needed when format, channel count, or interleaving of the
// user-side buffers differs from what the device accepted.
7535 stream_.doConvertBuffer[mode] = false;
\r
7536 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7537 stream_.doConvertBuffer[mode] = true;
\r
7538 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7539 stream_.doConvertBuffer[mode] = true;
\r
7540 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7541 stream_.nUserChannels[mode] > 1 )
\r
7542 stream_.doConvertBuffer[mode] = true;
\r
7544 // Allocate the ApiHandle if necessary and then save.
\r
7545 AlsaHandle *apiInfo = 0;
\r
7546 if ( stream_.apiHandle == 0 ) {
\r
7548 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7550 catch ( std::bad_alloc& ) {
\r
7551 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7555 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7556 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7560 stream_.apiHandle = (void *) apiInfo;
\r
7561 apiInfo->handles[0] = 0;
\r
7562 apiInfo->handles[1] = 0;
\r
// Second leg of a duplex open: reuse the existing handle structure.
7565 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7567 apiInfo->handles[mode] = phandle;
\r
7570 // Allocate necessary internal buffers.
\r
7571 unsigned long bufferBytes;
\r
7572 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7573 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7574 if ( stream_.userBuffer[mode] == NULL ) {
\r
7575 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7579 if ( stream_.doConvertBuffer[mode] ) {
\r
// A single shared device buffer serves both directions; only reallocate
// if the input side needs more bytes than the output side already has.
7581 bool makeBuffer = true;
\r
7582 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7583 if ( mode == INPUT ) {
\r
7584 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7585 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7586 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7590 if ( makeBuffer ) {
\r
7591 bufferBytes *= *bufferSize;
\r
7592 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7593 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7594 if ( stream_.deviceBuffer == NULL ) {
\r
7595 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
// Record final stream parameters.
7601 stream_.sampleRate = sampleRate;
\r
7602 stream_.nBuffers = periods;
\r
7603 stream_.device[mode] = device;
\r
7604 stream_.state = STREAM_STOPPED;
\r
7606 // Setup the buffer conversion information structure.
\r
7607 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7609 // Setup thread if necessary.
\r
7610 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7611 // We had already set up an output stream.
\r
7612 stream_.mode = DUPLEX;
\r
7613 // Link the streams if possible.
\r
// snd_pcm_link makes start/stop of the two handles atomic; failure is
// only a warning (the duplex pair then runs unsynchronized).
7614 apiInfo->synchronized = false;
\r
7615 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7616 apiInfo->synchronized = true;
\r
7618 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7619 error( RtAudioError::WARNING );
\r
7623 stream_.mode = mode;
\r
7625 // Setup callback thread.
\r
7626 stream_.callbackInfo.object = (void *) this;
\r
7628 // Set the thread attributes for joinable and realtime scheduling
\r
7629 // priority (optional). The higher priority will only take affect
\r
7630 // if the program is run as root or suid. Note, under Linux
\r
7631 // processes with CAP_SYS_NICE privilege, a user can change
\r
7632 // scheduling policy and priority (thus need not be root). See
\r
7633 // POSIX "capabilities".
\r
7634 pthread_attr_t attr;
\r
7635 pthread_attr_init( &attr );
\r
7636 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7638 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7639 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7640 // We previously attempted to increase the audio callback priority
\r
7641 // to SCHED_RR here via the attributes. However, while no errors
\r
7642 // were reported in doing so, it did not work. So, now this is
\r
7643 // done in the alsaCallbackHandler function.
\r
7644 stream_.callbackInfo.doRealtime = true;
\r
// Clamp the requested priority into the valid SCHED_RR range.
7645 int priority = options->priority;
\r
7646 int min = sched_get_priority_min( SCHED_RR );
\r
7647 int max = sched_get_priority_max( SCHED_RR );
\r
7648 if ( priority < min ) priority = min;
\r
7649 else if ( priority > max ) priority = max;
\r
7650 stream_.callbackInfo.priority = priority;
\r
7654 stream_.callbackInfo.isRunning = true;
\r
7655 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7656 pthread_attr_destroy( &attr );
\r
7658 stream_.callbackInfo.isRunning = false;
\r
7659 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
// Error cleanup path: tear down everything allocated above.
// NOTE(review): the labels/guards around this cleanup (original lines
// 7660-7667, 7671, 7673-7674) are missing from the extraction.
7668 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7669 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7670 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7672 stream_.apiHandle = 0;
\r
7675 if ( phandle) snd_pcm_close( phandle );
\r
7677 for ( int i=0; i<2; i++ ) {
\r
7678 if ( stream_.userBuffer[i] ) {
\r
7679 free( stream_.userBuffer[i] );
\r
7680 stream_.userBuffer[i] = 0;
\r
7684 if ( stream_.deviceBuffer ) {
\r
7685 free( stream_.deviceBuffer );
\r
7686 stream_.deviceBuffer = 0;
\r
7689 stream_.state = STREAM_CLOSED;
\r
// Close an open ALSA stream: stop the callback thread (waking it if it is
// parked on the runnable condition variable), drop any in-flight PCM data,
// close both PCM handles, free the AlsaHandle and all buffers, and reset
// the stream structure to CLOSED/UNINITIALIZED.
// NOTE(review): extraction dropped lines between the preserved numbers
// (braces, "return;", and the AlsaHandle delete around 7723) -- restore
// from upstream before compiling.
7693 void RtApiAlsa :: closeStream()
\r
7695 if ( stream_.state == STREAM_CLOSED ) {
\r
7696 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7697 error( RtAudioError::WARNING );
\r
7701 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
// Signal the callback thread to exit its loop, wake it if it is blocked
// waiting to become runnable, then join it.
7702 stream_.callbackInfo.isRunning = false;
\r
7703 MUTEX_LOCK( &stream_.mutex );
\r
7704 if ( stream_.state == STREAM_STOPPED ) {
\r
7705 apiInfo->runnable = true;
\r
7706 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7708 MUTEX_UNLOCK( &stream_.mutex );
\r
7709 pthread_join( stream_.callbackInfo.thread, NULL );
\r
// Abort playback/capture immediately (snd_pcm_drop discards pending frames).
7711 if ( stream_.state == STREAM_RUNNING ) {
\r
7712 stream_.state = STREAM_STOPPED;
\r
7713 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7714 snd_pcm_drop( apiInfo->handles[0] );
\r
7715 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7716 snd_pcm_drop( apiInfo->handles[1] );
\r
7720 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7721 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7722 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7724 stream_.apiHandle = 0;
\r
// Release per-direction user buffers and the shared device buffer.
7727 for ( int i=0; i<2; i++ ) {
\r
7728 if ( stream_.userBuffer[i] ) {
\r
7729 free( stream_.userBuffer[i] );
\r
7730 stream_.userBuffer[i] = 0;
\r
7734 if ( stream_.deviceBuffer ) {
\r
7735 free( stream_.deviceBuffer );
\r
7736 stream_.deviceBuffer = 0;
\r
7739 stream_.mode = UNINITIALIZED;
\r
7740 stream_.state = STREAM_CLOSED;
\r
// Start a stopped stream: prepare the PCM handle(s) if needed, flush stale
// capture data on an unlinked input handle, mark the stream RUNNING and
// wake the callback thread via the runnable condition variable.
// NOTE(review): extraction dropped lines between the preserved numbers
// (braces, "return;", the declaration of `result`, and the goto/unlock
// error path) -- restore from upstream before compiling.
7743 void RtApiAlsa :: startStream()
\r
7745 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7748 if ( stream_.state == STREAM_RUNNING ) {
\r
7749 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7750 error( RtAudioError::WARNING );
\r
7754 MUTEX_LOCK( &stream_.mutex );
\r
7757 snd_pcm_state_t state;
\r
7758 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7759 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
// Output side: handle[0].
7760 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7761 state = snd_pcm_state( handle[0] );
\r
7762 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7763 result = snd_pcm_prepare( handle[0] );
\r
7764 if ( result < 0 ) {
\r
7765 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7766 errorText_ = errorStream_.str();
\r
// Input side: handle[1]; skipped when linked (synchronized) to the output,
// since starting the output then starts the input too.
7772 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7773 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7774 state = snd_pcm_state( handle[1] );
\r
7775 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7776 result = snd_pcm_prepare( handle[1] );
\r
7777 if ( result < 0 ) {
\r
7778 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7779 errorText_ = errorStream_.str();
\r
7785 stream_.state = STREAM_RUNNING;
\r
// Wake the callback thread parked in callbackEvent().
7788 apiInfo->runnable = true;
\r
7789 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7790 MUTEX_UNLOCK( &stream_.mutex );
\r
7792 if ( result >= 0 ) return;
\r
7793 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop a running stream, letting queued output play out: drains the output
// handle (or drops it when the handles are linked, since draining a linked
// pair can deadlock -- presumably why the synchronized case uses drop;
// confirm against upstream), and drops the input handle.
// NOTE(review): extraction dropped lines (braces, "return;", declaration
// of `result`, goto/unlock error path) -- restore from upstream.
7796 void RtApiAlsa :: stopStream()
\r
7799 if ( stream_.state == STREAM_STOPPED ) {
\r
7800 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7801 error( RtAudioError::WARNING );
\r
// State is flipped before taking the mutex so the callback thread parks.
7805 stream_.state = STREAM_STOPPED;
\r
7806 MUTEX_LOCK( &stream_.mutex );
\r
7809 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7810 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7811 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7812 if ( apiInfo->synchronized )
\r
7813 result = snd_pcm_drop( handle[0] );
\r
7815 result = snd_pcm_drain( handle[0] );
\r
7816 if ( result < 0 ) {
\r
7817 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7818 errorText_ = errorStream_.str();
\r
7823 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7824 result = snd_pcm_drop( handle[1] );
\r
7825 if ( result < 0 ) {
\r
7826 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7827 errorText_ = errorStream_.str();
\r
7833 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7834 MUTEX_UNLOCK( &stream_.mutex );
\r
7836 if ( result >= 0 ) return;
\r
7837 error( RtAudioError::SYSTEM_ERROR );
\r
// Abort a running stream immediately: like stopStream() but always uses
// snd_pcm_drop (discarding pending frames) on both directions instead of
// draining the output.
// NOTE(review): extraction dropped lines (braces, "return;", declaration
// of `result`, goto/unlock error path) -- restore from upstream.
7840 void RtApiAlsa :: abortStream()
\r
7843 if ( stream_.state == STREAM_STOPPED ) {
\r
7844 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7845 error( RtAudioError::WARNING );
\r
7849 stream_.state = STREAM_STOPPED;
\r
7850 MUTEX_LOCK( &stream_.mutex );
\r
7853 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7854 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7855 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7856 result = snd_pcm_drop( handle[0] );
\r
7857 if ( result < 0 ) {
\r
7858 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7859 errorText_ = errorStream_.str();
\r
// Input handle only needs an explicit drop when not linked to the output.
7864 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7865 result = snd_pcm_drop( handle[1] );
\r
7866 if ( result < 0 ) {
\r
7867 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7868 errorText_ = errorStream_.str();
\r
7874 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7875 MUTEX_UNLOCK( &stream_.mutex );
\r
7877 if ( result >= 0 ) return;
\r
7878 error( RtAudioError::SYSTEM_ERROR );
\r
// One iteration of the ALSA callback loop: park on the runnable condition
// variable while stopped, invoke the user callback, then read captured
// frames (input side) and write rendered frames (output side), handling
// xruns (-EPIPE) by re-preparing the device and flagging over/underflow.
// NOTE(review): extraction dropped lines between the preserved numbers
// (braces, returns, declarations of `buffer`/`channels`, the "tryagain"
// handling and "unlock:" label) -- restore from upstream before compiling.
7881 void RtApiAlsa :: callbackEvent()
\r
7883 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
// While stopped, sleep on runnable_cv until start/close signals us.
7884 if ( stream_.state == STREAM_STOPPED ) {
\r
7885 MUTEX_LOCK( &stream_.mutex );
\r
7886 while ( !apiInfo->runnable )
\r
7887 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7889 if ( stream_.state != STREAM_RUNNING ) {
\r
7890 MUTEX_UNLOCK( &stream_.mutex );
\r
7893 MUTEX_UNLOCK( &stream_.mutex );
\r
7896 if ( stream_.state == STREAM_CLOSED ) {
\r
7897 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7898 error( RtAudioError::WARNING );
\r
// Invoke the user callback, reporting any xrun flags recorded by the
// previous cycle; callback return of 1 = stop, 2 = abort.
7902 int doStopStream = 0;
\r
7903 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7904 double streamTime = getStreamTime();
\r
7905 RtAudioStreamStatus status = 0;
\r
7906 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7907 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7908 apiInfo->xrun[0] = false;
\r
7910 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7911 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7912 apiInfo->xrun[1] = false;
\r
7914 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7915 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7917 if ( doStopStream == 2 ) {
\r
7922 MUTEX_LOCK( &stream_.mutex );
\r
7924 // The state might change while waiting on a mutex.
\r
7925 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7930 snd_pcm_t **handle;
\r
7931 snd_pcm_sframes_t frames;
\r
7932 RtAudioFormat format;
\r
7933 handle = (snd_pcm_t **) apiInfo->handles;
\r
// ---- Capture side ----
7935 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7937 // Setup parameters.
\r
// Read into the device buffer when conversion is required, otherwise
// straight into the user buffer.
7938 if ( stream_.doConvertBuffer[1] ) {
\r
7939 buffer = stream_.deviceBuffer;
\r
7940 channels = stream_.nDeviceChannels[1];
\r
7941 format = stream_.deviceFormat[1];
\r
7944 buffer = stream_.userBuffer[1];
\r
7945 channels = stream_.nUserChannels[1];
\r
7946 format = stream_.userFormat;
\r
7949 // Read samples from device in interleaved/non-interleaved format.
\r
7950 if ( stream_.deviceInterleaved[1] )
\r
7951 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
// Non-interleaved: build one pointer per channel into the flat buffer.
7953 void *bufs[channels];
\r
7954 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7955 for ( int i=0; i<channels; i++ )
\r
7956 bufs[i] = (void *) (buffer + (i * offset));
\r
7957 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
7960 if ( result < (int) stream_.bufferSize ) {
\r
7961 // Either an error or overrun occured.
\r
7962 if ( result == -EPIPE ) {
\r
7963 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7964 if ( state == SND_PCM_STATE_XRUN ) {
\r
// Overrun: flag it for the next callback and recover via prepare.
7965 apiInfo->xrun[1] = true;
\r
7966 result = snd_pcm_prepare( handle[1] );
\r
7967 if ( result < 0 ) {
\r
7968 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7969 errorText_ = errorStream_.str();
\r
7973 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7974 errorText_ = errorStream_.str();
\r
7978 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7979 errorText_ = errorStream_.str();
\r
7981 error( RtAudioError::WARNING );
\r
7985 // Do byte swapping if necessary.
\r
7986 if ( stream_.doByteSwap[1] )
\r
7987 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
7989 // Do buffer conversion if necessary.
\r
7990 if ( stream_.doConvertBuffer[1] )
\r
7991 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7993 // Check stream latency
\r
7994 result = snd_pcm_delay( handle[1], &frames );
\r
7995 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
// ---- Playback side ----
8000 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8002 // Setup parameters and do buffer conversion if necessary.
\r
8003 if ( stream_.doConvertBuffer[0] ) {
\r
8004 buffer = stream_.deviceBuffer;
\r
8005 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
8006 channels = stream_.nDeviceChannels[0];
\r
8007 format = stream_.deviceFormat[0];
\r
8010 buffer = stream_.userBuffer[0];
\r
8011 channels = stream_.nUserChannels[0];
\r
8012 format = stream_.userFormat;
\r
8015 // Do byte swapping if necessary.
\r
8016 if ( stream_.doByteSwap[0] )
\r
8017 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
8019 // Write samples to device in interleaved/non-interleaved format.
\r
8020 if ( stream_.deviceInterleaved[0] )
\r
8021 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
8023 void *bufs[channels];
\r
8024 size_t offset = stream_.bufferSize * formatBytes( format );
\r
8025 for ( int i=0; i<channels; i++ )
\r
8026 bufs[i] = (void *) (buffer + (i * offset));
\r
8027 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
8030 if ( result < (int) stream_.bufferSize ) {
\r
8031 // Either an error or underrun occured.
\r
8032 if ( result == -EPIPE ) {
\r
8033 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
8034 if ( state == SND_PCM_STATE_XRUN ) {
\r
// Underrun: flag it for the next callback and recover via prepare.
8035 apiInfo->xrun[0] = true;
\r
8036 result = snd_pcm_prepare( handle[0] );
\r
8037 if ( result < 0 ) {
\r
8038 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
8039 errorText_ = errorStream_.str();
\r
8042 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
\r
8045 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8046 errorText_ = errorStream_.str();
\r
8050 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
8051 errorText_ = errorStream_.str();
\r
8053 error( RtAudioError::WARNING );
\r
8057 // Check stream latency
\r
8058 result = snd_pcm_delay( handle[0], &frames );
\r
8059 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
8063 MUTEX_UNLOCK( &stream_.mutex );
\r
// Advance stream time; honor a deferred stop requested by the callback.
8065 RtApi::tickStreamTime();
\r
8066 if ( doStopStream == 1 ) this->stopStream();
\r
// Entry point of the ALSA callback thread created in probeDeviceOpen().
// Optionally raises itself to SCHED_RR at the clamped priority stored in
// CallbackInfo (done here because setting it via thread attributes did not
// work -- see comment at original lines 7640-7643), then loops calling
// callbackEvent() until isRunning is cleared by stop/closeStream.
// NOTE(review): extraction dropped lines (braces, "#endif", and the final
// "return NULL;") -- restore from upstream before compiling.
8069 static void *alsaCallbackHandler( void *ptr )
\r
8071 CallbackInfo *info = (CallbackInfo *) ptr;
\r
8072 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
8073 bool *isRunning = &info->isRunning;
\r
8075 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
8076 if ( info->doRealtime ) {
\r
8077 pthread_t tID = pthread_self(); // ID of this thread
\r
8078 sched_param prio = { info->priority }; // scheduling priority of thread
\r
8079 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
// Main callback loop; pthread_testcancel provides a cancellation point.
8083 while ( *isRunning == true ) {
\r
8084 pthread_testcancel();
\r
8085 object->callbackEvent();
\r
8088 pthread_exit( NULL );
\r
8091 //******************** End of __LINUX_ALSA__ *********************//
\r
8094 #if defined(__LINUX_PULSE__)
\r
8096 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8097 // and Tristan Matthews.
\r
8099 #include <pulse/error.h>
\r
8100 #include <pulse/simple.h>
\r
// Sample rates advertised by the PulseAudio backend; the trailing 0 is the
// end-of-list sentinel used by iteration in getDeviceInfo().
8103 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
\r
8104 44100, 48000, 96000, 0};
\r
// Maps an RtAudio sample format to the corresponding PulseAudio format.
// NOTE(review): the struct's closing "};" (original lines 8109-8110) is
// missing from this extraction.
8106 struct rtaudio_pa_format_mapping_t {
\r
8107 RtAudioFormat rtaudio_format;
\r
8108 pa_sample_format_t pa_format;
\r
// Format pairs supported by this backend; {0, PA_SAMPLE_INVALID} is the
// end-of-list sentinel.
8111 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8112 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8113 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8114 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8115 {0, PA_SAMPLE_INVALID}};
\r
// Per-stream state for the PulseAudio backend: the simple-API playback and
// record connections plus the callback thread's wakeup machinery.
// NOTE(review): the member declarations for s_rec, the thread handle, and
// the runnable flag (original lines 8119-8120, 8122) were dropped by the
// extraction -- the constructor below initializes them, and closeStream()
// uses pah->thread / pah->s_rec; restore from upstream.
8117 struct PulseAudioHandle {
\r
8118 pa_simple *s_play;
\r
// Condition variable the callback thread waits on while the stream is
// stopped (paired with stream_.mutex).
8121 pthread_cond_t runnable_cv;
\r
8123 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8126 RtApiPulse::~RtApiPulse()
\r
8128 if ( stream_.state != STREAM_CLOSED )
\r
8132 unsigned int RtApiPulse::getDeviceCount( void )
\r
// Describe the single virtual "PulseAudio" device this backend exposes:
// the device index is ignored, and a fixed stereo in/out description is
// returned with the rates from SUPPORTED_SAMPLERATES and the three native
// formats matching supported_sampleformats.
// NOTE(review): the function's braces and "return info;" (original lines
// 8138, 8153-8154) were dropped by the extraction.
8137 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8139 RtAudio::DeviceInfo info;
\r
8140 info.probed = true;
\r
8141 info.name = "PulseAudio";
\r
8142 info.outputChannels = 2;
\r
8143 info.inputChannels = 2;
\r
8144 info.duplexChannels = 2;
\r
8145 info.isDefaultOutput = true;
\r
8146 info.isDefaultInput = true;
\r
// Copy the sentinel-terminated rate table into the info structure.
8148 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8149 info.sampleRates.push_back( *sr );
\r
8151 info.preferredSampleRate = 48000;
\r
8152 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
// Entry point of the PulseAudio callback thread: loops invoking the
// stream's callbackEvent() until CallbackInfo::isRunning is cleared by
// closeStream(); pthread_testcancel provides a cancellation point.
// NOTE(review): braces and the final "return NULL;" were dropped by the
// extraction.
8157 static void *pulseaudio_callback( void * user )
\r
8159 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8160 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
// volatile: re-read the flag each iteration; it is written by another thread.
8161 volatile bool *isRunning = &cbi->isRunning;
\r
8163 while ( *isRunning ) {
\r
8164 pthread_testcancel();
\r
8165 context->callbackEvent();
\r
8168 pthread_exit( NULL );
\r
// Close a PulseAudio stream: stop and join the callback thread (waking it
// if parked on runnable_cv), flush and free the playback connection, free
// the record connection, destroy the handle, release the user buffers, and
// reset the stream to CLOSED/UNINITIALIZED.
// NOTE(review): extraction dropped lines between the preserved numbers
// (braces, the "if ( pah->s_rec )" guard around 8189, and the delete of
// pah around 8193) -- restore from upstream before compiling.
8171 void RtApiPulse::closeStream( void )
\r
8173 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8175 stream_.callbackInfo.isRunning = false;
\r
// Wake the callback thread if it is parked waiting to become runnable,
// then join it before tearing anything down.
8177 MUTEX_LOCK( &stream_.mutex );
\r
8178 if ( stream_.state == STREAM_STOPPED ) {
\r
8179 pah->runnable = true;
\r
8180 pthread_cond_signal( &pah->runnable_cv );
\r
8182 MUTEX_UNLOCK( &stream_.mutex );
\r
8184 pthread_join( pah->thread, 0 );
\r
// Flush pending playback audio before freeing the connection.
8185 if ( pah->s_play ) {
\r
8186 pa_simple_flush( pah->s_play, NULL );
\r
8187 pa_simple_free( pah->s_play );
\r
8190 pa_simple_free( pah->s_rec );
\r
8192 pthread_cond_destroy( &pah->runnable_cv );
\r
8194 stream_.apiHandle = 0;
\r
// Release the per-direction user buffers.
8197 if ( stream_.userBuffer[0] ) {
\r
8198 free( stream_.userBuffer[0] );
\r
8199 stream_.userBuffer[0] = 0;
\r
8201 if ( stream_.userBuffer[1] ) {
\r
8202 free( stream_.userBuffer[1] );
\r
8203 stream_.userBuffer[1] = 0;
\r
8206 stream_.state = STREAM_CLOSED;
\r
8207 stream_.mode = UNINITIALIZED;
\r
// One iteration of the audio callback loop: park while stopped, invoke
// the user callback, then push/pull audio through the PulseAudio simple
// API (blocking writes/reads provide the pacing).
8210 void RtApiPulse::callbackEvent( void )

8212 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

// While stopped, block on runnable_cv until startStream() or
// closeStream() signals us.
8214 if ( stream_.state == STREAM_STOPPED ) {

8215 MUTEX_LOCK( &stream_.mutex );

8216 while ( !pah->runnable )

8217 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );

// Woken but not running (e.g. the stream is being closed): bail out.
8219 if ( stream_.state != STREAM_RUNNING ) {

8220 MUTEX_UNLOCK( &stream_.mutex );

8223 MUTEX_UNLOCK( &stream_.mutex );

8226 if ( stream_.state == STREAM_CLOSED ) {

8227 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "

8228 "this shouldn't happen!";

8229 error( RtAudioError::WARNING );

// Invoke the user callback.  status stays 0: the simple API exposes no
// xrun information.
8233 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;

8234 double streamTime = getStreamTime();

8235 RtAudioStreamStatus status = 0;

8236 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],

8237 stream_.bufferSize, streamTime, status,

8238 stream_.callbackInfo.userData );

// Callback return of 2 requests an immediate abort.
// NOTE(review): lines 8241-8243 (presumably the abortStream() call and
// return) are missing from this extract.
8240 if ( doStopStream == 2 ) {

8245 MUTEX_LOCK( &stream_.mutex );

// When a conversion is needed, the device-side data lives in
// deviceBuffer; otherwise the user buffer is used directly.
8246 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];

8247 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];

// Re-check under the lock: the state may have changed while the user
// callback ran.
8249 if ( stream_.state != STREAM_RUNNING )

8254 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

8255 if ( stream_.doConvertBuffer[OUTPUT] ) {

8256 convertBuffer( stream_.deviceBuffer,

8257 stream_.userBuffer[OUTPUT],

8258 stream_.convertInfo[OUTPUT] );

8259 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *

8260 formatBytes( stream_.deviceFormat[OUTPUT] );

// NOTE(review): the "else" introducing this branch (line 8261) is
// missing from this extract.
8262 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *

8263 formatBytes( stream_.userFormat );

// Blocking write; errors are reported as warnings, not fatal.
8265 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {

8266 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<

8267 pa_strerror( pa_error ) << ".";

8268 errorText_ = errorStream_.str();

8269 error( RtAudioError::WARNING );

8273 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {

8274 if ( stream_.doConvertBuffer[INPUT] )

8275 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *

8276 formatBytes( stream_.deviceFormat[INPUT] );

// NOTE(review): the "else" for this branch (line 8277) is missing.
8278 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *

8279 formatBytes( stream_.userFormat );

8281 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {

8282 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<

8283 pa_strerror( pa_error ) << ".";

8284 errorText_ = errorStream_.str();

8285 error( RtAudioError::WARNING );

// Convert captured device data into the user's format/layout.
8287 if ( stream_.doConvertBuffer[INPUT] ) {

8288 convertBuffer( stream_.userBuffer[INPUT],

8289 stream_.deviceBuffer,

8290 stream_.convertInfo[INPUT] );

8295 MUTEX_UNLOCK( &stream_.mutex );

8296 RtApi::tickStreamTime();

// Callback return of 1 requests a drain-then-stop.
// NOTE(review): the stopStream() call following this condition is
// missing from this extract.
8298 if ( doStopStream == 1 )
\r
// Start the stream: flip the state to RUNNING and wake the callback
// thread, which parks on runnable_cv while stopped (see callbackEvent).
8302 void RtApiPulse::startStream( void )

8304 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

8306 if ( stream_.state == STREAM_CLOSED ) {

8307 errorText_ = "RtApiPulse::startStream(): the stream is not open!";

8308 error( RtAudioError::INVALID_USE );

// NOTE(review): the "return;" lines after each error() call are missing
// from this extract.
8311 if ( stream_.state == STREAM_RUNNING ) {

8312 errorText_ = "RtApiPulse::startStream(): the stream is already running!";

8313 error( RtAudioError::WARNING );

8317 MUTEX_LOCK( &stream_.mutex );

8319 stream_.state = STREAM_RUNNING;

// Release the callback thread blocked in callbackEvent().
8321 pah->runnable = true;

8322 pthread_cond_signal( &pah->runnable_cv );

8323 MUTEX_UNLOCK( &stream_.mutex );
\r
// Stop the stream after draining pending playback data
// (pa_simple_drain blocks until the server has played everything).
8326 void RtApiPulse::stopStream( void )

8328 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

8330 if ( stream_.state == STREAM_CLOSED ) {

8331 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";

8332 error( RtAudioError::INVALID_USE );

8335 if ( stream_.state == STREAM_STOPPED ) {

8336 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";

8337 error( RtAudioError::WARNING );

// State is set to STOPPED before taking the mutex so the callback
// thread stops pushing audio as soon as possible; it is set again at
// line 8356 below (visibly redundant but harmless).
8341 stream_.state = STREAM_STOPPED;

8342 MUTEX_LOCK( &stream_.mutex );

8344 if ( pah && pah->s_play ) {

// NOTE(review): the "int pa_error;" declaration (line 8345) is missing
// from this extract.
8346 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {

8347 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<

8348 pa_strerror( pa_error ) << ".";

8349 errorText_ = errorStream_.str();

// Unlock before raising: error() may throw for SYSTEM_ERROR.
8350 MUTEX_UNLOCK( &stream_.mutex );

8351 error( RtAudioError::SYSTEM_ERROR );

8356 stream_.state = STREAM_STOPPED;

8357 MUTEX_UNLOCK( &stream_.mutex );
\r
// Abort the stream immediately: identical structure to stopStream()
// except pending playback data is discarded (pa_simple_flush) rather
// than drained.
8360 void RtApiPulse::abortStream( void )

8362 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );

8364 if ( stream_.state == STREAM_CLOSED ) {

8365 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";

8366 error( RtAudioError::INVALID_USE );

8369 if ( stream_.state == STREAM_STOPPED ) {

8370 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";

8371 error( RtAudioError::WARNING );

// Set before locking so the callback thread quiesces promptly; set
// again at line 8390 (visibly redundant but harmless).
8375 stream_.state = STREAM_STOPPED;

8376 MUTEX_LOCK( &stream_.mutex );

8378 if ( pah && pah->s_play ) {

// NOTE(review): the "int pa_error;" declaration (line 8379) is missing
// from this extract.
8380 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {

8381 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<

8382 pa_strerror( pa_error ) << ".";

8383 errorText_ = errorStream_.str();

// Unlock before raising: error() may throw for SYSTEM_ERROR.
8384 MUTEX_UNLOCK( &stream_.mutex );

8385 error( RtAudioError::SYSTEM_ERROR );

8390 stream_.state = STREAM_STOPPED;

8391 MUTEX_UNLOCK( &stream_.mutex );
\r
// Open one direction (INPUT or OUTPUT) of the single virtual PulseAudio
// device: validate parameters against the backend's fixed capabilities,
// set up format conversion if needed, allocate buffers, create the
// simple-API connection, and (first call only) spawn the callback
// thread.  Returns false on failure (the FAILURE-label cleanup frees
// everything allocated so far).
8394 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,

8395 unsigned int channels, unsigned int firstChannel,

8396 unsigned int sampleRate, RtAudioFormat format,

8397 unsigned int *bufferSize, RtAudio::StreamOptions *options )

8399 PulseAudioHandle *pah = 0;

8400 unsigned long bufferBytes = 0;

8401 pa_sample_spec ss;

// Only device index 0 exists; DUPLEX is reached by two separate calls.
8403 if ( device != 0 ) return false;

8404 if ( mode != INPUT && mode != OUTPUT ) return false;

8405 if ( channels != 1 && channels != 2 ) {

8406 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";

8409 ss.channels = channels;

8411 if ( firstChannel != 0 ) return false;

// The requested rate must appear in the zero-terminated whitelist.
8413 bool sr_found = false;

8414 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {

8415 if ( sampleRate == *sr ) {

8417 stream_.sampleRate = sampleRate;

8418 ss.rate = sampleRate;

// NOTE(review): the "sr_found = true;" and loop-break lines are missing
// from this extract.
8422 if ( !sr_found ) {

8423 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";

// Look up a native PulseAudio format; otherwise fall back to FLOAT32
// and let convertBuffer() translate.
8427 bool sf_found = 0;

8428 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;

8429 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {

8430 if ( format == sf->rtaudio_format ) {

8432 stream_.userFormat = sf->rtaudio_format;

8433 stream_.deviceFormat[mode] = stream_.userFormat;

8434 ss.format = sf->pa_format;

8438 if ( !sf_found ) { // Use internal data format conversion.

8439 stream_.userFormat = format;

8440 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;

8441 ss.format = PA_SAMPLE_FLOAT32LE;

8444 // Set other stream parameters.

8445 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

8446 else stream_.userInterleaved = true;

8447 stream_.deviceInterleaved[mode] = true;

8448 stream_.nBuffers = 1;

8449 stream_.doByteSwap[mode] = false;

8450 stream_.nUserChannels[mode] = channels;

8451 stream_.nDeviceChannels[mode] = channels + firstChannel;

8452 stream_.channelOffset[mode] = 0;

8453 std::string streamName = "RtAudio";

8455 // Set flags for buffer conversion.

8456 stream_.doConvertBuffer[mode] = false;

8457 if ( stream_.userFormat != stream_.deviceFormat[mode] )

8458 stream_.doConvertBuffer[mode] = true;

8459 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )

8460 stream_.doConvertBuffer[mode] = true;

8462 // Allocate necessary internal buffers.

8463 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

8464 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

8465 if ( stream_.userBuffer[mode] == NULL ) {

8466 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";

8469 stream_.bufferSize = *bufferSize;

// Device-side scratch buffer, shared between directions when the
// existing one is already large enough.
8471 if ( stream_.doConvertBuffer[mode] ) {

8473 bool makeBuffer = true;

8474 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

8475 if ( mode == INPUT ) {

8476 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

8477 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

8478 if ( bufferBytes <= bytesOut ) makeBuffer = false;

8482 if ( makeBuffer ) {

8483 bufferBytes *= *bufferSize;

8484 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

8485 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

8486 if ( stream_.deviceBuffer == NULL ) {

8487 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";

8493 stream_.device[mode] = device;

8495 // Setup the buffer conversion information structure.

8496 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

// First call: create the handle and its condition variable.
8498 if ( !stream_.apiHandle ) {

8499 PulseAudioHandle *pah = new PulseAudioHandle;

// NOTE(review): the null check guarding this error (line 8500) is
// missing from this extract.
8501 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";

8505 stream_.apiHandle = pah;

8506 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {

8507 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";

8511 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

8514 if ( options && !options->streamName.empty() ) streamName = options->streamName;

// Record stream only: request a fragment size matching our buffer;
// maxlength = -1 leaves the server default.
// NOTE(review): the "int error;" declaration and the INPUT/OUTPUT
// switch lines surrounding the pa_simple_new calls are missing from
// this extract.
8517 pa_buffer_attr buffer_attr;

8518 buffer_attr.fragsize = bufferBytes;

8519 buffer_attr.maxlength = -1;

8521 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );

8522 if ( !pah->s_rec ) {

8523 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";

8528 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );

8529 if ( !pah->s_play ) {

8530 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";

// Track mode transitions: a second probe in the other direction makes
// the stream DUPLEX; the same direction twice is an error (goto error,
// lines 8541-8542 missing from this extract).
8538 if ( stream_.mode == UNINITIALIZED )

8539 stream_.mode = mode;

8540 else if ( stream_.mode == mode )

8543 stream_.mode = DUPLEX;

// Spawn the callback thread once per stream.
8545 if ( !stream_.callbackInfo.isRunning ) {

8546 stream_.callbackInfo.object = this;

8547 stream_.callbackInfo.isRunning = true;

8548 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {

8549 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";

8554 stream_.state = STREAM_STOPPED;

// Failure cleanup: destroy the condvar, free buffers, reset handle.
// NOTE(review): the "error:" label and "delete pah;" lines are missing
// from this extract.
8558 if ( pah && stream_.callbackInfo.isRunning ) {

8559 pthread_cond_destroy( &pah->runnable_cv );

8561 stream_.apiHandle = 0;

8564 for ( int i=0; i<2; i++ ) {

8565 if ( stream_.userBuffer[i] ) {

8566 free( stream_.userBuffer[i] );

8567 stream_.userBuffer[i] = 0;

8571 if ( stream_.deviceBuffer ) {

8572 free( stream_.deviceBuffer );

8573 stream_.deviceBuffer = 0;
\r
8579 //******************** End of __LINUX_PULSE__ *********************//
\r
8582 #if defined(__LINUX_OSS__)
\r
8584 #include <unistd.h>
\r
8585 #include <sys/ioctl.h>
\r
8586 #include <unistd.h>
\r
8587 #include <fcntl.h>
\r
8588 #include <sys/soundcard.h>
\r
8589 #include <errno.h>
\r
// Callback-thread entry point for the OSS backend (defined later in the
// file, outside this extract).
8592 static void *ossCallbackHandler(void * ptr);

8594 // A structure to hold various information related to the OSS API

8595 // implementation.

// Holds the playback/capture file descriptors plus run-state for the
// callback thread.
// NOTE(review): extraction gaps — lines 8598-8599 (presumably the
// xrun[2] and triggered members, given the initializer below) and
// 8601-8602 (the constructor's first line) are missing.
8596 struct OssHandle {

8597 int id[2]; // device ids

8600 pthread_cond_t runnable;

8603 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// Default constructor: no backend-specific initialization required.
8606 RtApiOss :: RtApiOss()

8608 // Nothing to do here.
\r
// Destructor: close any stream that is still open.
8611 RtApiOss :: ~RtApiOss()

8613 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// Count audio devices by querying the OSS v4 mixer's SNDCTL_SYSINFO.
8616 unsigned int RtApiOss :: getDeviceCount( void )

8618 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );

8619 if ( mixerfd == -1 ) {

8620 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";

8621 error( RtAudioError::WARNING );

// NOTE(review): the "return 0;" and close(mixerfd) lines (8622-8624,
// 8627, 8630-8633) are missing from this extract — without them the
// mixer fd would leak; confirm against the canonical source.
8625 oss_sysinfo sysinfo;

8626 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {

8628 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";

8629 error( RtAudioError::WARNING );

8634 return sysinfo.numaudios;
\r
// Probe one OSS device via SNDCTL_AUDIOINFO: channel capabilities,
// native data formats, and supported sample rates.  info.probed stays
// false on any failure path.
8637 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )

8639 RtAudio::DeviceInfo info;

8640 info.probed = false;

8642 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );

8643 if ( mixerfd == -1 ) {

8644 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";

8645 error( RtAudioError::WARNING );

// NOTE(review): "return info;" and close(mixerfd) lines are missing at
// several gaps in this extract (8646-8648, 8652, 8655-8657, etc.).
8649 oss_sysinfo sysinfo;

8650 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );

8651 if ( result == -1 ) {

8653 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";

8654 error( RtAudioError::WARNING );

8658 unsigned nDevices = sysinfo.numaudios;

8659 if ( nDevices == 0 ) {

8661 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";

8662 error( RtAudioError::INVALID_USE );

8666 if ( device >= nDevices ) {

8668 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";

8669 error( RtAudioError::INVALID_USE );

// NOTE(review): ainfo is not zero-initialized before setting .dev —
// presumably acceptable for SNDCTL_AUDIOINFO (the kernel fills it), but
// confirm against the canonical source.
8673 oss_audioinfo ainfo;

8674 ainfo.dev = device;

8675 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );

8677 if ( result == -1 ) {

8678 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";

8679 errorText_ = errorStream_.str();

8680 error( RtAudioError::WARNING );

// Channel capabilities from the caps bitmask.
8685 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;

8686 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;

// The inner PCM_CAP_DUPLEX test is redundant with the enclosing one.
8687 if ( ainfo.caps & PCM_CAP_DUPLEX ) {

8688 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )

8689 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

8692 // Probe data formats ... do for input

8693 unsigned long mask = ainfo.iformats;

8694 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )

8695 info.nativeFormats |= RTAUDIO_SINT16;

8696 if ( mask & AFMT_S8 )

8697 info.nativeFormats |= RTAUDIO_SINT8;

8698 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )

8699 info.nativeFormats |= RTAUDIO_SINT32;

8700 if ( mask & AFMT_FLOAT )

8701 info.nativeFormats |= RTAUDIO_FLOAT32;

8702 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )

8703 info.nativeFormats |= RTAUDIO_SINT24;

8705 // Check that we have at least one supported format

8706 if ( info.nativeFormats == 0 ) {

8707 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";

8708 errorText_ = errorStream_.str();

8709 error( RtAudioError::WARNING );

8713 // Probe the supported sample rates.

8714 info.sampleRates.clear();

// If the driver enumerates discrete rates, intersect them with our
// SAMPLE_RATES table; otherwise fall back to the min/max range below.
8715 if ( ainfo.nrates ) {

8716 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {

8717 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {

8718 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {

8719 info.sampleRates.push_back( SAMPLE_RATES[k] );

// Preferred rate: the highest supported rate not exceeding 48 kHz.
8721 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )

8722 info.preferredSampleRate = SAMPLE_RATES[k];

8730 // Check min and max rate values;

8731 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {

8732 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {

8733 info.sampleRates.push_back( SAMPLE_RATES[k] );

8735 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )

8736 info.preferredSampleRate = SAMPLE_RATES[k];

8741 if ( info.sampleRates.size() == 0 ) {

8742 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";

8743 errorText_ = errorStream_.str();

8744 error( RtAudioError::WARNING );

// NOTE(review): an "else" branch and "return info;" lines around here
// are missing from this extract.
8747 info.probed = true;

8748 info.name = ainfo.name;
\r
// Open one direction of an OSS v4 device: re-probe the device via the
// mixer, open its devnode, negotiate channels/format/fragment
// size/sample rate, allocate conversion buffers, and (first call only)
// spawn the callback thread.  Returns false via the "error" cleanup
// path on failure (many gap lines below are those returns/braces).
8755 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

8756 unsigned int firstChannel, unsigned int sampleRate,

8757 RtAudioFormat format, unsigned int *bufferSize,

8758 RtAudio::StreamOptions *options )

8760 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );

8761 if ( mixerfd == -1 ) {

8762 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";

8766 oss_sysinfo sysinfo;

8767 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );

8768 if ( result == -1 ) {

8770 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";

8774 unsigned nDevices = sysinfo.numaudios;

8775 if ( nDevices == 0 ) {

8776 // This should not happen because a check is made before this function is called.

8778 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";

8782 if ( device >= nDevices ) {

8783 // This should not happen because a check is made before this function is called.

8785 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";

8789 oss_audioinfo ainfo;

8790 ainfo.dev = device;

8791 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );

// NOTE(review): close(mixerfd) lines are among the gaps here — the
// mixer fd must be closed on all paths; confirm against the canonical
// source.  The message below says "getDeviceInfo" — looks copy-pasted
// from that function; arguably should read "probeDeviceOpen".
8793 if ( result == -1 ) {

8794 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";

8795 errorText_ = errorStream_.str();

8799 // Check if device supports input or output

8800 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||

8801 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {

8802 if ( mode == OUTPUT )

8803 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";

8805 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";

8806 errorText_ = errorStream_.str();

// NOTE(review): the "int flags = 0;" declaration (gap 8809-8810) is
// missing from this extract.
8811 OssHandle *handle = (OssHandle *) stream_.apiHandle;

8812 if ( mode == OUTPUT )

8813 flags |= O_WRONLY;

8814 else { // mode == INPUT

// OSS duplex on the same device requires closing the playback fd and
// reopening the node O_RDWR (handled in the gap lines 8828-8832).
8815 if (stream_.mode == OUTPUT && stream_.device[0] == device) {

8816 // We just set the same device for playback ... close and reopen for duplex (OSS only).

8817 close( handle->id[0] );

8818 handle->id[0] = 0;

8819 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {

8820 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";

8821 errorText_ = errorStream_.str();

8824 // Check that the number previously set channels is the same.

8825 if ( stream_.nUserChannels[0] != channels ) {

8826 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";

8827 errorText_ = errorStream_.str();

8833 flags |= O_RDONLY;

8836 // Set exclusive access if specified.

8837 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;

8839 // Try to open the device.

8841 fd = open( ainfo.devnode, flags, 0 );

// NOTE(review): "int fd;" and the "if ( fd == -1 ) {" guard (gaps
// 8840, 8842) are missing from this extract.
8843 if ( errno == EBUSY )

8844 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";

8846 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";

8847 errorText_ = errorStream_.str();

8851 // For duplex operation, specifically set this mode (this doesn't seem to work).

// NOTE(review): "flags | O_RDWR" is a bitwise OR — always true (the
// issue even when O_RDWR is 2 and flags lacks it, since flags is
// nonzero here).  Almost certainly intended "flags == O_RDWR" or
// "flags & O_RDWR"; known long-standing RtAudio quirk — confirm before
// fixing, as the enclosed ioctl failure is non-fatal anyway.
8853 if ( flags | O_RDWR ) {

8854 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );

8855 if ( result == -1) {

8856 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";

8857 errorText_ = errorStream_.str();

8863 // Check the device channel support.

8864 stream_.nUserChannels[mode] = channels;

8865 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {

8867 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";

8868 errorText_ = errorStream_.str();

8872 // Set the number of channels.

8873 int deviceChannels = channels + firstChannel;

8874 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );

8875 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {

8877 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";

8878 errorText_ = errorStream_.str();

8881 stream_.nDeviceChannels[mode] = deviceChannels;

8883 // Get the data format mask

// NOTE(review): the "int mask;" declaration (gap 8884) is missing.
8885 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );

8886 if ( result == -1 ) {

8888 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";

8889 errorText_ = errorStream_.str();

8893 // Determine how to set the device format.

8894 stream_.userFormat = format;

8895 int deviceFormat = -1;

8896 stream_.doByteSwap[mode] = false;

// Prefer the native-endian (NE) variant of the requested format; fall
// back to the opposite-endian (OE) variant with a byte-swap flag.
8897 if ( format == RTAUDIO_SINT8 ) {

8898 if ( mask & AFMT_S8 ) {

8899 deviceFormat = AFMT_S8;

8900 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

8903 else if ( format == RTAUDIO_SINT16 ) {

8904 if ( mask & AFMT_S16_NE ) {

8905 deviceFormat = AFMT_S16_NE;

8906 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

8908 else if ( mask & AFMT_S16_OE ) {

8909 deviceFormat = AFMT_S16_OE;

8910 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

8911 stream_.doByteSwap[mode] = true;

8914 else if ( format == RTAUDIO_SINT24 ) {

8915 if ( mask & AFMT_S24_NE ) {

8916 deviceFormat = AFMT_S24_NE;

8917 stream_.deviceFormat[mode] = RTAUDIO_SINT24;

8919 else if ( mask & AFMT_S24_OE ) {

8920 deviceFormat = AFMT_S24_OE;

8921 stream_.deviceFormat[mode] = RTAUDIO_SINT24;

8922 stream_.doByteSwap[mode] = true;

8925 else if ( format == RTAUDIO_SINT32 ) {

8926 if ( mask & AFMT_S32_NE ) {

8927 deviceFormat = AFMT_S32_NE;

8928 stream_.deviceFormat[mode] = RTAUDIO_SINT32;

8930 else if ( mask & AFMT_S32_OE ) {

8931 deviceFormat = AFMT_S32_OE;

8932 stream_.deviceFormat[mode] = RTAUDIO_SINT32;

8933 stream_.doByteSwap[mode] = true;

// Requested format unavailable: pick the best the device offers and
// rely on convertBuffer() (order of preference: S16 > S32 > S24 > S8).
8937 if ( deviceFormat == -1 ) {

8938 // The user requested format is not natively supported by the device.

8939 if ( mask & AFMT_S16_NE ) {

8940 deviceFormat = AFMT_S16_NE;

8941 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

8943 else if ( mask & AFMT_S32_NE ) {

8944 deviceFormat = AFMT_S32_NE;

8945 stream_.deviceFormat[mode] = RTAUDIO_SINT32;

8947 else if ( mask & AFMT_S24_NE ) {

8948 deviceFormat = AFMT_S24_NE;

8949 stream_.deviceFormat[mode] = RTAUDIO_SINT24;

8951 else if ( mask & AFMT_S16_OE ) {

8952 deviceFormat = AFMT_S16_OE;

8953 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

8954 stream_.doByteSwap[mode] = true;

8956 else if ( mask & AFMT_S32_OE ) {

8957 deviceFormat = AFMT_S32_OE;

8958 stream_.deviceFormat[mode] = RTAUDIO_SINT32;

8959 stream_.doByteSwap[mode] = true;

8961 else if ( mask & AFMT_S24_OE ) {

8962 deviceFormat = AFMT_S24_OE;

8963 stream_.deviceFormat[mode] = RTAUDIO_SINT24;

8964 stream_.doByteSwap[mode] = true;

8966 else if ( mask & AFMT_S8) {

8967 deviceFormat = AFMT_S8;

8968 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

8972 if ( stream_.deviceFormat[mode] == 0 ) {

8973 // This really shouldn't happen ...

8975 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";

8976 errorText_ = errorStream_.str();

8980 // Set the data format.

8981 int temp = deviceFormat;

8982 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );

8983 if ( result == -1 || deviceFormat != temp ) {

8985 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";

8986 errorText_ = errorStream_.str();

8990 // Attempt to set the buffer size. According to OSS, the minimum

8991 // number of buffers is two. The supposed minimum buffer size is 16

8992 // bytes, so that will be our lower bound. The argument to this

8993 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in

8994 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.

8995 // We'll check the actual value used near the end of the setup

8997 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;

8998 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;

// NOTE(review): the "int buffers = 0;" declaration (gap 8999) is
// missing.  Also note the quirk below: fewer than 2 requested buffers
// yields 3, not 2 — present in the visible code as-is.
9000 if ( options ) buffers = options->numberOfBuffers;

9001 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;

9002 if ( buffers < 2 ) buffers = 3;

// Encode 0xMMMMSSSS: buffer count in the high half, log2(bytes) in the
// low half (log10(x)/log10(2) == log2(x)).
9003 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );

9004 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );

9005 if ( result == -1 ) {

9007 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";

9008 errorText_ = errorStream_.str();

9011 stream_.nBuffers = buffers;

9013 // Save buffer size (in sample frames).

9014 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );

9015 stream_.bufferSize = *bufferSize;

9017 // Set the sample rate.

9018 int srate = sampleRate;

9019 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );

9020 if ( result == -1 ) {

9022 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";

9023 errorText_ = errorStream_.str();

9027 // Verify the sample rate setup worked.

// NOTE(review): sampleRate is unsigned, so "srate - sampleRate" is
// computed in unsigned arithmetic before the (int) abs() — when
// srate < sampleRate the intermediate wraps to a huge value; the > 100
// comparison still rejects such cases, but the conversion to int for
// abs() is implementation-defined.  Worth an explicit cast.
9028 if ( abs( srate - sampleRate ) > 100 ) {

9030 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";

9031 errorText_ = errorStream_.str();

9034 stream_.sampleRate = sampleRate;

9036 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {

9037 // We're doing duplex setup here.

9038 stream_.deviceFormat[0] = stream_.deviceFormat[1];

9039 stream_.nDeviceChannels[0] = deviceChannels;

9042 // Set interleaving parameters.

9043 stream_.userInterleaved = true;

9044 stream_.deviceInterleaved[mode] = true;

9045 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )

9046 stream_.userInterleaved = false;

9048 // Set flags for buffer conversion

9049 stream_.doConvertBuffer[mode] = false;

9050 if ( stream_.userFormat != stream_.deviceFormat[mode] )

9051 stream_.doConvertBuffer[mode] = true;

9052 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )

9053 stream_.doConvertBuffer[mode] = true;

9054 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

9055 stream_.nUserChannels[mode] > 1 )

9056 stream_.doConvertBuffer[mode] = true;

9058 // Allocate the stream handles if necessary and then save.

9059 if ( stream_.apiHandle == 0 ) {

// NOTE(review): the "try {" opening this handler (gap 9060) is missing.
9061 handle = new OssHandle;

9063 catch ( std::bad_alloc& ) {

9064 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";

9068 if ( pthread_cond_init( &handle->runnable, NULL ) ) {

9069 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";

9073 stream_.apiHandle = (void *) handle;

9076 handle = (OssHandle *) stream_.apiHandle;

9078 handle->id[mode] = fd;

9080 // Allocate necessary internal buffers.

9081 unsigned long bufferBytes;

9082 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

9083 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

9084 if ( stream_.userBuffer[mode] == NULL ) {

9085 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";

// Device-side scratch buffer, reused across directions when large
// enough (same pattern as the PulseAudio backend above).
9089 if ( stream_.doConvertBuffer[mode] ) {

9091 bool makeBuffer = true;

9092 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

9093 if ( mode == INPUT ) {

9094 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

9095 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

9096 if ( bufferBytes <= bytesOut ) makeBuffer = false;

9100 if ( makeBuffer ) {

9101 bufferBytes *= *bufferSize;

9102 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

9103 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

9104 if ( stream_.deviceBuffer == NULL ) {

9105 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";

9111 stream_.device[mode] = device;

9112 stream_.state = STREAM_STOPPED;

9114 // Setup the buffer conversion information structure.

9115 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

9117 // Setup thread if necessary.

9118 if ( stream_.mode == OUTPUT && mode == INPUT ) {

9119 // We had already set up an output stream.

9120 stream_.mode = DUPLEX;

9121 if ( stream_.device[0] == device ) handle->id[0] = fd;

// NOTE(review): the "else {" introducing this assignment (gap
// 9122-9123) is missing.
9124 stream_.mode = mode;

9126 // Setup callback thread.

9127 stream_.callbackInfo.object = (void *) this;

9129 // Set the thread attributes for joinable and realtime scheduling

9130 // priority. The higher priority will only take affect if the

9131 // program is run as root or suid.

9132 pthread_attr_t attr;

9133 pthread_attr_init( &attr );

9134 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );

9135 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)

9136 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {

9137 struct sched_param param;

9138 int priority = options->priority;

9139 int min = sched_get_priority_min( SCHED_RR );

9140 int max = sched_get_priority_max( SCHED_RR );

9141 if ( priority < min ) priority = min;

9142 else if ( priority > max ) priority = max;

9143 param.sched_priority = priority;

// NOTE(review): "¶m" is mojibake — "&param" where "&para" was
// decoded as the HTML entity for the pilcrow sign.  The canonical line
// is "pthread_attr_setschedparam( &attr, &param );".
9144 pthread_attr_setschedparam( &attr, ¶m );

9145 pthread_attr_setschedpolicy( &attr, SCHED_RR );

// NOTE(review): the "else" and "#else" lines distinguishing the two
// SCHED_OTHER fallbacks (gaps 9146-9147, 9149) are missing.
9148 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );

9150 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );

9153 stream_.callbackInfo.isRunning = true;

9154 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );

9155 pthread_attr_destroy( &attr );

// NOTE(review): the "if ( result ) {" guard (gap 9156) is missing.
9157 stream_.callbackInfo.isRunning = false;

9158 errorText_ = "RtApiOss::error creating callback thread!";

// Failure cleanup ("error:" label in the gap): destroy sync objects,
// close fds, free all buffers, and reset the handle pointer.
9167 pthread_cond_destroy( &handle->runnable );

9168 if ( handle->id[0] ) close( handle->id[0] );

9169 if ( handle->id[1] ) close( handle->id[1] );

// NOTE(review): "delete handle;" (gap 9170) is missing.
9171 stream_.apiHandle = 0;

9174 for ( int i=0; i<2; i++ ) {

9175 if ( stream_.userBuffer[i] ) {

9176 free( stream_.userBuffer[i] );

9177 stream_.userBuffer[i] = 0;

9181 if ( stream_.deviceBuffer ) {

9182 free( stream_.deviceBuffer );

9183 stream_.deviceBuffer = 0;
\r
// Shut down the OSS stream: stop and join the callback thread (waking
// it if parked on the runnable condvar), halt the DSP, close the device
// fds, and free all buffers and the handle.
9189 void RtApiOss :: closeStream()

9191 if ( stream_.state == STREAM_CLOSED ) {

9192 errorText_ = "RtApiOss::closeStream(): no open stream to close!";

9193 error( RtAudioError::WARNING );

// NOTE(review): the "return;" after the warning (gap 9194-9196) is
// missing from this extract.
9197 OssHandle *handle = (OssHandle *) stream_.apiHandle;

9198 stream_.callbackInfo.isRunning = false;

9199 MUTEX_LOCK( &stream_.mutex );

// Wake the callback thread if it is parked waiting to run.
9200 if ( stream_.state == STREAM_STOPPED )

9201 pthread_cond_signal( &handle->runnable );

9202 MUTEX_UNLOCK( &stream_.mutex );

9203 pthread_join( stream_.callbackInfo.thread, NULL );

// Halt any in-flight DSP activity before closing the descriptors.
9205 if ( stream_.state == STREAM_RUNNING ) {

9206 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )

9207 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );

// NOTE(review): the "else" branch line (gap 9208) is missing.
9209 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );

9210 stream_.state = STREAM_STOPPED;

// NOTE(review): the "if ( handle ) {" guard (gap 9212-9213) and the
// "delete handle;" (gap 9217) are missing.
9214 pthread_cond_destroy( &handle->runnable );

9215 if ( handle->id[0] ) close( handle->id[0] );

9216 if ( handle->id[1] ) close( handle->id[1] );

9218 stream_.apiHandle = 0;

9221 for ( int i=0; i<2; i++ ) {

9222 if ( stream_.userBuffer[i] ) {

9223 free( stream_.userBuffer[i] );

9224 stream_.userBuffer[i] = 0;

9228 if ( stream_.deviceBuffer ) {

9229 free( stream_.deviceBuffer );

9230 stream_.deviceBuffer = 0;

9233 stream_.mode = UNINITIALIZED;

9234 stream_.state = STREAM_CLOSED;
\r
9237 void RtApiOss :: startStream()
\r
9240 if ( stream_.state == STREAM_RUNNING ) {
\r
9241 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9242 error( RtAudioError::WARNING );
\r
9246 MUTEX_LOCK( &stream_.mutex );
\r
9248 stream_.state = STREAM_RUNNING;
\r
9250 // No need to do anything else here ... OSS automatically starts
\r
9251 // when fed samples.
\r
9253 MUTEX_UNLOCK( &stream_.mutex );
\r
9255 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9256 pthread_cond_signal( &handle->runnable );
\r
9259 void RtApiOss :: stopStream()
\r
9262 if ( stream_.state == STREAM_STOPPED ) {
\r
9263 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9264 error( RtAudioError::WARNING );
\r
9268 MUTEX_LOCK( &stream_.mutex );
\r
9270 // The state might change while waiting on a mutex.
\r
9271 if ( stream_.state == STREAM_STOPPED ) {
\r
9272 MUTEX_UNLOCK( &stream_.mutex );
\r
9277 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9278 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9280 // Flush the output with zeros a few times.
\r
9283 RtAudioFormat format;
\r
9285 if ( stream_.doConvertBuffer[0] ) {
\r
9286 buffer = stream_.deviceBuffer;
\r
9287 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9288 format = stream_.deviceFormat[0];
\r
9291 buffer = stream_.userBuffer[0];
\r
9292 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9293 format = stream_.userFormat;
\r
9296 memset( buffer, 0, samples * formatBytes(format) );
\r
9297 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9298 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9299 if ( result == -1 ) {
\r
9300 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9301 error( RtAudioError::WARNING );
\r
9305 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9306 if ( result == -1 ) {
\r
9307 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9308 errorText_ = errorStream_.str();
\r
9311 handle->triggered = false;
\r
9314 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9315 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9316 if ( result == -1 ) {
\r
9317 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9318 errorText_ = errorStream_.str();
\r
9324 stream_.state = STREAM_STOPPED;
\r
9325 MUTEX_UNLOCK( &stream_.mutex );
\r
9327 if ( result != -1 ) return;
\r
9328 error( RtAudioError::SYSTEM_ERROR );
\r
9331 void RtApiOss :: abortStream()
\r
9334 if ( stream_.state == STREAM_STOPPED ) {
\r
9335 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9336 error( RtAudioError::WARNING );
\r
9340 MUTEX_LOCK( &stream_.mutex );
\r
9342 // The state might change while waiting on a mutex.
\r
9343 if ( stream_.state == STREAM_STOPPED ) {
\r
9344 MUTEX_UNLOCK( &stream_.mutex );
\r
9349 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9350 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9351 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9352 if ( result == -1 ) {
\r
9353 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9354 errorText_ = errorStream_.str();
\r
9357 handle->triggered = false;
\r
9360 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9361 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9362 if ( result == -1 ) {
\r
9363 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9364 errorText_ = errorStream_.str();
\r
9370 stream_.state = STREAM_STOPPED;
\r
9371 MUTEX_UNLOCK( &stream_.mutex );
\r
9373 if ( result != -1 ) return;
\r
9374 error( RtAudioError::SYSTEM_ERROR );
\r
9377 void RtApiOss :: callbackEvent()
\r
9379 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9380 if ( stream_.state == STREAM_STOPPED ) {
\r
9381 MUTEX_LOCK( &stream_.mutex );
\r
9382 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9383 if ( stream_.state != STREAM_RUNNING ) {
\r
9384 MUTEX_UNLOCK( &stream_.mutex );
\r
9387 MUTEX_UNLOCK( &stream_.mutex );
\r
9390 if ( stream_.state == STREAM_CLOSED ) {
\r
9391 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9392 error( RtAudioError::WARNING );
\r
9396 // Invoke user callback to get fresh output data.
\r
9397 int doStopStream = 0;
\r
9398 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9399 double streamTime = getStreamTime();
\r
9400 RtAudioStreamStatus status = 0;
\r
9401 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9402 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9403 handle->xrun[0] = false;
\r
9405 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9406 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9407 handle->xrun[1] = false;
\r
9409 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9410 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9411 if ( doStopStream == 2 ) {
\r
9412 this->abortStream();
\r
9416 MUTEX_LOCK( &stream_.mutex );
\r
9418 // The state might change while waiting on a mutex.
\r
9419 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9424 RtAudioFormat format;
\r
9426 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9428 // Setup parameters and do buffer conversion if necessary.
\r
9429 if ( stream_.doConvertBuffer[0] ) {
\r
9430 buffer = stream_.deviceBuffer;
\r
9431 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9432 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9433 format = stream_.deviceFormat[0];
\r
9436 buffer = stream_.userBuffer[0];
\r
9437 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9438 format = stream_.userFormat;
\r
9441 // Do byte swapping if necessary.
\r
9442 if ( stream_.doByteSwap[0] )
\r
9443 byteSwapBuffer( buffer, samples, format );
\r
9445 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9447 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9448 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9449 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9450 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9451 handle->triggered = true;
\r
9454 // Write samples to device.
\r
9455 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9457 if ( result == -1 ) {
\r
9458 // We'll assume this is an underrun, though there isn't a
\r
9459 // specific means for determining that.
\r
9460 handle->xrun[0] = true;
\r
9461 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9462 error( RtAudioError::WARNING );
\r
9463 // Continue on to input section.
\r
9467 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9469 // Setup parameters.
\r
9470 if ( stream_.doConvertBuffer[1] ) {
\r
9471 buffer = stream_.deviceBuffer;
\r
9472 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9473 format = stream_.deviceFormat[1];
\r
9476 buffer = stream_.userBuffer[1];
\r
9477 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9478 format = stream_.userFormat;
\r
9481 // Read samples from device.
\r
9482 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9484 if ( result == -1 ) {
\r
9485 // We'll assume this is an overrun, though there isn't a
\r
9486 // specific means for determining that.
\r
9487 handle->xrun[1] = true;
\r
9488 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9489 error( RtAudioError::WARNING );
\r
9493 // Do byte swapping if necessary.
\r
9494 if ( stream_.doByteSwap[1] )
\r
9495 byteSwapBuffer( buffer, samples, format );
\r
9497 // Do buffer conversion if necessary.
\r
9498 if ( stream_.doConvertBuffer[1] )
\r
9499 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9503 MUTEX_UNLOCK( &stream_.mutex );
\r
9505 RtApi::tickStreamTime();
\r
9506 if ( doStopStream == 1 ) this->stopStream();
\r
9509 static void *ossCallbackHandler( void *ptr )
\r
9511 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9512 RtApiOss *object = (RtApiOss *) info->object;
\r
9513 bool *isRunning = &info->isRunning;
\r
9515 while ( *isRunning == true ) {
\r
9516 pthread_testcancel();
\r
9517 object->callbackEvent();
\r
9520 pthread_exit( NULL );
\r
9523 //******************** End of __LINUX_OSS__ *********************//
\r
9527 // *************************************************** //
\r
9529 // Protected common (OS-independent) RtAudio methods.
\r
9531 // *************************************************** //
\r
9533 // This method can be modified to control the behavior of error
\r
9534 // message printing.
\r
9535 void RtApi :: error( RtAudioError::Type type )
\r
9537 errorStream_.str(""); // clear the ostringstream
\r
9539 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9540 if ( errorCallback ) {
\r
9541 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9543 if ( firstErrorOccurred_ )
\r
9546 firstErrorOccurred_ = true;
\r
9547 const std::string errorMessage = errorText_;
\r
9549 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9550 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9554 errorCallback( type, errorMessage );
\r
9555 firstErrorOccurred_ = false;
\r
9559 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9560 std::cerr << '\n' << errorText_ << "\n\n";
\r
9561 else if ( type != RtAudioError::WARNING )
\r
9562 throw( RtAudioError( errorText_, type ) );
\r
9565 void RtApi :: verifyStream()
\r
9567 if ( stream_.state == STREAM_CLOSED ) {
\r
9568 errorText_ = "RtApi:: a stream is not open!";
\r
9569 error( RtAudioError::INVALID_USE );
\r
9573 void RtApi :: clearStreamInfo()
\r
9575 stream_.mode = UNINITIALIZED;
\r
9576 stream_.state = STREAM_CLOSED;
\r
9577 stream_.sampleRate = 0;
\r
9578 stream_.bufferSize = 0;
\r
9579 stream_.nBuffers = 0;
\r
9580 stream_.userFormat = 0;
\r
9581 stream_.userInterleaved = true;
\r
9582 stream_.streamTime = 0.0;
\r
9583 stream_.apiHandle = 0;
\r
9584 stream_.deviceBuffer = 0;
\r
9585 stream_.callbackInfo.callback = 0;
\r
9586 stream_.callbackInfo.userData = 0;
\r
9587 stream_.callbackInfo.isRunning = false;
\r
9588 stream_.callbackInfo.errorCallback = 0;
\r
9589 for ( int i=0; i<2; i++ ) {
\r
9590 stream_.device[i] = 11111;
\r
9591 stream_.doConvertBuffer[i] = false;
\r
9592 stream_.deviceInterleaved[i] = true;
\r
9593 stream_.doByteSwap[i] = false;
\r
9594 stream_.nUserChannels[i] = 0;
\r
9595 stream_.nDeviceChannels[i] = 0;
\r
9596 stream_.channelOffset[i] = 0;
\r
9597 stream_.deviceFormat[i] = 0;
\r
9598 stream_.latency[i] = 0;
\r
9599 stream_.userBuffer[i] = 0;
\r
9600 stream_.convertInfo[i].channels = 0;
\r
9601 stream_.convertInfo[i].inJump = 0;
\r
9602 stream_.convertInfo[i].outJump = 0;
\r
9603 stream_.convertInfo[i].inFormat = 0;
\r
9604 stream_.convertInfo[i].outFormat = 0;
\r
9605 stream_.convertInfo[i].inOffset.clear();
\r
9606 stream_.convertInfo[i].outOffset.clear();
\r
9610 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9612 if ( format == RTAUDIO_SINT16 )
\r
9614 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9616 else if ( format == RTAUDIO_FLOAT64 )
\r
9618 else if ( format == RTAUDIO_SINT24 )
\r
9620 else if ( format == RTAUDIO_SINT8 )
\r
9623 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9624 error( RtAudioError::WARNING );
\r
9629 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9631 if ( mode == INPUT ) { // convert device to user buffer
\r
9632 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9633 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9634 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9635 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9637 else { // convert user to device buffer
\r
9638 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9639 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9640 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9641 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9644 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9645 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9647 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9649 // Set up the interleave/deinterleave offsets.
\r
9650 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9651 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9652 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9653 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9654 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9655 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9656 stream_.convertInfo[mode].inJump = 1;
\r
9660 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9661 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9662 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9663 stream_.convertInfo[mode].outJump = 1;
\r
9667 else { // no (de)interleaving
\r
9668 if ( stream_.userInterleaved ) {
\r
9669 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9670 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9671 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9675 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9676 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9677 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9678 stream_.convertInfo[mode].inJump = 1;
\r
9679 stream_.convertInfo[mode].outJump = 1;
\r
9684 // Add channel offset.
\r
9685 if ( firstChannel > 0 ) {
\r
9686 if ( stream_.deviceInterleaved[mode] ) {
\r
9687 if ( mode == OUTPUT ) {
\r
9688 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9689 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9692 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9693 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9697 if ( mode == OUTPUT ) {
\r
9698 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9699 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9702 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9703 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9709 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9711 // This function does format conversion, input/output channel compensation, and
\r
9712 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9713 // the lower three bytes of a 32-bit integer.
\r
9715 // Clear our device buffer when in/out duplex device channels are different
\r
9716 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9717 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9718 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9721 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9723 Float64 *out = (Float64 *)outBuffer;
\r
9725 if (info.inFormat == RTAUDIO_SINT8) {
\r
9726 signed char *in = (signed char *)inBuffer;
\r
9727 scale = 1.0 / 127.5;
\r
9728 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9729 for (j=0; j<info.channels; j++) {
\r
9730 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9731 out[info.outOffset[j]] += 0.5;
\r
9732 out[info.outOffset[j]] *= scale;
\r
9734 in += info.inJump;
\r
9735 out += info.outJump;
\r
9738 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9739 Int16 *in = (Int16 *)inBuffer;
\r
9740 scale = 1.0 / 32767.5;
\r
9741 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9742 for (j=0; j<info.channels; j++) {
\r
9743 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9744 out[info.outOffset[j]] += 0.5;
\r
9745 out[info.outOffset[j]] *= scale;
\r
9747 in += info.inJump;
\r
9748 out += info.outJump;
\r
9751 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9752 Int24 *in = (Int24 *)inBuffer;
\r
9753 scale = 1.0 / 8388607.5;
\r
9754 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9755 for (j=0; j<info.channels; j++) {
\r
9756 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9757 out[info.outOffset[j]] += 0.5;
\r
9758 out[info.outOffset[j]] *= scale;
\r
9760 in += info.inJump;
\r
9761 out += info.outJump;
\r
9764 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9765 Int32 *in = (Int32 *)inBuffer;
\r
9766 scale = 1.0 / 2147483647.5;
\r
9767 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9768 for (j=0; j<info.channels; j++) {
\r
9769 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9770 out[info.outOffset[j]] += 0.5;
\r
9771 out[info.outOffset[j]] *= scale;
\r
9773 in += info.inJump;
\r
9774 out += info.outJump;
\r
9777 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9778 Float32 *in = (Float32 *)inBuffer;
\r
9779 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9780 for (j=0; j<info.channels; j++) {
\r
9781 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9783 in += info.inJump;
\r
9784 out += info.outJump;
\r
9787 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9788 // Channel compensation and/or (de)interleaving only.
\r
9789 Float64 *in = (Float64 *)inBuffer;
\r
9790 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9791 for (j=0; j<info.channels; j++) {
\r
9792 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9794 in += info.inJump;
\r
9795 out += info.outJump;
\r
9799 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9801 Float32 *out = (Float32 *)outBuffer;
\r
9803 if (info.inFormat == RTAUDIO_SINT8) {
\r
9804 signed char *in = (signed char *)inBuffer;
\r
9805 scale = (Float32) ( 1.0 / 127.5 );
\r
9806 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9807 for (j=0; j<info.channels; j++) {
\r
9808 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9809 out[info.outOffset[j]] += 0.5;
\r
9810 out[info.outOffset[j]] *= scale;
\r
9812 in += info.inJump;
\r
9813 out += info.outJump;
\r
9816 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9817 Int16 *in = (Int16 *)inBuffer;
\r
9818 scale = (Float32) ( 1.0 / 32767.5 );
\r
9819 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9820 for (j=0; j<info.channels; j++) {
\r
9821 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9822 out[info.outOffset[j]] += 0.5;
\r
9823 out[info.outOffset[j]] *= scale;
\r
9825 in += info.inJump;
\r
9826 out += info.outJump;
\r
9829 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9830 Int24 *in = (Int24 *)inBuffer;
\r
9831 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9832 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9833 for (j=0; j<info.channels; j++) {
\r
9834 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9835 out[info.outOffset[j]] += 0.5;
\r
9836 out[info.outOffset[j]] *= scale;
\r
9838 in += info.inJump;
\r
9839 out += info.outJump;
\r
9842 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9843 Int32 *in = (Int32 *)inBuffer;
\r
9844 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9845 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9846 for (j=0; j<info.channels; j++) {
\r
9847 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9848 out[info.outOffset[j]] += 0.5;
\r
9849 out[info.outOffset[j]] *= scale;
\r
9851 in += info.inJump;
\r
9852 out += info.outJump;
\r
9855 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9856 // Channel compensation and/or (de)interleaving only.
\r
9857 Float32 *in = (Float32 *)inBuffer;
\r
9858 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9859 for (j=0; j<info.channels; j++) {
\r
9860 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9862 in += info.inJump;
\r
9863 out += info.outJump;
\r
9866 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9867 Float64 *in = (Float64 *)inBuffer;
\r
9868 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9869 for (j=0; j<info.channels; j++) {
\r
9870 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9872 in += info.inJump;
\r
9873 out += info.outJump;
\r
9877 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9878 Int32 *out = (Int32 *)outBuffer;
\r
9879 if (info.inFormat == RTAUDIO_SINT8) {
\r
9880 signed char *in = (signed char *)inBuffer;
\r
9881 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9882 for (j=0; j<info.channels; j++) {
\r
9883 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9884 out[info.outOffset[j]] <<= 24;
\r
9886 in += info.inJump;
\r
9887 out += info.outJump;
\r
9890 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9891 Int16 *in = (Int16 *)inBuffer;
\r
9892 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9893 for (j=0; j<info.channels; j++) {
\r
9894 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9895 out[info.outOffset[j]] <<= 16;
\r
9897 in += info.inJump;
\r
9898 out += info.outJump;
\r
9901 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9902 Int24 *in = (Int24 *)inBuffer;
\r
9903 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9904 for (j=0; j<info.channels; j++) {
\r
9905 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9906 out[info.outOffset[j]] <<= 8;
\r
9908 in += info.inJump;
\r
9909 out += info.outJump;
\r
9912 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9913 // Channel compensation and/or (de)interleaving only.
\r
9914 Int32 *in = (Int32 *)inBuffer;
\r
9915 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9916 for (j=0; j<info.channels; j++) {
\r
9917 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9919 in += info.inJump;
\r
9920 out += info.outJump;
\r
9923 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9924 Float32 *in = (Float32 *)inBuffer;
\r
9925 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9926 for (j=0; j<info.channels; j++) {
\r
9927 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9929 in += info.inJump;
\r
9930 out += info.outJump;
\r
9933 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9934 Float64 *in = (Float64 *)inBuffer;
\r
9935 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9936 for (j=0; j<info.channels; j++) {
\r
9937 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9939 in += info.inJump;
\r
9940 out += info.outJump;
\r
9944 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9945 Int24 *out = (Int24 *)outBuffer;
\r
9946 if (info.inFormat == RTAUDIO_SINT8) {
\r
9947 signed char *in = (signed char *)inBuffer;
\r
9948 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9949 for (j=0; j<info.channels; j++) {
\r
9950 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9951 //out[info.outOffset[j]] <<= 16;
\r
9953 in += info.inJump;
\r
9954 out += info.outJump;
\r
9957 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9958 Int16 *in = (Int16 *)inBuffer;
\r
9959 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9960 for (j=0; j<info.channels; j++) {
\r
9961 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9962 //out[info.outOffset[j]] <<= 8;
\r
9964 in += info.inJump;
\r
9965 out += info.outJump;
\r
9968 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9969 // Channel compensation and/or (de)interleaving only.
\r
9970 Int24 *in = (Int24 *)inBuffer;
\r
9971 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9972 for (j=0; j<info.channels; j++) {
\r
9973 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9975 in += info.inJump;
\r
9976 out += info.outJump;
\r
9979 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9980 Int32 *in = (Int32 *)inBuffer;
\r
9981 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9982 for (j=0; j<info.channels; j++) {
\r
9983 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9984 //out[info.outOffset[j]] >>= 8;
\r
9986 in += info.inJump;
\r
9987 out += info.outJump;
\r
9990 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9991 Float32 *in = (Float32 *)inBuffer;
\r
9992 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9993 for (j=0; j<info.channels; j++) {
\r
9994 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9996 in += info.inJump;
\r
9997 out += info.outJump;
\r
10000 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10001 Float64 *in = (Float64 *)inBuffer;
\r
10002 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10003 for (j=0; j<info.channels; j++) {
\r
10004 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
10006 in += info.inJump;
\r
10007 out += info.outJump;
\r
10011 else if (info.outFormat == RTAUDIO_SINT16) {
\r
10012 Int16 *out = (Int16 *)outBuffer;
\r
10013 if (info.inFormat == RTAUDIO_SINT8) {
\r
10014 signed char *in = (signed char *)inBuffer;
\r
10015 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10016 for (j=0; j<info.channels; j++) {
\r
10017 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
10018 out[info.outOffset[j]] <<= 8;
\r
10020 in += info.inJump;
\r
10021 out += info.outJump;
\r
10024 else if (info.inFormat == RTAUDIO_SINT16) {
\r
10025 // Channel compensation and/or (de)interleaving only.
\r
10026 Int16 *in = (Int16 *)inBuffer;
\r
10027 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10028 for (j=0; j<info.channels; j++) {
\r
10029 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10031 in += info.inJump;
\r
10032 out += info.outJump;
\r
10035 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10036 Int24 *in = (Int24 *)inBuffer;
\r
10037 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10038 for (j=0; j<info.channels; j++) {
\r
10039 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
10041 in += info.inJump;
\r
10042 out += info.outJump;
\r
10045 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10046 Int32 *in = (Int32 *)inBuffer;
\r
10047 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10048 for (j=0; j<info.channels; j++) {
\r
10049 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
10051 in += info.inJump;
\r
10052 out += info.outJump;
\r
10055 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10056 Float32 *in = (Float32 *)inBuffer;
\r
10057 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10058 for (j=0; j<info.channels; j++) {
\r
10059 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10061 in += info.inJump;
\r
10062 out += info.outJump;
\r
10065 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10066 Float64 *in = (Float64 *)inBuffer;
\r
10067 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10068 for (j=0; j<info.channels; j++) {
\r
10069 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10071 in += info.inJump;
\r
10072 out += info.outJump;
\r
10076 else if (info.outFormat == RTAUDIO_SINT8) {
\r
10077 signed char *out = (signed char *)outBuffer;
\r
10078 if (info.inFormat == RTAUDIO_SINT8) {
\r
10079 // Channel compensation and/or (de)interleaving only.
\r
10080 signed char *in = (signed char *)inBuffer;
\r
10081 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10082 for (j=0; j<info.channels; j++) {
\r
10083 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10085 in += info.inJump;
\r
10086 out += info.outJump;
\r
10089 if (info.inFormat == RTAUDIO_SINT16) {
\r
10090 Int16 *in = (Int16 *)inBuffer;
\r
10091 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10092 for (j=0; j<info.channels; j++) {
\r
10093 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10095 in += info.inJump;
\r
10096 out += info.outJump;
\r
10099 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10100 Int24 *in = (Int24 *)inBuffer;
\r
10101 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10102 for (j=0; j<info.channels; j++) {
\r
10103 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10105 in += info.inJump;
\r
10106 out += info.outJump;
\r
10109 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10110 Int32 *in = (Int32 *)inBuffer;
\r
10111 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10112 for (j=0; j<info.channels; j++) {
\r
10113 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10115 in += info.inJump;
\r
10116 out += info.outJump;
\r
10119 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10120 Float32 *in = (Float32 *)inBuffer;
\r
10121 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10122 for (j=0; j<info.channels; j++) {
\r
10123 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10125 in += info.inJump;
\r
10126 out += info.outJump;
\r
10129 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10130 Float64 *in = (Float64 *)inBuffer;
\r
10131 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10132 for (j=0; j<info.channels; j++) {
\r
10133 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10135 in += info.inJump;
\r
10136 out += info.outJump;
\r
10142 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10143 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10144 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10146 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10152 if ( format == RTAUDIO_SINT16 ) {
\r
10153 for ( unsigned int i=0; i<samples; i++ ) {
\r
10154 // Swap 1st and 2nd bytes.
\r
10156 *(ptr) = *(ptr+1);
\r
10159 // Increment 2 bytes.
\r
10163 else if ( format == RTAUDIO_SINT32 ||
\r
10164 format == RTAUDIO_FLOAT32 ) {
\r
10165 for ( unsigned int i=0; i<samples; i++ ) {
\r
10166 // Swap 1st and 4th bytes.
\r
10168 *(ptr) = *(ptr+3);
\r
10171 // Swap 2nd and 3rd bytes.
\r
10174 *(ptr) = *(ptr+1);
\r
10177 // Increment 3 more bytes.
\r
10181 else if ( format == RTAUDIO_SINT24 ) {
\r
10182 for ( unsigned int i=0; i<samples; i++ ) {
\r
10183 // Swap 1st and 3rd bytes.
\r
10185 *(ptr) = *(ptr+2);
\r
10188 // Increment 2 more bytes.
\r
10192 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10193 for ( unsigned int i=0; i<samples; i++ ) {
\r
10194 // Swap 1st and 8th bytes
\r
10196 *(ptr) = *(ptr+7);
\r
10199 // Swap 2nd and 7th bytes
\r
10202 *(ptr) = *(ptr+5);
\r
10205 // Swap 3rd and 6th bytes
\r
10208 *(ptr) = *(ptr+3);
\r
10211 // Swap 4th and 5th bytes
\r
10214 *(ptr) = *(ptr+1);
\r
10217 // Increment 5 more bytes.
\r
10223 // Indentation settings for Vim and Emacs
\r
10225 // Local Variables:
\r
10226 // c-basic-offset: 2
\r
10227 // indent-tabs-mode: nil
\r
10230 // vim: et sts=2 sw=2
\r