/************************************************************************/
/*! \class RtAudio
    \brief Realtime audio i/o C++ classes.

    RtAudio provides a common API (Application Programming Interface)
    for realtime audio input/output across Linux (native ALSA, Jack,
    and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
    (DirectSound, ASIO and WASAPI) operating systems.

    RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/

    RtAudio: realtime audio i/o C++ classes
    Copyright (c) 2001-2014 Gary P. Scavone

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation files
    (the "Software"), to deal in the Software without restriction,
    including without limitation the rights to use, copy, modify, merge,
    publish, distribute, sublicense, and/or sell copies of the Software,
    and to permit persons to whom the Software is furnished to do so,
    subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    Any person wishing to distribute modifications to the Software is
    asked to send the modifications to the original developer so that
    they can be incorporated into the canonical version.  This is,
    however, not a binding provision of this license.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
    ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
    CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/************************************************************************/

// RtAudio: Version 4.1.1
#include "RtAudio.h"
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
#include <algorithm>
50 // Static variable definitions.
\r
51 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
52 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
53 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
54 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-dependent mutex wrappers: Win32 critical sections on
// Windows, pthread mutexes on the unix-like APIs, and no-op dummies
// when no audio API is compiled in.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
  #define MUTEX_LOCK(A) EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)

// Convert a null-terminated char string to a std::string (overload
// pair so TCHAR-style code works with either narrow or wide text).
static std::string convertCharPointerToStdString(const char *text)
{
  return std::string(text);
}

// Convert a null-terminated wide string to a UTF-8 std::string.
static std::string convertCharPointerToStdString(const wchar_t *text)
{
  // First call computes the required byte count (includes the null).
  int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
  std::string s( length-1, '\0' );
  WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
  return s;
}

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A) pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
#endif
// *************************************************** //
//
// RtAudio definitions.
//
// *************************************************** //
95 std::string RtAudio :: getVersion( void ) throw()
\r
97 return RTAUDIO_VERSION;
\r
100 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
104 // The order here will control the order of RtAudio's API search in
\r
105 // the constructor.
\r
106 #if defined(__UNIX_JACK__)
\r
107 apis.push_back( UNIX_JACK );
\r
109 #if defined(__LINUX_ALSA__)
\r
110 apis.push_back( LINUX_ALSA );
\r
112 #if defined(__LINUX_PULSE__)
\r
113 apis.push_back( LINUX_PULSE );
\r
115 #if defined(__LINUX_OSS__)
\r
116 apis.push_back( LINUX_OSS );
\r
118 #if defined(__WINDOWS_ASIO__)
\r
119 apis.push_back( WINDOWS_ASIO );
\r
121 #if defined(__WINDOWS_WASAPI__)
\r
122 apis.push_back( WINDOWS_WASAPI );
\r
124 #if defined(__WINDOWS_DS__)
\r
125 apis.push_back( WINDOWS_DS );
\r
127 #if defined(__MACOSX_CORE__)
\r
128 apis.push_back( MACOSX_CORE );
\r
130 #if defined(__RTAUDIO_DUMMY__)
\r
131 apis.push_back( RTAUDIO_DUMMY );
\r
135 void RtAudio :: openRtApi( RtAudio::Api api )
\r
141 #if defined(__UNIX_JACK__)
\r
142 if ( api == UNIX_JACK )
\r
143 rtapi_ = new RtApiJack();
\r
145 #if defined(__LINUX_ALSA__)
\r
146 if ( api == LINUX_ALSA )
\r
147 rtapi_ = new RtApiAlsa();
\r
149 #if defined(__LINUX_PULSE__)
\r
150 if ( api == LINUX_PULSE )
\r
151 rtapi_ = new RtApiPulse();
\r
153 #if defined(__LINUX_OSS__)
\r
154 if ( api == LINUX_OSS )
\r
155 rtapi_ = new RtApiOss();
\r
157 #if defined(__WINDOWS_ASIO__)
\r
158 if ( api == WINDOWS_ASIO )
\r
159 rtapi_ = new RtApiAsio();
\r
161 #if defined(__WINDOWS_WASAPI__)
\r
162 if ( api == WINDOWS_WASAPI )
\r
163 rtapi_ = new RtApiWasapi();
\r
165 #if defined(__WINDOWS_DS__)
\r
166 if ( api == WINDOWS_DS )
\r
167 rtapi_ = new RtApiDs();
\r
169 #if defined(__MACOSX_CORE__)
\r
170 if ( api == MACOSX_CORE )
\r
171 rtapi_ = new RtApiCore();
\r
173 #if defined(__RTAUDIO_DUMMY__)
\r
174 if ( api == RTAUDIO_DUMMY )
\r
175 rtapi_ = new RtApiDummy();
\r
179 RtAudio :: RtAudio( RtAudio::Api api )
\r
183 if ( api != UNSPECIFIED ) {
\r
184 // Attempt to open the specified API.
\r
186 if ( rtapi_ ) return;
\r
188 // No compiled support for specified API value. Issue a debug
\r
189 // warning and continue as if no API was specified.
\r
190 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
193 // Iterate through the compiled APIs and return as soon as we find
\r
194 // one with at least one device or we reach the end of the list.
\r
195 std::vector< RtAudio::Api > apis;
\r
196 getCompiledApi( apis );
\r
197 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
198 openRtApi( apis[i] );
\r
199 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
\r
202 if ( rtapi_ ) return;
\r
204 // It should not be possible to get here because the preprocessor
\r
205 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
206 // API-specific definitions are passed to the compiler. But just in
\r
207 // case something weird happens, we'll thow an error.
\r
208 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
209 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
212 RtAudio :: ~RtAudio() throw()
\r
218 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
219 RtAudio::StreamParameters *inputParameters,
\r
220 RtAudioFormat format, unsigned int sampleRate,
\r
221 unsigned int *bufferFrames,
\r
222 RtAudioCallback callback, void *userData,
\r
223 RtAudio::StreamOptions *options,
\r
224 RtAudioErrorCallback errorCallback )
\r
226 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
227 sampleRate, bufferFrames, callback,
\r
228 userData, options, errorCallback );
\r
// *************************************************** //
//
// Public RtApi definitions (see end of file for
// private or protected utility functions).
//
// *************************************************** //
240 stream_.state = STREAM_CLOSED;
\r
241 stream_.mode = UNINITIALIZED;
\r
242 stream_.apiHandle = 0;
\r
243 stream_.userBuffer[0] = 0;
\r
244 stream_.userBuffer[1] = 0;
\r
245 MUTEX_INITIALIZE( &stream_.mutex );
\r
246 showWarnings_ = true;
\r
247 firstErrorOccurred_ = false;
\r
252 MUTEX_DESTROY( &stream_.mutex );
\r
255 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
256 RtAudio::StreamParameters *iParams,
\r
257 RtAudioFormat format, unsigned int sampleRate,
\r
258 unsigned int *bufferFrames,
\r
259 RtAudioCallback callback, void *userData,
\r
260 RtAudio::StreamOptions *options,
\r
261 RtAudioErrorCallback errorCallback )
\r
263 if ( stream_.state != STREAM_CLOSED ) {
\r
264 errorText_ = "RtApi::openStream: a stream is already open!";
\r
265 error( RtAudioError::INVALID_USE );
\r
269 // Clear stream information potentially left from a previously open stream.
\r
272 if ( oParams && oParams->nChannels < 1 ) {
\r
273 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
274 error( RtAudioError::INVALID_USE );
\r
278 if ( iParams && iParams->nChannels < 1 ) {
\r
279 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
280 error( RtAudioError::INVALID_USE );
\r
284 if ( oParams == NULL && iParams == NULL ) {
\r
285 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
286 error( RtAudioError::INVALID_USE );
\r
290 if ( formatBytes(format) == 0 ) {
\r
291 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
292 error( RtAudioError::INVALID_USE );
\r
296 unsigned int nDevices = getDeviceCount();
\r
297 unsigned int oChannels = 0;
\r
299 oChannels = oParams->nChannels;
\r
300 if ( oParams->deviceId >= nDevices ) {
\r
301 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
302 error( RtAudioError::INVALID_USE );
\r
307 unsigned int iChannels = 0;
\r
309 iChannels = iParams->nChannels;
\r
310 if ( iParams->deviceId >= nDevices ) {
\r
311 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
312 error( RtAudioError::INVALID_USE );
\r
319 if ( oChannels > 0 ) {
\r
321 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
322 sampleRate, format, bufferFrames, options );
\r
323 if ( result == false ) {
\r
324 error( RtAudioError::SYSTEM_ERROR );
\r
329 if ( iChannels > 0 ) {
\r
331 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
332 sampleRate, format, bufferFrames, options );
\r
333 if ( result == false ) {
\r
334 if ( oChannels > 0 ) closeStream();
\r
335 error( RtAudioError::SYSTEM_ERROR );
\r
340 stream_.callbackInfo.callback = (void *) callback;
\r
341 stream_.callbackInfo.userData = userData;
\r
342 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
344 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
345 stream_.state = STREAM_STOPPED;
\r
348 unsigned int RtApi :: getDefaultInputDevice( void )
\r
350 // Should be implemented in subclasses if possible.
\r
354 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
356 // Should be implemented in subclasses if possible.
\r
360 void RtApi :: closeStream( void )
\r
362 // MUST be implemented in subclasses!
\r
366 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
367 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
368 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
369 RtAudio::StreamOptions * /*options*/ )
\r
371 // MUST be implemented in subclasses!
\r
375 void RtApi :: tickStreamTime( void )
\r
377 // Subclasses that do not provide their own implementation of
\r
378 // getStreamTime should call this function once per buffer I/O to
\r
379 // provide basic stream time support.
\r
381 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
383 #if defined( HAVE_GETTIMEOFDAY )
\r
384 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
388 long RtApi :: getStreamLatency( void )
\r
392 long totalLatency = 0;
\r
393 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
394 totalLatency = stream_.latency[0];
\r
395 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
396 totalLatency += stream_.latency[1];
\r
398 return totalLatency;
\r
401 double RtApi :: getStreamTime( void )
\r
405 #if defined( HAVE_GETTIMEOFDAY )
\r
406 // Return a very accurate estimate of the stream time by
\r
407 // adding in the elapsed time since the last tick.
\r
408 struct timeval then;
\r
409 struct timeval now;
\r
411 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
412 return stream_.streamTime;
\r
414 gettimeofday( &now, NULL );
\r
415 then = stream_.lastTickTimestamp;
\r
416 return stream_.streamTime +
\r
417 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
418 (then.tv_sec + 0.000001 * then.tv_usec));
\r
420 return stream_.streamTime;
\r
424 void RtApi :: setStreamTime( double time )
\r
429 stream_.streamTime = time;
\r
432 unsigned int RtApi :: getStreamSampleRate( void )
\r
436 return stream_.sampleRate;
\r
// *************************************************** //
//
// OS/API-specific methods.
//
// *************************************************** //
446 #if defined(__MACOSX_CORE__)
\r
448 // The OS X CoreAudio API is designed to use a separate callback
\r
449 // procedure for each of its audio devices. A single RtAudio duplex
\r
450 // stream using two different devices is supported here, though it
\r
451 // cannot be guaranteed to always behave correctly because we cannot
\r
452 // synchronize these two callbacks.
\r
454 // A property listener is installed for over/underrun information.
\r
455 // However, no functionality is currently provided to allow property
\r
456 // listeners to trigger user handlers because it is unclear what could
\r
457 // be done if a critical stream parameter (buffer size, sample rate,
\r
458 // device disconnect) notification arrived. The listeners entail
\r
459 // quite a bit of extra code and most likely, a user program wouldn't
\r
460 // be prepared for the result anyway. However, we do provide a flag
\r
461 // to the client callback function to inform of an over/underrun.
\r
463 // A structure to hold various information related to the CoreAudio API
\r
465 struct CoreHandle {
\r
466 AudioDeviceID id[2]; // device ids
\r
467 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
468 AudioDeviceIOProcID procId[2];
\r
470 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
471 UInt32 nStreams[2]; // number of streams to use
\r
473 char *deviceBuffer;
\r
474 pthread_cond_t condition;
\r
475 int drainCounter; // Tracks callback counts when draining
\r
476 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
479 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
482 RtApiCore:: RtApiCore()
\r
484 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
485 // This is a largely undocumented but absolutely necessary
\r
486 // requirement starting with OS-X 10.6. If not called, queries and
\r
487 // updates to various audio device properties are not handled
\r
489 CFRunLoopRef theRunLoop = NULL;
\r
490 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
491 kAudioObjectPropertyScopeGlobal,
\r
492 kAudioObjectPropertyElementMaster };
\r
493 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
494 if ( result != noErr ) {
\r
495 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
496 error( RtAudioError::WARNING );
\r
501 RtApiCore :: ~RtApiCore()
\r
503 // The subclass destructor gets called before the base class
\r
504 // destructor, so close an existing stream before deallocating
\r
505 // apiDeviceId memory.
\r
506 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
509 unsigned int RtApiCore :: getDeviceCount( void )
\r
511 // Find out how many audio devices there are, if any.
\r
513 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
514 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
515 if ( result != noErr ) {
\r
516 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
517 error( RtAudioError::WARNING );
\r
521 return dataSize / sizeof( AudioDeviceID );
\r
524 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
526 unsigned int nDevices = getDeviceCount();
\r
527 if ( nDevices <= 1 ) return 0;
\r
530 UInt32 dataSize = sizeof( AudioDeviceID );
\r
531 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
532 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
533 if ( result != noErr ) {
\r
534 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
535 error( RtAudioError::WARNING );
\r
539 dataSize *= nDevices;
\r
540 AudioDeviceID deviceList[ nDevices ];
\r
541 property.mSelector = kAudioHardwarePropertyDevices;
\r
542 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
543 if ( result != noErr ) {
\r
544 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
545 error( RtAudioError::WARNING );
\r
549 for ( unsigned int i=0; i<nDevices; i++ )
\r
550 if ( id == deviceList[i] ) return i;
\r
552 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
553 error( RtAudioError::WARNING );
\r
557 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
559 unsigned int nDevices = getDeviceCount();
\r
560 if ( nDevices <= 1 ) return 0;
\r
563 UInt32 dataSize = sizeof( AudioDeviceID );
\r
564 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
565 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
566 if ( result != noErr ) {
\r
567 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
568 error( RtAudioError::WARNING );
\r
572 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
573 AudioDeviceID deviceList[ nDevices ];
\r
574 property.mSelector = kAudioHardwarePropertyDevices;
\r
575 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
576 if ( result != noErr ) {
\r
577 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
578 error( RtAudioError::WARNING );
\r
582 for ( unsigned int i=0; i<nDevices; i++ )
\r
583 if ( id == deviceList[i] ) return i;
\r
585 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
586 error( RtAudioError::WARNING );
\r
590 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
592 RtAudio::DeviceInfo info;
\r
593 info.probed = false;
\r
596 unsigned int nDevices = getDeviceCount();
\r
597 if ( nDevices == 0 ) {
\r
598 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
599 error( RtAudioError::INVALID_USE );
\r
603 if ( device >= nDevices ) {
\r
604 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
605 error( RtAudioError::INVALID_USE );
\r
609 AudioDeviceID deviceList[ nDevices ];
\r
610 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
611 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
612 kAudioObjectPropertyScopeGlobal,
\r
613 kAudioObjectPropertyElementMaster };
\r
614 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
615 0, NULL, &dataSize, (void *) &deviceList );
\r
616 if ( result != noErr ) {
\r
617 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
618 error( RtAudioError::WARNING );
\r
622 AudioDeviceID id = deviceList[ device ];
\r
624 // Get the device name.
\r
626 CFStringRef cfname;
\r
627 dataSize = sizeof( CFStringRef );
\r
628 property.mSelector = kAudioObjectPropertyManufacturer;
\r
629 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
630 if ( result != noErr ) {
\r
631 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
632 errorText_ = errorStream_.str();
\r
633 error( RtAudioError::WARNING );
\r
637 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
638 int length = CFStringGetLength(cfname);
\r
639 char *mname = (char *)malloc(length * 3 + 1);
\r
640 #if defined( UNICODE ) || defined( _UNICODE )
\r
641 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
643 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
645 info.name.append( (const char *)mname, strlen(mname) );
\r
646 info.name.append( ": " );
\r
647 CFRelease( cfname );
\r
650 property.mSelector = kAudioObjectPropertyName;
\r
651 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
652 if ( result != noErr ) {
\r
653 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
654 errorText_ = errorStream_.str();
\r
655 error( RtAudioError::WARNING );
\r
659 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
660 length = CFStringGetLength(cfname);
\r
661 char *name = (char *)malloc(length * 3 + 1);
\r
662 #if defined( UNICODE ) || defined( _UNICODE )
\r
663 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
665 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
667 info.name.append( (const char *)name, strlen(name) );
\r
668 CFRelease( cfname );
\r
671 // Get the output stream "configuration".
\r
672 AudioBufferList *bufferList = nil;
\r
673 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
674 property.mScope = kAudioDevicePropertyScopeOutput;
\r
675 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
677 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
678 if ( result != noErr || dataSize == 0 ) {
\r
679 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
680 errorText_ = errorStream_.str();
\r
681 error( RtAudioError::WARNING );
\r
685 // Allocate the AudioBufferList.
\r
686 bufferList = (AudioBufferList *) malloc( dataSize );
\r
687 if ( bufferList == NULL ) {
\r
688 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
689 error( RtAudioError::WARNING );
\r
693 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
694 if ( result != noErr || dataSize == 0 ) {
\r
695 free( bufferList );
\r
696 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
697 errorText_ = errorStream_.str();
\r
698 error( RtAudioError::WARNING );
\r
702 // Get output channel information.
\r
703 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
704 for ( i=0; i<nStreams; i++ )
\r
705 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
706 free( bufferList );
\r
708 // Get the input stream "configuration".
\r
709 property.mScope = kAudioDevicePropertyScopeInput;
\r
710 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
711 if ( result != noErr || dataSize == 0 ) {
\r
712 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
713 errorText_ = errorStream_.str();
\r
714 error( RtAudioError::WARNING );
\r
718 // Allocate the AudioBufferList.
\r
719 bufferList = (AudioBufferList *) malloc( dataSize );
\r
720 if ( bufferList == NULL ) {
\r
721 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
722 error( RtAudioError::WARNING );
\r
726 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
727 if (result != noErr || dataSize == 0) {
\r
728 free( bufferList );
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 // Get input channel information.
\r
736 nStreams = bufferList->mNumberBuffers;
\r
737 for ( i=0; i<nStreams; i++ )
\r
738 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
739 free( bufferList );
\r
741 // If device opens for both playback and capture, we determine the channels.
\r
742 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
743 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
745 // Probe the device sample rates.
\r
746 bool isInput = false;
\r
747 if ( info.outputChannels == 0 ) isInput = true;
\r
749 // Determine the supported sample rates.
\r
750 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
751 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
752 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
753 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
754 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
755 errorText_ = errorStream_.str();
\r
756 error( RtAudioError::WARNING );
\r
760 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
761 AudioValueRange rangeList[ nRanges ];
\r
762 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
763 if ( result != kAudioHardwareNoError ) {
\r
764 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
765 errorText_ = errorStream_.str();
\r
766 error( RtAudioError::WARNING );
\r
770 // The sample rate reporting mechanism is a bit of a mystery. It
\r
771 // seems that it can either return individual rates or a range of
\r
772 // rates. I assume that if the min / max range values are the same,
\r
773 // then that represents a single supported rate and if the min / max
\r
774 // range values are different, the device supports an arbitrary
\r
775 // range of values (though there might be multiple ranges, so we'll
\r
776 // use the most conservative range).
\r
777 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
778 bool haveValueRange = false;
\r
779 info.sampleRates.clear();
\r
780 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
781 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
\r
782 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
\r
783 info.sampleRates.push_back( tmpSr );
\r
785 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
\r
786 info.preferredSampleRate = tmpSr;
\r
789 haveValueRange = true;
\r
790 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
791 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
795 if ( haveValueRange ) {
\r
796 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
797 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
\r
798 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
800 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
801 info.preferredSampleRate = SAMPLE_RATES[k];
\r
806 // Sort and remove any redundant values
\r
807 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
808 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
810 if ( info.sampleRates.size() == 0 ) {
\r
811 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
812 errorText_ = errorStream_.str();
\r
813 error( RtAudioError::WARNING );
\r
817 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
818 // Thus, any other "physical" formats supported by the device are of
\r
819 // no interest to the client.
\r
820 info.nativeFormats = RTAUDIO_FLOAT32;
\r
822 if ( info.outputChannels > 0 )
\r
823 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
824 if ( info.inputChannels > 0 )
\r
825 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
827 info.probed = true;
\r
831 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
832 const AudioTimeStamp* /*inNow*/,
\r
833 const AudioBufferList* inInputData,
\r
834 const AudioTimeStamp* /*inInputTime*/,
\r
835 AudioBufferList* outOutputData,
\r
836 const AudioTimeStamp* /*inOutputTime*/,
\r
837 void* infoPointer )
\r
839 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
841 RtApiCore *object = (RtApiCore *) info->object;
\r
842 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
843 return kAudioHardwareUnspecifiedError;
\r
845 return kAudioHardwareNoError;
\r
848 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
850 const AudioObjectPropertyAddress properties[],
\r
851 void* handlePointer )
\r
853 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
854 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
855 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
856 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
857 handle->xrun[1] = true;
\r
859 handle->xrun[0] = true;
\r
863 return kAudioHardwareNoError;
\r
866 static OSStatus rateListener( AudioObjectID inDevice,
\r
867 UInt32 /*nAddresses*/,
\r
868 const AudioObjectPropertyAddress /*properties*/[],
\r
869 void* ratePointer )
\r
871 Float64 *rate = (Float64 *) ratePointer;
\r
872 UInt32 dataSize = sizeof( Float64 );
\r
873 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
874 kAudioObjectPropertyScopeGlobal,
\r
875 kAudioObjectPropertyElementMaster };
\r
876 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
877 return kAudioHardwareNoError;
\r
880 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
881 unsigned int firstChannel, unsigned int sampleRate,
\r
882 RtAudioFormat format, unsigned int *bufferSize,
\r
883 RtAudio::StreamOptions *options )
\r
886 unsigned int nDevices = getDeviceCount();
\r
887 if ( nDevices == 0 ) {
\r
888 // This should not happen because a check is made before this function is called.
\r
889 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
893 if ( device >= nDevices ) {
\r
894 // This should not happen because a check is made before this function is called.
\r
895 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
899 AudioDeviceID deviceList[ nDevices ];
\r
900 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
901 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
902 kAudioObjectPropertyScopeGlobal,
\r
903 kAudioObjectPropertyElementMaster };
\r
904 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
905 0, NULL, &dataSize, (void *) &deviceList );
\r
906 if ( result != noErr ) {
\r
907 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
911 AudioDeviceID id = deviceList[ device ];
\r
913 // Setup for stream mode.
\r
914 bool isInput = false;
\r
915 if ( mode == INPUT ) {
\r
917 property.mScope = kAudioDevicePropertyScopeInput;
\r
920 property.mScope = kAudioDevicePropertyScopeOutput;
\r
922 // Get the stream "configuration".
\r
923 AudioBufferList *bufferList = nil;
\r
925 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
926 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
927 if ( result != noErr || dataSize == 0 ) {
\r
928 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
929 errorText_ = errorStream_.str();
\r
933 // Allocate the AudioBufferList.
\r
934 bufferList = (AudioBufferList *) malloc( dataSize );
\r
935 if ( bufferList == NULL ) {
\r
936 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
940 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
941 if (result != noErr || dataSize == 0) {
\r
942 free( bufferList );
\r
943 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
944 errorText_ = errorStream_.str();
\r
948 // Search for one or more streams that contain the desired number of
\r
949 // channels. CoreAudio devices can have an arbitrary number of
\r
950 // streams and each stream can have an arbitrary number of channels.
\r
951 // For each stream, a single buffer of interleaved samples is
\r
952 // provided. RtAudio prefers the use of one stream of interleaved
\r
953 // data or multiple consecutive single-channel streams. However, we
\r
954 // now support multiple consecutive multi-channel streams of
\r
955 // interleaved data as well.
\r
956 UInt32 iStream, offsetCounter = firstChannel;
\r
957 UInt32 nStreams = bufferList->mNumberBuffers;
\r
958 bool monoMode = false;
\r
959 bool foundStream = false;
\r
961 // First check that the device supports the requested number of
\r
963 UInt32 deviceChannels = 0;
\r
964 for ( iStream=0; iStream<nStreams; iStream++ )
\r
965 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
967 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
968 free( bufferList );
\r
969 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
970 errorText_ = errorStream_.str();
\r
974 // Look for a single stream meeting our needs.
\r
975 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
976 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
977 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
978 if ( streamChannels >= channels + offsetCounter ) {
\r
979 firstStream = iStream;
\r
980 channelOffset = offsetCounter;
\r
981 foundStream = true;
\r
984 if ( streamChannels > offsetCounter ) break;
\r
985 offsetCounter -= streamChannels;
\r
988 // If we didn't find a single stream above, then we should be able
\r
989 // to meet the channel specification with multiple streams.
\r
990 if ( foundStream == false ) {
\r
992 offsetCounter = firstChannel;
\r
993 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
994 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
995 if ( streamChannels > offsetCounter ) break;
\r
996 offsetCounter -= streamChannels;
\r
999 firstStream = iStream;
\r
1000 channelOffset = offsetCounter;
\r
1001 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
1003 if ( streamChannels > 1 ) monoMode = false;
\r
1004 while ( channelCounter > 0 ) {
\r
1005 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
1006 if ( streamChannels > 1 ) monoMode = false;
\r
1007 channelCounter -= streamChannels;
\r
1012 free( bufferList );
\r
1014 // Determine the buffer size.
\r
1015 AudioValueRange bufferRange;
\r
1016 dataSize = sizeof( AudioValueRange );
\r
1017 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
1018 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
1020 if ( result != noErr ) {
\r
1021 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
1022 errorText_ = errorStream_.str();
\r
1026 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1027 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
1028 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1030 // Set the buffer size. For multiple streams, I'm assuming we only
\r
1031 // need to make this setting for the master channel.
\r
1032 UInt32 theSize = (UInt32) *bufferSize;
\r
1033 dataSize = sizeof( UInt32 );
\r
1034 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1035 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1037 if ( result != noErr ) {
\r
1038 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1039 errorText_ = errorStream_.str();
\r
1043 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1044 // MUST be the same in both directions!
\r
1045 *bufferSize = theSize;
\r
1046 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1047 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1048 errorText_ = errorStream_.str();
\r
1052 stream_.bufferSize = *bufferSize;
\r
1053 stream_.nBuffers = 1;
\r
1055 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1056 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1058 dataSize = sizeof( hog_pid );
\r
1059 property.mSelector = kAudioDevicePropertyHogMode;
\r
1060 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1061 if ( result != noErr ) {
\r
1062 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1063 errorText_ = errorStream_.str();
\r
1067 if ( hog_pid != getpid() ) {
\r
1068 hog_pid = getpid();
\r
1069 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1070 if ( result != noErr ) {
\r
1071 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1072 errorText_ = errorStream_.str();
\r
1078 // Check and if necessary, change the sample rate for the device.
\r
1079 Float64 nominalRate;
\r
1080 dataSize = sizeof( Float64 );
\r
1081 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1082 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1083 if ( result != noErr ) {
\r
1084 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1085 errorText_ = errorStream_.str();
\r
1089 // Only change the sample rate if off by more than 1 Hz.
\r
1090 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1092 // Set a property listener for the sample rate change
\r
1093 Float64 reportedRate = 0.0;
\r
1094 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1095 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1096 if ( result != noErr ) {
\r
1097 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1098 errorText_ = errorStream_.str();
\r
1102 nominalRate = (Float64) sampleRate;
\r
1103 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1104 if ( result != noErr ) {
\r
1105 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1106 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1107 errorText_ = errorStream_.str();
\r
1111 // Now wait until the reported nominal rate is what we just set.
\r
1112 UInt32 microCounter = 0;
\r
1113 while ( reportedRate != nominalRate ) {
\r
1114 microCounter += 5000;
\r
1115 if ( microCounter > 5000000 ) break;
\r
1119 // Remove the property listener.
\r
1120 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1122 if ( microCounter > 5000000 ) {
\r
1123 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1124 errorText_ = errorStream_.str();
\r
1129 // Now set the stream format for all streams. Also, check the
\r
1130 // physical format of the device and change that if necessary.
\r
1131 AudioStreamBasicDescription description;
\r
1132 dataSize = sizeof( AudioStreamBasicDescription );
\r
1133 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1134 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1135 if ( result != noErr ) {
\r
1136 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1137 errorText_ = errorStream_.str();
\r
1141 // Set the sample rate and data format id. However, only make the
\r
1142 // change if the sample rate is not within 1.0 of the desired
\r
1143 // rate and the format is not linear pcm.
\r
1144 bool updateFormat = false;
\r
1145 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1146 description.mSampleRate = (Float64) sampleRate;
\r
1147 updateFormat = true;
\r
1150 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1151 description.mFormatID = kAudioFormatLinearPCM;
\r
1152 updateFormat = true;
\r
1155 if ( updateFormat ) {
\r
1156 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1157 if ( result != noErr ) {
\r
1158 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1159 errorText_ = errorStream_.str();
\r
1164 // Now check the physical format.
\r
1165 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1166 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1167 if ( result != noErr ) {
\r
1168 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1169 errorText_ = errorStream_.str();
\r
1173 //std::cout << "Current physical stream format:" << std::endl;
\r
1174 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1175 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1176 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1177 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1179 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1180 description.mFormatID = kAudioFormatLinearPCM;
\r
1181 //description.mSampleRate = (Float64) sampleRate;
\r
1182 AudioStreamBasicDescription testDescription = description;
\r
1183 UInt32 formatFlags;
\r
1185 // We'll try higher bit rates first and then work our way down.
\r
1186 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1187 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1188 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1189 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1190 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1191 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1192 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1193 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1194 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1196 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1198 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1200 bool setPhysicalFormat = false;
\r
1201 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1202 testDescription = description;
\r
1203 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1204 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1205 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1206 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1208 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1209 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1210 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1211 if ( result == noErr ) {
\r
1212 setPhysicalFormat = true;
\r
1213 //std::cout << "Updated physical stream format:" << std::endl;
\r
1214 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1215 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1216 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1217 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1222 if ( !setPhysicalFormat ) {
\r
1223 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1224 errorText_ = errorStream_.str();
\r
1227 } // done setting virtual/physical formats.
\r
1229 // Get the stream / device latency.
\r
1231 dataSize = sizeof( UInt32 );
\r
1232 property.mSelector = kAudioDevicePropertyLatency;
\r
1233 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1234 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1235 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1237 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1238 errorText_ = errorStream_.str();
\r
1239 error( RtAudioError::WARNING );
\r
1243 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1244 // always be presented in native-endian format, so we should never
\r
1245 // need to byte swap.
\r
1246 stream_.doByteSwap[mode] = false;
\r
1248 // From the CoreAudio documentation, PCM data must be supplied as
\r
1250 stream_.userFormat = format;
\r
1251 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1253 if ( streamCount == 1 )
\r
1254 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1255 else // multiple streams
\r
1256 stream_.nDeviceChannels[mode] = channels;
\r
1257 stream_.nUserChannels[mode] = channels;
\r
1258 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1259 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1260 else stream_.userInterleaved = true;
\r
1261 stream_.deviceInterleaved[mode] = true;
\r
1262 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1264 // Set flags for buffer conversion.
\r
1265 stream_.doConvertBuffer[mode] = false;
\r
1266 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1267 stream_.doConvertBuffer[mode] = true;
\r
1268 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1269 stream_.doConvertBuffer[mode] = true;
\r
1270 if ( streamCount == 1 ) {
\r
1271 if ( stream_.nUserChannels[mode] > 1 &&
\r
1272 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1273 stream_.doConvertBuffer[mode] = true;
\r
1275 else if ( monoMode && stream_.userInterleaved )
\r
1276 stream_.doConvertBuffer[mode] = true;
\r
1278 // Allocate our CoreHandle structure for the stream.
\r
1279 CoreHandle *handle = 0;
\r
1280 if ( stream_.apiHandle == 0 ) {
\r
1282 handle = new CoreHandle;
\r
1284 catch ( std::bad_alloc& ) {
\r
1285 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1289 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1290 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1293 stream_.apiHandle = (void *) handle;
\r
1296 handle = (CoreHandle *) stream_.apiHandle;
\r
1297 handle->iStream[mode] = firstStream;
\r
1298 handle->nStreams[mode] = streamCount;
\r
1299 handle->id[mode] = id;
\r
1301 // Allocate necessary internal buffers.
\r
1302 unsigned long bufferBytes;
\r
1303 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1304 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1305 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1306 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1307 if ( stream_.userBuffer[mode] == NULL ) {
\r
1308 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1312 // If possible, we will make use of the CoreAudio stream buffers as
\r
1313 // "device buffers". However, we can't do this if using multiple
\r
1315 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1317 bool makeBuffer = true;
\r
1318 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1319 if ( mode == INPUT ) {
\r
1320 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1321 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1322 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1326 if ( makeBuffer ) {
\r
1327 bufferBytes *= *bufferSize;
\r
1328 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1329 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1330 if ( stream_.deviceBuffer == NULL ) {
\r
1331 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1337 stream_.sampleRate = sampleRate;
\r
1338 stream_.device[mode] = device;
\r
1339 stream_.state = STREAM_STOPPED;
\r
1340 stream_.callbackInfo.object = (void *) this;
\r
1342 // Setup the buffer conversion information structure.
\r
1343 if ( stream_.doConvertBuffer[mode] ) {
\r
1344 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1345 else setConvertInfo( mode, channelOffset );
\r
1348 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1349 // Only one callback procedure per device.
\r
1350 stream_.mode = DUPLEX;
\r
1352 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1353 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1355 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1356 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1358 if ( result != noErr ) {
\r
1359 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1360 errorText_ = errorStream_.str();
\r
1363 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1364 stream_.mode = DUPLEX;
\r
1366 stream_.mode = mode;
\r
1369 // Setup the device property listener for over/underload.
\r
1370 property.mSelector = kAudioDeviceProcessorOverload;
\r
1371 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1372 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1378 pthread_cond_destroy( &handle->condition );
\r
1380 stream_.apiHandle = 0;
\r
1383 for ( int i=0; i<2; i++ ) {
\r
1384 if ( stream_.userBuffer[i] ) {
\r
1385 free( stream_.userBuffer[i] );
\r
1386 stream_.userBuffer[i] = 0;
\r
1390 if ( stream_.deviceBuffer ) {
\r
1391 free( stream_.deviceBuffer );
\r
1392 stream_.deviceBuffer = 0;
\r
1395 stream_.state = STREAM_CLOSED;
\r
1399 void RtApiCore :: closeStream( void )
\r
1401 if ( stream_.state == STREAM_CLOSED ) {
\r
1402 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1403 error( RtAudioError::WARNING );
\r
1407 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1408 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1410 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
1411 kAudioObjectPropertyScopeGlobal,
\r
1412 kAudioObjectPropertyElementMaster };
\r
1414 property.mSelector = kAudioDeviceProcessorOverload;
\r
1415 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1416 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
\r
1417 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
\r
1418 error( RtAudioError::WARNING );
\r
1421 if ( stream_.state == STREAM_RUNNING )
\r
1422 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1423 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1424 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1426 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1427 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1431 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1433 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
1434 kAudioObjectPropertyScopeGlobal,
\r
1435 kAudioObjectPropertyElementMaster };
\r
1437 property.mSelector = kAudioDeviceProcessorOverload;
\r
1438 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1439 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
\r
1440 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
\r
1441 error( RtAudioError::WARNING );
\r
1444 if ( stream_.state == STREAM_RUNNING )
\r
1445 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1446 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1447 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1449 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1450 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1454 for ( int i=0; i<2; i++ ) {
\r
1455 if ( stream_.userBuffer[i] ) {
\r
1456 free( stream_.userBuffer[i] );
\r
1457 stream_.userBuffer[i] = 0;
\r
1461 if ( stream_.deviceBuffer ) {
\r
1462 free( stream_.deviceBuffer );
\r
1463 stream_.deviceBuffer = 0;
\r
1466 // Destroy pthread condition variable.
\r
1467 pthread_cond_destroy( &handle->condition );
\r
1469 stream_.apiHandle = 0;
\r
1471 stream_.mode = UNINITIALIZED;
\r
1472 stream_.state = STREAM_CLOSED;
\r
1475 void RtApiCore :: startStream( void )
\r
1478 if ( stream_.state == STREAM_RUNNING ) {
\r
1479 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1480 error( RtAudioError::WARNING );
\r
1484 OSStatus result = noErr;
\r
1485 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1486 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1488 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1489 if ( result != noErr ) {
\r
1490 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1491 errorText_ = errorStream_.str();
\r
1496 if ( stream_.mode == INPUT ||
\r
1497 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1499 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1500 if ( result != noErr ) {
\r
1501 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1502 errorText_ = errorStream_.str();
\r
1507 handle->drainCounter = 0;
\r
1508 handle->internalDrain = false;
\r
1509 stream_.state = STREAM_RUNNING;
\r
1512 if ( result == noErr ) return;
\r
1513 error( RtAudioError::SYSTEM_ERROR );
\r
1516 void RtApiCore :: stopStream( void )
\r
1519 if ( stream_.state == STREAM_STOPPED ) {
\r
1520 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1521 error( RtAudioError::WARNING );
\r
1525 OSStatus result = noErr;
\r
1526 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1527 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1529 if ( handle->drainCounter == 0 ) {
\r
1530 handle->drainCounter = 2;
\r
1531 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1534 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1535 if ( result != noErr ) {
\r
1536 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1537 errorText_ = errorStream_.str();
\r
1542 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1544 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1545 if ( result != noErr ) {
\r
1546 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1547 errorText_ = errorStream_.str();
\r
1552 stream_.state = STREAM_STOPPED;
\r
1555 if ( result == noErr ) return;
\r
1556 error( RtAudioError::SYSTEM_ERROR );
\r
1559 void RtApiCore :: abortStream( void )
\r
1562 if ( stream_.state == STREAM_STOPPED ) {
\r
1563 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1564 error( RtAudioError::WARNING );
\r
1568 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1569 handle->drainCounter = 2;
\r
1574 // This function will be called by a spawned thread when the user
\r
1575 // callback function signals that the stream should be stopped or
\r
1576 // aborted. It is better to handle it this way because the
\r
1577 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1578 // function is called.
\r
1579 static void *coreStopStream( void *ptr )
\r
1581 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1582 RtApiCore *object = (RtApiCore *) info->object;
\r
1584 object->stopStream();
\r
1585 pthread_exit( NULL );
\r
1588 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1589 const AudioBufferList *inBufferList,
\r
1590 const AudioBufferList *outBufferList )
\r
1592 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1593 if ( stream_.state == STREAM_CLOSED ) {
\r
1594 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1595 error( RtAudioError::WARNING );
\r
1599 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1600 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1602 // Check if we were draining the stream and signal is finished.
\r
1603 if ( handle->drainCounter > 3 ) {
\r
1604 ThreadHandle threadId;
\r
1606 stream_.state = STREAM_STOPPING;
\r
1607 if ( handle->internalDrain == true )
\r
1608 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1609 else // external call to stopStream()
\r
1610 pthread_cond_signal( &handle->condition );
\r
1614 AudioDeviceID outputDevice = handle->id[0];
\r
1616 // Invoke user callback to get fresh output data UNLESS we are
\r
1617 // draining stream or duplex mode AND the input/output devices are
\r
1618 // different AND this function is called for the input device.
\r
1619 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1620 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1621 double streamTime = getStreamTime();
\r
1622 RtAudioStreamStatus status = 0;
\r
1623 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1624 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1625 handle->xrun[0] = false;
\r
1627 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1628 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1629 handle->xrun[1] = false;
\r
1632 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1633 stream_.bufferSize, streamTime, status, info->userData );
\r
1634 if ( cbReturnValue == 2 ) {
\r
1635 stream_.state = STREAM_STOPPING;
\r
1636 handle->drainCounter = 2;
\r
1640 else if ( cbReturnValue == 1 ) {
\r
1641 handle->drainCounter = 1;
\r
1642 handle->internalDrain = true;
\r
1646 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1648 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1650 if ( handle->nStreams[0] == 1 ) {
\r
1651 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1653 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1655 else { // fill multiple streams with zeros
\r
1656 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1657 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1659 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1663 else if ( handle->nStreams[0] == 1 ) {
\r
1664 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1665 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1666 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1668 else { // copy from user buffer
\r
1669 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1670 stream_.userBuffer[0],
\r
1671 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1674 else { // fill multiple streams
\r
1675 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1676 if ( stream_.doConvertBuffer[0] ) {
\r
1677 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1678 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1681 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1682 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1683 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1684 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1685 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1688 else { // fill multiple multi-channel streams with interleaved data
\r
1689 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1690 Float32 *out, *in;
\r
1692 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1693 UInt32 inChannels = stream_.nUserChannels[0];
\r
1694 if ( stream_.doConvertBuffer[0] ) {
\r
1695 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1696 inChannels = stream_.nDeviceChannels[0];
\r
1699 if ( inInterleaved ) inOffset = 1;
\r
1700 else inOffset = stream_.bufferSize;
\r
1702 channelsLeft = inChannels;
\r
1703 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1705 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1706 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1709 // Account for possible channel offset in first stream
\r
1710 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1711 streamChannels -= stream_.channelOffset[0];
\r
1712 outJump = stream_.channelOffset[0];
\r
1716 // Account for possible unfilled channels at end of the last stream
\r
1717 if ( streamChannels > channelsLeft ) {
\r
1718 outJump = streamChannels - channelsLeft;
\r
1719 streamChannels = channelsLeft;
\r
1722 // Determine input buffer offsets and skips
\r
1723 if ( inInterleaved ) {
\r
1724 inJump = inChannels;
\r
1725 in += inChannels - channelsLeft;
\r
1729 in += (inChannels - channelsLeft) * inOffset;
\r
1732 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1733 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1734 *out++ = in[j*inOffset];
\r
1739 channelsLeft -= streamChannels;
\r
1745 // Don't bother draining input
\r
1746 if ( handle->drainCounter ) {
\r
1747 handle->drainCounter++;
\r
1751 AudioDeviceID inputDevice;
\r
1752 inputDevice = handle->id[1];
\r
1753 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1755 if ( handle->nStreams[1] == 1 ) {
\r
1756 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1757 convertBuffer( stream_.userBuffer[1],
\r
1758 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1759 stream_.convertInfo[1] );
\r
1761 else { // copy to user buffer
\r
1762 memcpy( stream_.userBuffer[1],
\r
1763 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1764 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1767 else { // read from multiple streams
\r
1768 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1769 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1771 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1772 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1773 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1774 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1775 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1778 else { // read from multiple multi-channel streams
\r
1779 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1780 Float32 *out, *in;
\r
1782 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1783 UInt32 outChannels = stream_.nUserChannels[1];
\r
1784 if ( stream_.doConvertBuffer[1] ) {
\r
1785 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1786 outChannels = stream_.nDeviceChannels[1];
\r
1789 if ( outInterleaved ) outOffset = 1;
\r
1790 else outOffset = stream_.bufferSize;
\r
1792 channelsLeft = outChannels;
\r
1793 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1795 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1796 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1799 // Account for possible channel offset in first stream
\r
1800 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1801 streamChannels -= stream_.channelOffset[1];
\r
1802 inJump = stream_.channelOffset[1];
\r
1806 // Account for possible unread channels at end of the last stream
\r
1807 if ( streamChannels > channelsLeft ) {
\r
1808 inJump = streamChannels - channelsLeft;
\r
1809 streamChannels = channelsLeft;
\r
1812 // Determine output buffer offsets and skips
\r
1813 if ( outInterleaved ) {
\r
1814 outJump = outChannels;
\r
1815 out += outChannels - channelsLeft;
\r
1819 out += (outChannels - channelsLeft) * outOffset;
\r
1822 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1823 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1824 out[j*outOffset] = *in++;
\r
1829 channelsLeft -= streamChannels;
\r
1833 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1834 convertBuffer( stream_.userBuffer[1],
\r
1835 stream_.deviceBuffer,
\r
1836 stream_.convertInfo[1] );
\r
1842 //MUTEX_UNLOCK( &stream_.mutex );
\r
1844 RtApi::tickStreamTime();
\r
1848 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1852 case kAudioHardwareNotRunningError:
\r
1853 return "kAudioHardwareNotRunningError";
\r
1855 case kAudioHardwareUnspecifiedError:
\r
1856 return "kAudioHardwareUnspecifiedError";
\r
1858 case kAudioHardwareUnknownPropertyError:
\r
1859 return "kAudioHardwareUnknownPropertyError";
\r
1861 case kAudioHardwareBadPropertySizeError:
\r
1862 return "kAudioHardwareBadPropertySizeError";
\r
1864 case kAudioHardwareIllegalOperationError:
\r
1865 return "kAudioHardwareIllegalOperationError";
\r
1867 case kAudioHardwareBadObjectError:
\r
1868 return "kAudioHardwareBadObjectError";
\r
1870 case kAudioHardwareBadDeviceError:
\r
1871 return "kAudioHardwareBadDeviceError";
\r
1873 case kAudioHardwareBadStreamError:
\r
1874 return "kAudioHardwareBadStreamError";
\r
1876 case kAudioHardwareUnsupportedOperationError:
\r
1877 return "kAudioHardwareUnsupportedOperationError";
\r
1879 case kAudioDeviceUnsupportedFormatError:
\r
1880 return "kAudioDeviceUnsupportedFormatError";
\r
1882 case kAudioDevicePermissionsError:
\r
1883 return "kAudioDevicePermissionsError";
\r
1886 return "CoreAudio unknown error";
\r
1890 //******************** End of __MACOSX_CORE__ *********************//
\r
1893 #if defined(__UNIX_JACK__)
\r
1895 // JACK is a low-latency audio server, originally written for the
\r
1896 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1897 // connect a number of different applications to an audio device, as
\r
1898 // well as allowing them to share audio between themselves.
\r
1900 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1901 // have ports connected to the server. The JACK server is typically
\r
1902 // started in a terminal as follows:
\r
1904 // .jackd -d alsa -d hw:0
\r
1906 // or through an interface program such as qjackctl. Many of the
\r
1907 // parameters normally set for a stream are fixed by the JACK server
\r
1908 // and can be specified when the JACK server is started. In
\r
1911 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1913 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1914 // frames, and number of buffers = 4. Once the server is running, it
\r
1915 // is not possible to override these values. If the values are not
\r
1916 // specified in the command-line, the JACK server uses default values.
\r
1918 // The JACK server does not have to be running when an instance of
\r
1919 // RtApiJack is created, though the function getDeviceCount() will
\r
1920 // report 0 devices found until JACK has been started. When no
\r
1921 // devices are available (i.e., the JACK server is not running), a
\r
1922 // stream cannot be opened.
\r
1924 #include <jack/jack.h>
\r
1925 #include <unistd.h>
\r
1928 // A structure to hold various information related to the Jack API
\r
1929 // implementation.
\r
1930 struct JackHandle {
\r
1931 jack_client_t *client;
\r
1932 jack_port_t **ports[2];
\r
1933 std::string deviceName[2];
\r
1935 pthread_cond_t condition;
\r
1936 int drainCounter; // Tracks callback counts when draining
\r
1937 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1940 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op JACK error handler, installed to suppress JACK's internal error
// reporting in non-debug builds.  (Fixed: removed the stray semicolon after
// the function body, which formed a redundant empty declaration.)
static void jackSilentError( const char * ) {}
\r
1945 RtApiJack :: RtApiJack()
\r
1947 // Nothing to do here.
\r
1948 #if !defined(__RTAUDIO_DEBUG__)
\r
1949 // Turn off Jack's internal error reporting.
\r
1950 jack_set_error_function( &jackSilentError );
\r
1954 RtApiJack :: ~RtApiJack()
\r
1956 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1959 unsigned int RtApiJack :: getDeviceCount( void )
\r
1961 // See if we can become a jack client.
\r
1962 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1963 jack_status_t *status = NULL;
\r
1964 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1965 if ( client == 0 ) return 0;
\r
1967 const char **ports;
\r
1968 std::string port, previousPort;
\r
1969 unsigned int nChannels = 0, nDevices = 0;
\r
1970 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1972 // Parse the port names up to the first colon (:).
\r
1973 size_t iColon = 0;
\r
1975 port = (char *) ports[ nChannels ];
\r
1976 iColon = port.find(":");
\r
1977 if ( iColon != std::string::npos ) {
\r
1978 port = port.substr( 0, iColon + 1 );
\r
1979 if ( port != previousPort ) {
\r
1981 previousPort = port;
\r
1984 } while ( ports[++nChannels] );
\r
1988 jack_client_close( client );
\r
1992 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1994 RtAudio::DeviceInfo info;
\r
1995 info.probed = false;
\r
1997 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1998 jack_status_t *status = NULL;
\r
1999 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
2000 if ( client == 0 ) {
\r
2001 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
2002 error( RtAudioError::WARNING );
\r
2006 const char **ports;
\r
2007 std::string port, previousPort;
\r
2008 unsigned int nPorts = 0, nDevices = 0;
\r
2009 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2011 // Parse the port names up to the first colon (:).
\r
2012 size_t iColon = 0;
\r
2014 port = (char *) ports[ nPorts ];
\r
2015 iColon = port.find(":");
\r
2016 if ( iColon != std::string::npos ) {
\r
2017 port = port.substr( 0, iColon );
\r
2018 if ( port != previousPort ) {
\r
2019 if ( nDevices == device ) info.name = port;
\r
2021 previousPort = port;
\r
2024 } while ( ports[++nPorts] );
\r
2028 if ( device >= nDevices ) {
\r
2029 jack_client_close( client );
\r
2030 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
2031 error( RtAudioError::INVALID_USE );
\r
2035 // Get the current jack server sample rate.
\r
2036 info.sampleRates.clear();
\r
2038 info.preferredSampleRate = jack_get_sample_rate( client );
\r
2039 info.sampleRates.push_back( info.preferredSampleRate );
\r
2041 // Count the available ports containing the client name as device
\r
2042 // channels. Jack "input ports" equal RtAudio output channels.
\r
2043 unsigned int nChannels = 0;
\r
2044 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
2046 while ( ports[ nChannels ] ) nChannels++;
\r
2048 info.outputChannels = nChannels;
\r
2051 // Jack "output ports" equal RtAudio input channels.
\r
2053 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
2055 while ( ports[ nChannels ] ) nChannels++;
\r
2057 info.inputChannels = nChannels;
\r
2060 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2061 jack_client_close(client);
\r
2062 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2063 error( RtAudioError::WARNING );
\r
2067 // If device opens for both playback and capture, we determine the channels.
\r
2068 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2069 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2071 // Jack always uses 32-bit floats.
\r
2072 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2074 // Jack doesn't provide default devices so we'll use the first available one.
\r
2075 if ( device == 0 && info.outputChannels > 0 )
\r
2076 info.isDefaultOutput = true;
\r
2077 if ( device == 0 && info.inputChannels > 0 )
\r
2078 info.isDefaultInput = true;
\r
2080 jack_client_close(client);
\r
2081 info.probed = true;
\r
2085 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2087 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2089 RtApiJack *object = (RtApiJack *) info->object;
\r
2090 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2095 // This function will be called by a spawned thread when the Jack
\r
2096 // server signals that it is shutting down. It is necessary to handle
\r
2097 // it this way because the jackShutdown() function must return before
\r
2098 // the jack_deactivate() function (in closeStream()) will return.
\r
2099 static void *jackCloseStream( void *ptr )
\r
2101 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2102 RtApiJack *object = (RtApiJack *) info->object;
\r
2104 object->closeStream();
\r
2106 pthread_exit( NULL );
\r
2108 static void jackShutdown( void *infoPointer )
\r
2110 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2111 RtApiJack *object = (RtApiJack *) info->object;
\r
2113 // Check current stream state. If stopped, then we'll assume this
\r
2114 // was called as a result of a call to RtApiJack::stopStream (the
\r
2115 // deactivation of a client handle causes this function to be called).
\r
2116 // If not, we'll assume the Jack server is shutting down or some
\r
2117 // other problem occurred and we should close the stream.
\r
2118 if ( object->isStreamRunning() == false ) return;
\r
2120 ThreadHandle threadId;
\r
2121 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2122 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2125 static int jackXrun( void *infoPointer )
\r
2127 JackHandle *handle = (JackHandle *) infoPointer;
\r
2129 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2130 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2135 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2136 unsigned int firstChannel, unsigned int sampleRate,
\r
2137 RtAudioFormat format, unsigned int *bufferSize,
\r
2138 RtAudio::StreamOptions *options )
\r
2140 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2142 // Look for jack server and try to become a client (only do once per stream).
\r
2143 jack_client_t *client = 0;
\r
2144 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2145 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2146 jack_status_t *status = NULL;
\r
2147 if ( options && !options->streamName.empty() )
\r
2148 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2150 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2151 if ( client == 0 ) {
\r
2152 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2153 error( RtAudioError::WARNING );
\r
2158 // The handle must have been created on an earlier pass.
\r
2159 client = handle->client;
\r
2162 const char **ports;
\r
2163 std::string port, previousPort, deviceName;
\r
2164 unsigned int nPorts = 0, nDevices = 0;
\r
2165 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2167 // Parse the port names up to the first colon (:).
\r
2168 size_t iColon = 0;
\r
2170 port = (char *) ports[ nPorts ];
\r
2171 iColon = port.find(":");
\r
2172 if ( iColon != std::string::npos ) {
\r
2173 port = port.substr( 0, iColon );
\r
2174 if ( port != previousPort ) {
\r
2175 if ( nDevices == device ) deviceName = port;
\r
2177 previousPort = port;
\r
2180 } while ( ports[++nPorts] );
\r
2184 if ( device >= nDevices ) {
\r
2185 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2189 // Count the available ports containing the client name as device
\r
2190 // channels. Jack "input ports" equal RtAudio output channels.
\r
2191 unsigned int nChannels = 0;
\r
2192 unsigned long flag = JackPortIsInput;
\r
2193 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2194 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2196 while ( ports[ nChannels ] ) nChannels++;
\r
2200 // Compare the jack ports for specified client to the requested number of channels.
\r
2201 if ( nChannels < (channels + firstChannel) ) {
\r
2202 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2203 errorText_ = errorStream_.str();
\r
2207 // Check the jack server sample rate.
\r
2208 unsigned int jackRate = jack_get_sample_rate( client );
\r
2209 if ( sampleRate != jackRate ) {
\r
2210 jack_client_close( client );
\r
2211 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2212 errorText_ = errorStream_.str();
\r
2215 stream_.sampleRate = jackRate;
\r
2217 // Get the latency of the JACK port.
\r
2218 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2219 if ( ports[ firstChannel ] ) {
\r
2220 // Added by Ge Wang
\r
2221 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2222 // the range (usually the min and max are equal)
\r
2223 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2224 // get the latency range
\r
2225 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2226 // be optimistic, use the min!
\r
2227 stream_.latency[mode] = latrange.min;
\r
2228 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2232 // The jack server always uses 32-bit floating-point data.
\r
2233 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2234 stream_.userFormat = format;
\r
2236 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2237 else stream_.userInterleaved = true;
\r
2239 // Jack always uses non-interleaved buffers.
\r
2240 stream_.deviceInterleaved[mode] = false;
\r
2242 // Jack always provides host byte-ordered data.
\r
2243 stream_.doByteSwap[mode] = false;
\r
2245 // Get the buffer size. The buffer size and number of buffers
\r
2246 // (periods) is set when the jack server is started.
\r
2247 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2248 *bufferSize = stream_.bufferSize;
\r
2250 stream_.nDeviceChannels[mode] = channels;
\r
2251 stream_.nUserChannels[mode] = channels;
\r
2253 // Set flags for buffer conversion.
\r
2254 stream_.doConvertBuffer[mode] = false;
\r
2255 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2256 stream_.doConvertBuffer[mode] = true;
\r
2257 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2258 stream_.nUserChannels[mode] > 1 )
\r
2259 stream_.doConvertBuffer[mode] = true;
\r
2261 // Allocate our JackHandle structure for the stream.
\r
2262 if ( handle == 0 ) {
\r
2264 handle = new JackHandle;
\r
2266 catch ( std::bad_alloc& ) {
\r
2267 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2271 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2272 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2275 stream_.apiHandle = (void *) handle;
\r
2276 handle->client = client;
\r
2278 handle->deviceName[mode] = deviceName;
\r
2280 // Allocate necessary internal buffers.
\r
2281 unsigned long bufferBytes;
\r
2282 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2283 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2284 if ( stream_.userBuffer[mode] == NULL ) {
\r
2285 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2289 if ( stream_.doConvertBuffer[mode] ) {
\r
2291 bool makeBuffer = true;
\r
2292 if ( mode == OUTPUT )
\r
2293 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2294 else { // mode == INPUT
\r
2295 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2296 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2297 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2298 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2302 if ( makeBuffer ) {
\r
2303 bufferBytes *= *bufferSize;
\r
2304 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2305 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2306 if ( stream_.deviceBuffer == NULL ) {
\r
2307 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2313 // Allocate memory for the Jack ports (channels) identifiers.
\r
2314 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2315 if ( handle->ports[mode] == NULL ) {
\r
2316 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2320 stream_.device[mode] = device;
\r
2321 stream_.channelOffset[mode] = firstChannel;
\r
2322 stream_.state = STREAM_STOPPED;
\r
2323 stream_.callbackInfo.object = (void *) this;
\r
2325 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2326 // We had already set up the stream for output.
\r
2327 stream_.mode = DUPLEX;
\r
2329 stream_.mode = mode;
\r
2330 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2331 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2332 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2335 // Register our ports.
\r
2337 if ( mode == OUTPUT ) {
\r
2338 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2339 snprintf( label, 64, "outport %d", i );
\r
2340 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2341 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2345 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2346 snprintf( label, 64, "inport %d", i );
\r
2347 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2348 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2352 // Setup the buffer conversion information structure. We don't use
\r
2353 // buffers to do channel offsets, so we override that parameter
\r
2355 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2361 pthread_cond_destroy( &handle->condition );
\r
2362 jack_client_close( handle->client );
\r
2364 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2365 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2368 stream_.apiHandle = 0;
\r
2371 for ( int i=0; i<2; i++ ) {
\r
2372 if ( stream_.userBuffer[i] ) {
\r
2373 free( stream_.userBuffer[i] );
\r
2374 stream_.userBuffer[i] = 0;
\r
2378 if ( stream_.deviceBuffer ) {
\r
2379 free( stream_.deviceBuffer );
\r
2380 stream_.deviceBuffer = 0;
\r
2386 void RtApiJack :: closeStream( void )
\r
2388 if ( stream_.state == STREAM_CLOSED ) {
\r
2389 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2390 error( RtAudioError::WARNING );
\r
2394 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2397 if ( stream_.state == STREAM_RUNNING )
\r
2398 jack_deactivate( handle->client );
\r
2400 jack_client_close( handle->client );
\r
2404 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2405 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2406 pthread_cond_destroy( &handle->condition );
\r
2408 stream_.apiHandle = 0;
\r
2411 for ( int i=0; i<2; i++ ) {
\r
2412 if ( stream_.userBuffer[i] ) {
\r
2413 free( stream_.userBuffer[i] );
\r
2414 stream_.userBuffer[i] = 0;
\r
2418 if ( stream_.deviceBuffer ) {
\r
2419 free( stream_.deviceBuffer );
\r
2420 stream_.deviceBuffer = 0;
\r
2423 stream_.mode = UNINITIALIZED;
\r
2424 stream_.state = STREAM_CLOSED;
\r
2427 void RtApiJack :: startStream( void )
\r
2430 if ( stream_.state == STREAM_RUNNING ) {
\r
2431 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2432 error( RtAudioError::WARNING );
\r
2436 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2437 int result = jack_activate( handle->client );
\r
2439 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2443 const char **ports;
\r
2445 // Get the list of available ports.
\r
2446 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2448 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2449 if ( ports == NULL) {
\r
2450 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2454 // Now make the port connections. Since RtAudio wasn't designed to
\r
2455 // allow the user to select particular channels of a device, we'll
\r
2456 // just open the first "nChannels" ports with offset.
\r
2457 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2459 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2460 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2463 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2470 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2472 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2473 if ( ports == NULL) {
\r
2474 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2478 // Now make the port connections. See note above.
\r
2479 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2481 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2482 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2485 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2492 handle->drainCounter = 0;
\r
2493 handle->internalDrain = false;
\r
2494 stream_.state = STREAM_RUNNING;
\r
2497 if ( result == 0 ) return;
\r
2498 error( RtAudioError::SYSTEM_ERROR );
\r
2501 void RtApiJack :: stopStream( void )
\r
2504 if ( stream_.state == STREAM_STOPPED ) {
\r
2505 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2506 error( RtAudioError::WARNING );
\r
2510 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2511 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2513 if ( handle->drainCounter == 0 ) {
\r
2514 handle->drainCounter = 2;
\r
2515 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2519 jack_deactivate( handle->client );
\r
2520 stream_.state = STREAM_STOPPED;
\r
2523 void RtApiJack :: abortStream( void )
\r
2526 if ( stream_.state == STREAM_STOPPED ) {
\r
2527 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2528 error( RtAudioError::WARNING );
\r
2532 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2533 handle->drainCounter = 2;
\r
2538 // This function will be called by a spawned thread when the user
\r
2539 // callback function signals that the stream should be stopped or
\r
2540 // aborted. It is necessary to handle it this way because the
\r
2541 // callbackEvent() function must return before the jack_deactivate()
\r
2542 // function will return.
\r
2543 static void *jackStopStream( void *ptr )
\r
2545 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2546 RtApiJack *object = (RtApiJack *) info->object;
\r
2548 object->stopStream();
\r
2549 pthread_exit( NULL );
\r
2552 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2554 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2555 if ( stream_.state == STREAM_CLOSED ) {
\r
2556 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2557 error( RtAudioError::WARNING );
\r
2560 if ( stream_.bufferSize != nframes ) {
\r
2561 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2562 error( RtAudioError::WARNING );
\r
2566 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2567 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2569 // Check if we were draining the stream and signal is finished.
\r
2570 if ( handle->drainCounter > 3 ) {
\r
2571 ThreadHandle threadId;
\r
2573 stream_.state = STREAM_STOPPING;
\r
2574 if ( handle->internalDrain == true )
\r
2575 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2577 pthread_cond_signal( &handle->condition );
\r
2581 // Invoke user callback first, to get fresh output data.
\r
2582 if ( handle->drainCounter == 0 ) {
\r
2583 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2584 double streamTime = getStreamTime();
\r
2585 RtAudioStreamStatus status = 0;
\r
2586 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2587 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2588 handle->xrun[0] = false;
\r
2590 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2591 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2592 handle->xrun[1] = false;
\r
2594 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2595 stream_.bufferSize, streamTime, status, info->userData );
\r
2596 if ( cbReturnValue == 2 ) {
\r
2597 stream_.state = STREAM_STOPPING;
\r
2598 handle->drainCounter = 2;
\r
2600 pthread_create( &id, NULL, jackStopStream, info );
\r
2603 else if ( cbReturnValue == 1 ) {
\r
2604 handle->drainCounter = 1;
\r
2605 handle->internalDrain = true;
\r
2609 jack_default_audio_sample_t *jackbuffer;
\r
2610 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2611 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2613 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2615 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2616 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2617 memset( jackbuffer, 0, bufferBytes );
\r
2621 else if ( stream_.doConvertBuffer[0] ) {
\r
2623 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2625 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2626 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2627 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2630 else { // no buffer conversion
\r
2631 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2632 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2633 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2638 // Don't bother draining input
\r
2639 if ( handle->drainCounter ) {
\r
2640 handle->drainCounter++;
\r
2644 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2646 if ( stream_.doConvertBuffer[1] ) {
\r
2647 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2648 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2649 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2651 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2653 else { // no buffer conversion
\r
2654 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2655 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2656 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2662 RtApi::tickStreamTime();
\r
2665 //******************** End of __UNIX_JACK__ *********************//
\r
2668 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2670 // The ASIO API is designed around a callback scheme, so this
\r
2671 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2672 // Jack. The primary constraint with ASIO is that it only allows
\r
2673 // access to a single driver at a time. Thus, it is not possible to
\r
2674 // have more than one simultaneous RtAudio stream.
\r
2676 // This implementation also requires a number of external ASIO files
\r
2677 // and a few global variables. The ASIO callback scheme does not
\r
2678 // allow for the passing of user data, so we must create a global
\r
2679 // pointer to our callbackInfo structure.
\r
2681 // On unix systems, we make use of a pthread condition variable.
\r
2682 // Since there is no equivalent in Windows, I hacked something based
\r
2683 // on information found in
\r
2684 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2686 #include "asiosys.h"
\r
2688 #include "iasiothiscallresolver.h"
\r
2689 #include "asiodrivers.h"
\r
// --- ASIO file-scope state ---------------------------------------------
// NOTE(review): this chunk is a lossy extraction of the original file —
// each line begins with its original-file line number and several lines
// are missing (numbering gaps, e.g. 2702-2704: the AsioHandle HANDLE
// member and constructor header). Restore from the canonical source
// before compiling; comments here annotate the visible code only.
2692 static AsioDrivers drivers;
\r
2693 static ASIOCallbacks asioCallbacks;
\r
2694 static ASIODriverInfo driverInfo;
\r
// Global pointer used by the ASIO callbacks, which cannot carry user data.
2695 static CallbackInfo *asioCallbackInfo;
\r
2696 static bool asioXRun;
\r
// Per-stream bookkeeping shared between the API object and the callbacks.
2698 struct AsioHandle {
\r
2699 int drainCounter; // Tracks callback counts when draining
\r
2700 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2701 ASIOBufferInfo *bufferInfos;
\r
// NOTE(review): the member list between 2701 and this initializer (likely
// a Windows event HANDLE named "condition", used below via
// handle->condition) was dropped by the extraction — confirm upstream.
2705 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2708 // Function declarations (definitions at end of section)
\r
2709 static const char* getAsioErrorString( ASIOError result );
\r
2710 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2711 static long asioMessages( long selector, long value, void* message, double* opt );
\r
// Constructor: initialize COM (ASIO requires a single-threaded apartment),
// clear any currently loaded ASIO driver, and seed the ASIODriverInfo
// structure used by later ASIOInit() calls.
// NOTE(review): lossy extraction — opening brace and lines 2723/2725
// context (else-branch setting coInitialized_) are partially missing.
2713 RtApiAsio :: RtApiAsio()
\r
2715 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2716 // CoInitialize beforehand, but it must be for appartment threading
\r
2717 // (in which case, CoInitilialize will return S_FALSE here).
\r
2718 coInitialized_ = false;
\r
2719 HRESULT hr = CoInitialize( NULL );
\r
2720 if ( FAILED(hr) ) {
\r
2721 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2722 error( RtAudioError::WARNING );
\r
// Remember that we own the COM init so the destructor can CoUninitialize.
2724 coInitialized_ = true;
\r
2726 drivers.removeCurrentDriver();
\r
2727 driverInfo.asioVersion = 2;
\r
2729 // See note in DirectSound implementation about GetDesktopWindow().
\r
2730 driverInfo.sysRef = GetForegroundWindow();
\r
// Destructor: close any open stream, then undo the constructor's
// CoInitialize (only if it succeeded there).
2733 RtApiAsio :: ~RtApiAsio()
\r
2735 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2736 if ( coInitialized_ ) CoUninitialize();
\r
// Returns the number of ASIO drivers registered on the system, as
// reported by the AsioDrivers helper.
2739 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2741 return (unsigned int) drivers.asioGetNumDev();
\r
// Probe a single ASIO device: load its driver, query channel counts,
// supported sample rates, and the native data format, then unload the
// driver. While a stream is open, returns cached results from
// saveDeviceInfo() instead (ASIO allows only one loaded driver at a time).
// NOTE(review): lossy extraction — the early "return info;" lines and
// several closing braces are missing (numbering gaps 2754-2756, 2760-2762,
// 2779-2781, etc.); behavior notes below describe the visible code only.
2744 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2746 RtAudio::DeviceInfo info;
\r
2747 info.probed = false;
\r
2750 unsigned int nDevices = getDeviceCount();
\r
2751 if ( nDevices == 0 ) {
\r
2752 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2753 error( RtAudioError::INVALID_USE );
\r
2757 if ( device >= nDevices ) {
\r
2758 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2759 error( RtAudioError::INVALID_USE );
\r
2763 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2764 if ( stream_.state != STREAM_CLOSED ) {
\r
2765 if ( device >= devices_.size() ) {
\r
2766 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2767 error( RtAudioError::WARNING );
\r
2770 return devices_[ device ];
\r
2773 char driverName[32];
\r
2774 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2775 if ( result != ASE_OK ) {
\r
2776 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2777 errorText_ = errorStream_.str();
\r
2778 error( RtAudioError::WARNING );
\r
2782 info.name = driverName;
\r
2784 if ( !drivers.loadDriver( driverName ) ) {
\r
2785 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2786 errorText_ = errorStream_.str();
\r
2787 error( RtAudioError::WARNING );
\r
2791 result = ASIOInit( &driverInfo );
\r
2792 if ( result != ASE_OK ) {
\r
2793 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2794 errorText_ = errorStream_.str();
\r
2795 error( RtAudioError::WARNING );
\r
2799 // Determine the device channel information.
\r
2800 long inputChannels, outputChannels;
\r
2801 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2802 if ( result != ASE_OK ) {
\r
// Unload the driver before bailing so the system is left clean.
2803 drivers.removeCurrentDriver();
\r
2804 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2805 errorText_ = errorStream_.str();
\r
2806 error( RtAudioError::WARNING );
\r
2810 info.outputChannels = outputChannels;
\r
2811 info.inputChannels = inputChannels;
\r
// Duplex capability is the min of the two directional channel counts.
2812 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2813 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2815 // Determine the supported sample rates.
\r
2816 info.sampleRates.clear();
\r
2817 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2818 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2819 if ( result == ASE_OK ) {
\r
2820 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
// Prefer the highest supported rate that is <= 48000 Hz.
2822 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
2823 info.preferredSampleRate = SAMPLE_RATES[i];
\r
2827 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2828 ASIOChannelInfo channelInfo;
\r
2829 channelInfo.channel = 0;
\r
2830 channelInfo.isInput = true;
\r
2831 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2832 result = ASIOGetChannelInfo( &channelInfo );
\r
2833 if ( result != ASE_OK ) {
\r
2834 drivers.removeCurrentDriver();
\r
2835 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2836 errorText_ = errorStream_.str();
\r
2837 error( RtAudioError::WARNING );
\r
// Map the ASIO sample type to the RtAudio format bitmask. MSB variants
// are big-endian; the open path sets doByteSwap for those.
2841 info.nativeFormats = 0;
\r
2842 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2843 info.nativeFormats |= RTAUDIO_SINT16;
\r
2844 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2845 info.nativeFormats |= RTAUDIO_SINT32;
\r
2846 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2847 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2848 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2849 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2850 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2851 info.nativeFormats |= RTAUDIO_SINT24;
\r
2853 if ( info.outputChannels > 0 )
\r
2854 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2855 if ( info.inputChannels > 0 )
\r
2856 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2858 info.probed = true;
\r
2859 drivers.removeCurrentDriver();
\r
// ASIO buffer-switch callback: the driver hands us the half-buffer index
// to fill/read; forward it to the RtApiAsio instance stashed in the
// global asioCallbackInfo (ASIO callbacks cannot carry user data).
2863 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2865 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2866 object->callbackEvent( index );
\r
// Snapshot info for every device into devices_. Called before opening a
// stream, because getDeviceInfo() cannot probe while a driver is loaded
// and instead serves results from this cache.
2869 void RtApiAsio :: saveDeviceInfo( void )
\r
2873 unsigned int nDevices = getDeviceCount();
\r
2874 devices_.resize( nDevices );
\r
2875 for ( unsigned int i=0; i<nDevices; i++ )
\r
2876 devices_[i] = getDeviceInfo( i );
\r
// Open (or extend to duplex) an ASIO stream on `device`:
//   - load/init the driver (only once; a duplex input reuses the output's),
//   - validate channel count, offset, and sample rate,
//   - negotiate buffer size against driver min/max/granularity,
//   - create the ASIO buffers and register the static callbacks,
//   - allocate RtAudio's user/device conversion buffers and query latency.
// On failure the trailing cleanup section disposes buffers, unloads the
// driver, and frees the handle/buffers (duplex-input failures are cleaned
// up by RtApi::openStream instead).
// NOTE(review): lossy extraction — the "goto error" statements, the
// "error:" label, the try { around new AsioHandle, and many closing
// braces fall in the numbering gaps and are missing from this view.
2879 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2880 unsigned int firstChannel, unsigned int sampleRate,
\r
2881 RtAudioFormat format, unsigned int *bufferSize,
\r
2882 RtAudio::StreamOptions *options )
\r
2883 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
2885 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
\r
2887 // For ASIO, a duplex stream MUST use the same driver.
\r
2888 if ( isDuplexInput && stream_.device[0] != device ) {
\r
2889 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2893 char driverName[32];
\r
2894 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2895 if ( result != ASE_OK ) {
\r
2896 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2897 errorText_ = errorStream_.str();
\r
2901 // Only load the driver once for duplex stream.
\r
2902 if ( !isDuplexInput ) {
\r
2903 // The getDeviceInfo() function will not work when a stream is open
\r
2904 // because ASIO does not allow multiple devices to run at the same
\r
2905 // time. Thus, we'll probe the system before opening a stream and
\r
2906 // save the results for use by getDeviceInfo().
\r
2907 this->saveDeviceInfo();
\r
2909 if ( !drivers.loadDriver( driverName ) ) {
\r
2910 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2911 errorText_ = errorStream_.str();
\r
2915 result = ASIOInit( &driverInfo );
\r
2916 if ( result != ASE_OK ) {
\r
2917 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2918 errorText_ = errorStream_.str();
\r
2923 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
\r
2924 bool buffersAllocated = false;
\r
2925 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2926 unsigned int nChannels;
\r
2929 // Check the device channel count.
\r
2930 long inputChannels, outputChannels;
\r
2931 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2932 if ( result != ASE_OK ) {
\r
2933 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2934 errorText_ = errorStream_.str();
\r
2938 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2939 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2940 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2941 errorText_ = errorStream_.str();
\r
2944 stream_.nDeviceChannels[mode] = channels;
\r
2945 stream_.nUserChannels[mode] = channels;
\r
2946 stream_.channelOffset[mode] = firstChannel;
\r
2948 // Verify the sample rate is supported.
\r
2949 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2950 if ( result != ASE_OK ) {
\r
2951 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2952 errorText_ = errorStream_.str();
\r
2956 // Get the current sample rate
\r
2957 ASIOSampleRate currentRate;
\r
// NOTE(review): "¤tRate" below is mojibake — the original is
// "&currentRate" (the "&curren" sequence was mis-decoded as an HTML
// entity). Fix when restoring from the canonical source.
2958 result = ASIOGetSampleRate( ¤tRate );
\r
2959 if ( result != ASE_OK ) {
\r
2960 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2961 errorText_ = errorStream_.str();
\r
2965 // Set the sample rate only if necessary
\r
2966 if ( currentRate != sampleRate ) {
\r
2967 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2968 if ( result != ASE_OK ) {
\r
2969 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2970 errorText_ = errorStream_.str();
\r
2975 // Determine the driver data type.
\r
2976 ASIOChannelInfo channelInfo;
\r
2977 channelInfo.channel = 0;
\r
2978 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2979 else channelInfo.isInput = true;
\r
2980 result = ASIOGetChannelInfo( &channelInfo );
\r
2981 if ( result != ASE_OK ) {
\r
2982 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2983 errorText_ = errorStream_.str();
\r
2987 // Assuming WINDOWS host is always little-endian.
\r
2988 stream_.doByteSwap[mode] = false;
\r
2989 stream_.userFormat = format;
\r
2990 stream_.deviceFormat[mode] = 0;
\r
// Map the ASIO sample type to RtAudio's format; MSB types need swapping.
2991 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2992 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2993 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2995 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2996 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2997 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2999 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
3000 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
3001 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
3003 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
3004 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
3005 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
3007 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
3008 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
3009 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
3012 if ( stream_.deviceFormat[mode] == 0 ) {
\r
3013 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
3014 errorText_ = errorStream_.str();
\r
3018 // Set the buffer size. For a duplex stream, this will end up
\r
3019 // setting the buffer size based on the input constraints, which
\r
3021 long minSize, maxSize, preferSize, granularity;
\r
3022 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
3023 if ( result != ASE_OK ) {
\r
3024 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
3025 errorText_ = errorStream_.str();
\r
3029 if ( isDuplexInput ) {
\r
3030 // When this is the duplex input (output was opened before), then we have to use the same
\r
3031 // buffersize as the output, because it might use the preferred buffer size, which most
\r
3032 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
\r
3033 // So instead of throwing an error, make them equal. The caller uses the reference
\r
3034 // to the "bufferSize" param as usual to set up processing buffers.
\r
3036 *bufferSize = stream_.bufferSize;
\r
// Clamp the requested size into [minSize, maxSize], then honor the
// driver's granularity: -1 means "power of two only", >0 means multiples.
3039 if ( *bufferSize == 0 ) *bufferSize = preferSize;
\r
3040 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3041 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3042 else if ( granularity == -1 ) {
\r
3043 // Make sure bufferSize is a power of two.
\r
3044 int log2_of_min_size = 0;
\r
3045 int log2_of_max_size = 0;
\r
3047 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
3048 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
3049 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
// Pick the power of two closest to the requested size.
3052 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
3053 int min_delta_num = log2_of_min_size;
\r
3055 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
3056 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
3057 if (current_delta < min_delta) {
\r
3058 min_delta = current_delta;
\r
3059 min_delta_num = i;
\r
3063 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
3064 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3065 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3067 else if ( granularity != 0 ) {
\r
3068 // Set to an even multiple of granularity, rounding up.
\r
3069 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
3074 // we don't use it anymore, see above!
\r
3075 // Just left it here for the case...
\r
3076 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
\r
3077 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3082 stream_.bufferSize = *bufferSize;
\r
3083 stream_.nBuffers = 2;
\r
3085 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3086 else stream_.userInterleaved = true;
\r
3088 // ASIO always uses non-interleaved buffers.
\r
3089 stream_.deviceInterleaved[mode] = false;
\r
3091 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3092 if ( handle == 0 ) {
\r
3094 handle = new AsioHandle;
\r
3096 catch ( std::bad_alloc& ) {
\r
3097 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3100 handle->bufferInfos = 0;
\r
3102 // Create a manual-reset event.
\r
3103 handle->condition = CreateEvent( NULL, // no security
\r
3104 TRUE, // manual-reset
\r
3105 FALSE, // non-signaled initially
\r
3106 NULL ); // unnamed
\r
3107 stream_.apiHandle = (void *) handle;
\r
3110 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3111 // and output separately, we'll have to dispose of previously
\r
3112 // created output buffers for a duplex stream.
\r
3113 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3114 ASIODisposeBuffers();
\r
3115 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3118 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3120 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3121 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3122 if ( handle->bufferInfos == NULL ) {
\r
3123 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3124 errorText_ = errorStream_.str();
\r
// Output channels first, then input channels, each with its offset.
3128 ASIOBufferInfo *infos;
\r
3129 infos = handle->bufferInfos;
\r
3130 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3131 infos->isInput = ASIOFalse;
\r
3132 infos->channelNum = i + stream_.channelOffset[0];
\r
3133 infos->buffers[0] = infos->buffers[1] = 0;
\r
3135 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3136 infos->isInput = ASIOTrue;
\r
3137 infos->channelNum = i + stream_.channelOffset[1];
\r
3138 infos->buffers[0] = infos->buffers[1] = 0;
\r
3141 // prepare for callbacks
\r
3142 stream_.sampleRate = sampleRate;
\r
3143 stream_.device[mode] = device;
\r
3144 stream_.mode = isDuplexInput ? DUPLEX : mode;
\r
3146 // store this class instance before registering callbacks, that are going to use it
\r
3147 asioCallbackInfo = &stream_.callbackInfo;
\r
3148 stream_.callbackInfo.object = (void *) this;
\r
3150 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3151 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3152 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3153 asioCallbacks.asioMessage = &asioMessages;
\r
3154 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3155 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3156 if ( result != ASE_OK ) {
\r
3157 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
\r
3158 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
\r
3159 // in that case, let's be naïve and try that instead
\r
3160 *bufferSize = preferSize;
\r
3161 stream_.bufferSize = *bufferSize;
\r
3162 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3165 if ( result != ASE_OK ) {
\r
3166 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3167 errorText_ = errorStream_.str();
\r
3170 buffersAllocated = true;
\r
3171 stream_.state = STREAM_STOPPED;
\r
3173 // Set flags for buffer conversion.
\r
3174 stream_.doConvertBuffer[mode] = false;
\r
3175 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3176 stream_.doConvertBuffer[mode] = true;
\r
3177 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3178 stream_.nUserChannels[mode] > 1 )
\r
3179 stream_.doConvertBuffer[mode] = true;
\r
3181 // Allocate necessary internal buffers
\r
3182 unsigned long bufferBytes;
\r
3183 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3184 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3185 if ( stream_.userBuffer[mode] == NULL ) {
\r
3186 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3190 if ( stream_.doConvertBuffer[mode] ) {
\r
3192 bool makeBuffer = true;
\r
3193 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
// Reuse the output-side device buffer for duplex input when it is large enough.
3194 if ( isDuplexInput && stream_.deviceBuffer ) {
\r
3195 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3196 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3199 if ( makeBuffer ) {
\r
3200 bufferBytes *= *bufferSize;
\r
3201 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3202 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3203 if ( stream_.deviceBuffer == NULL ) {
\r
3204 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3210 // Determine device latencies
\r
3211 long inputLatency, outputLatency;
\r
3212 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3213 if ( result != ASE_OK ) {
\r
3214 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3215 errorText_ = errorStream_.str();
\r
3216 error( RtAudioError::WARNING); // warn but don't fail
\r
3219 stream_.latency[0] = outputLatency;
\r
3220 stream_.latency[1] = inputLatency;
\r
3223 // Setup the buffer conversion information structure. We don't use
\r
3224 // buffers to do channel offsets, so we override that parameter
\r
3226 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
// ---- error-cleanup section (reached via the missing "error:" label) ----
3231 if ( !isDuplexInput ) {
\r
3232 // the cleanup for error in the duplex input, is done by RtApi::openStream
\r
3233 // So we clean up for single channel only
\r
3235 if ( buffersAllocated )
\r
3236 ASIODisposeBuffers();
\r
3238 drivers.removeCurrentDriver();
\r
3241 CloseHandle( handle->condition );
\r
3242 if ( handle->bufferInfos )
\r
3243 free( handle->bufferInfos );
\r
3246 stream_.apiHandle = 0;
\r
3250 if ( stream_.userBuffer[mode] ) {
\r
3251 free( stream_.userBuffer[mode] );
\r
3252 stream_.userBuffer[mode] = 0;
\r
3255 if ( stream_.deviceBuffer ) {
\r
3256 free( stream_.deviceBuffer );
\r
3257 stream_.deviceBuffer = 0;
\r
3262 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
// Close the open stream: stop it if running, dispose the ASIO buffers,
// unload the driver, and free the handle plus user/device buffers.
// NOTE(review): lossy extraction — the ASIOStop() call (between 3273 and
// 3276) and several closing braces fall in the numbering gaps.
3264 void RtApiAsio :: closeStream()
\r
3266 if ( stream_.state == STREAM_CLOSED ) {
\r
3267 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3268 error( RtAudioError::WARNING );
\r
3272 if ( stream_.state == STREAM_RUNNING ) {
\r
3273 stream_.state = STREAM_STOPPED;
\r
3276 ASIODisposeBuffers();
\r
3277 drivers.removeCurrentDriver();
\r
3279 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3281 CloseHandle( handle->condition );
\r
3282 if ( handle->bufferInfos )
\r
3283 free( handle->bufferInfos );
\r
3285 stream_.apiHandle = 0;
\r
// Free both directions' user buffers (index 0 = output, 1 = input).
3288 for ( int i=0; i<2; i++ ) {
\r
3289 if ( stream_.userBuffer[i] ) {
\r
3290 free( stream_.userBuffer[i] );
\r
3291 stream_.userBuffer[i] = 0;
\r
3295 if ( stream_.deviceBuffer ) {
\r
3296 free( stream_.deviceBuffer );
\r
3297 stream_.deviceBuffer = 0;
\r
3300 stream_.mode = UNINITIALIZED;
\r
3301 stream_.state = STREAM_CLOSED;
\r
// File-scope flag: set when a stop has been requested from the callback
// path; cleared on every successful start.
3304 bool stopThreadCalled = false;
\r
// Start the stream: call ASIOStart(), then reset drain bookkeeping and
// the manual-reset condition event used by stopStream() to wait for drain.
3306 void RtApiAsio :: startStream()
\r
3309 if ( stream_.state == STREAM_RUNNING ) {
\r
3310 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3311 error( RtAudioError::WARNING );
\r
3315 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3316 ASIOError result = ASIOStart();
\r
3317 if ( result != ASE_OK ) {
\r
3318 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3319 errorText_ = errorStream_.str();
\r
3323 handle->drainCounter = 0;
\r
3324 handle->internalDrain = false;
\r
3325 ResetEvent( handle->condition );
\r
3326 stream_.state = STREAM_RUNNING;
\r
3330 stopThreadCalled = false;
\r
3332 if ( result == ASE_OK ) return;
\r
3333 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream. For output/duplex, first let the callback drain the
// output (drainCounter = 2) and block on the condition event until the
// callback signals completion, then call ASIOStop().
3336 void RtApiAsio :: stopStream()
\r
3339 if ( stream_.state == STREAM_STOPPED ) {
\r
3340 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3341 error( RtAudioError::WARNING );
\r
3345 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3346 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3347 if ( handle->drainCounter == 0 ) {
\r
3348 handle->drainCounter = 2;
\r
3349 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3353 stream_.state = STREAM_STOPPED;
\r
3355 ASIOError result = ASIOStop();
\r
3356 if ( result != ASE_OK ) {
\r
3357 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3358 errorText_ = errorStream_.str();
\r
3361 if ( result == ASE_OK ) return;
\r
3362 error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the stream. Per the comment below, aborting is deliberately
// identical to stopping for ASIO (buffers must be drained/zeroed anyway).
// NOTE(review): lossy extraction — the tail of this function (the actual
// stopStream() call at original line ~3380 and the closing brace) is
// missing from this view.
3365 void RtApiAsio :: abortStream()
\r
3368 if ( stream_.state == STREAM_STOPPED ) {
\r
3369 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3370 error( RtAudioError::WARNING );
\r
3374 // The following lines were commented-out because some behavior was
\r
3375 // noted where the device buffers need to be zeroed to avoid
\r
3376 // continuing sound, even when the device buffers are completely
\r
3377 // disposed. So now, calling abort is the same as calling stop.
\r
3378 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3379 // handle->drainCounter = 2;
\r
3383 // This function will be called by a spawned thread when the user
\r
3384 // callback function signals that the stream should be stopped or
\r
3385 // aborted. It is necessary to handle it this way because the
\r
3386 // callbackEvent() function must return before the ASIOStop()
\r
3387 // function will return.
\r
// Thread entry point for _beginthreadex; ptr is the stream's CallbackInfo.
3388 static unsigned __stdcall asioStopStream( void *ptr )
\r
3390 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3391 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3393 object->stopStream();
\r
3394 _endthreadex( 0 );
\r
// Per-buffer-switch work: run the user callback (unless draining), then
// move audio between RtAudio's user/device buffers and the ASIO
// half-buffers selected by `bufferIndex`, converting/byte-swapping as
// configured in probeDeviceOpen(). drainCounter protocol: 0 = normal,
// 1 = drain requested by callback, 2 = drain requested by stopStream(),
// >3 = drain complete — signal/stop.
// NOTE(review): lossy extraction — numbering gaps drop several closing
// braces and the third memcpy argument at lines 3516/3531.
3398 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3400 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3401 if ( stream_.state == STREAM_CLOSED ) {
\r
3402 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3403 error( RtAudioError::WARNING );
\r
3407 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3408 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3410 // Check if we were draining the stream and signal if finished.
\r
3411 if ( handle->drainCounter > 3 ) {
\r
3413 stream_.state = STREAM_STOPPING;
\r
// External stop request: wake the thread blocked in stopStream().
3414 if ( handle->internalDrain == false )
\r
3415 SetEvent( handle->condition );
\r
3416 else { // spawn a thread to stop the stream
\r
3417 unsigned threadId;
\r
3418 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3419 &stream_.callbackInfo, 0, &threadId );
\r
3424 // Invoke user callback to get fresh output data UNLESS we are
\r
3425 // draining stream.
\r
3426 if ( handle->drainCounter == 0 ) {
\r
3427 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3428 double streamTime = getStreamTime();
\r
3429 RtAudioStreamStatus status = 0;
\r
3430 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3431 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3434 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3435 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3438 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3439 stream_.bufferSize, streamTime, status, info->userData );
\r
// Callback return 2 = abort immediately; 1 = stop after draining output.
3440 if ( cbReturnValue == 2 ) {
\r
3441 stream_.state = STREAM_STOPPING;
\r
3442 handle->drainCounter = 2;
\r
3443 unsigned threadId;
\r
3444 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3445 &stream_.callbackInfo, 0, &threadId );
\r
3448 else if ( cbReturnValue == 1 ) {
\r
3449 handle->drainCounter = 1;
\r
3450 handle->internalDrain = true;
\r
3454 unsigned int nChannels, bufferBytes, i, j;
\r
3455 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3456 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3458 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3460 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3462 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3463 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3464 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3468 else if ( stream_.doConvertBuffer[0] ) {
\r
// Convert user format/interleaving into the device buffer, then split
// per-channel into the ASIO output half-buffers.
3470 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3471 if ( stream_.doByteSwap[0] )
\r
3472 byteSwapBuffer( stream_.deviceBuffer,
\r
3473 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3474 stream_.deviceFormat[0] );
\r
3476 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3477 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3478 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3479 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3485 if ( stream_.doByteSwap[0] )
\r
3486 byteSwapBuffer( stream_.userBuffer[0],
\r
3487 stream_.bufferSize * stream_.nUserChannels[0],
\r
3488 stream_.userFormat );
\r
3490 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3491 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3492 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3493 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3499 // Don't bother draining input
\r
3500 if ( handle->drainCounter ) {
\r
3501 handle->drainCounter++;
\r
3505 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3507 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3509 if (stream_.doConvertBuffer[1]) {
\r
3511 // Always interleave ASIO input data.
\r
3512 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3513 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3514 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3515 handle->bufferInfos[i].buffers[bufferIndex],
\r
3519 if ( stream_.doByteSwap[1] )
\r
3520 byteSwapBuffer( stream_.deviceBuffer,
\r
3521 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3522 stream_.deviceFormat[1] );
\r
3523 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3527 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3528 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3529 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3530 handle->bufferInfos[i].buffers[bufferIndex],
\r
3535 if ( stream_.doByteSwap[1] )
\r
3536 byteSwapBuffer( stream_.userBuffer[1],
\r
3537 stream_.bufferSize * stream_.nUserChannels[1],
\r
3538 stream_.userFormat );
\r
3543 // The following call was suggested by Malte Clasen. While the API
\r
3544 // documentation indicates it should not be required, some device
\r
3545 // drivers apparently do not function correctly without it.
\r
3546 ASIOOutputReady();
\r
3548 RtApi::tickStreamTime();
\r
// ASIO sampleRateDidChange callback: RtAudio cannot renegotiate a running
// stream, so it stops the stream (guarding against RtAudioError) and
// reports the new rate on stderr.
3552 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3554 // The ASIO documentation says that this usually only happens during
\r
3555 // external sync. Audio processing is not stopped by the driver,
\r
3556 // actual sample rate might not have even changed, maybe only the
\r
3557 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3560 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3562 object->stopStream();
\r
3564 catch ( RtAudioError &exception ) {
\r
3565 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3569 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3572 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3576 switch( selector ) {
\r
3577 case kAsioSelectorSupported:
\r
3578 if ( value == kAsioResetRequest
\r
3579 || value == kAsioEngineVersion
\r
3580 || value == kAsioResyncRequest
\r
3581 || value == kAsioLatenciesChanged
\r
3582 // The following three were added for ASIO 2.0, you don't
\r
3583 // necessarily have to support them.
\r
3584 || value == kAsioSupportsTimeInfo
\r
3585 || value == kAsioSupportsTimeCode
\r
3586 || value == kAsioSupportsInputMonitor)
\r
3589 case kAsioResetRequest:
\r
3590 // Defer the task and perform the reset of the driver during the
\r
3591 // next "safe" situation. You cannot reset the driver right now,
\r
3592 // as this code is called from the driver. Reset the driver is
\r
3593 // done by completely destruct is. I.e. ASIOStop(),
\r
3594 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3596 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3599 case kAsioResyncRequest:
\r
3600 // This informs the application that the driver encountered some
\r
3601 // non-fatal data loss. It is used for synchronization purposes
\r
3602 // of different media. Added mainly to work around the Win16Mutex
\r
3603 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3604 // which could lose data because the Mutex was held too long by
\r
3605 // another thread. However a driver can issue it in other
\r
3606 // situations, too.
\r
3607 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3611 case kAsioLatenciesChanged:
\r
3612 // This will inform the host application that the drivers were
\r
3613 // latencies changed. Beware, it this does not mean that the
\r
3614 // buffer sizes have changed! You might need to update internal
\r
3616 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3619 case kAsioEngineVersion:
\r
3620 // Return the supported ASIO version of the host application. If
\r
3621 // a host application does not implement this selector, ASIO 1.0
\r
3622 // is assumed by the driver.
\r
3625 case kAsioSupportsTimeInfo:
\r
3626 // Informs the driver whether the
\r
3627 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3628 // For compatibility with ASIO 1.0 drivers the host application
\r
3629 // should always support the "old" bufferSwitch method, too.
\r
3632 case kAsioSupportsTimeCode:
\r
3633 // Informs the driver whether application is interested in time
\r
3634 // code info. If an application does not need to know about time
\r
3635 // code, the driver has less work to do.
\r
3642 static const char* getAsioErrorString( ASIOError result )
\r
3647 const char*message;
\r
3650 static const Messages m[] =
\r
3652 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3653 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3654 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3655 { ASE_InvalidMode, "Invalid mode." },
\r
3656 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3657 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3658 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3661 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3662 if ( m[i].value == result ) return m[i].message;
\r
3664 return "Unknown error.";
\r
3667 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3671 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3673 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3674 // - Introduces support for the Windows WASAPI API
\r
3675 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3676 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3677 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3682 #include <audioclient.h>
\r
3684 #include <mmdeviceapi.h>
\r
3685 #include <functiondiscoverykeys_devpkey.h>
\r
3687 //=============================================================================
\r
// Release a COM interface pointer and reset it to NULL.
// Safe to call when the pointer is already NULL (no-op).
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
\r
3696 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3698 //-----------------------------------------------------------------------------
\r
3700 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3701 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3702 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3703 // provide intermediate storage for read / write synchronization.
\r
3704 class WasapiBuffer
\r
3708 : buffer_( NULL ),
\r
3717 // sets the length of the internal ring buffer
\r
3718 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3721 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3723 bufferSize_ = bufferSize;
\r
3728 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3729 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3731 if ( !buffer || // incoming buffer is NULL
\r
3732 bufferSize == 0 || // incoming buffer has no data
\r
3733 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3738 unsigned int relOutIndex = outIndex_;
\r
3739 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3740 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3741 relOutIndex += bufferSize_;
\r
3744 // "in" index can end on the "out" index but cannot begin at it
\r
3745 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3746 return false; // not enough space between "in" index and "out" index
\r
3749 // copy buffer from external to internal
\r
3750 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3751 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3752 int fromInSize = bufferSize - fromZeroSize;
\r
3756 case RTAUDIO_SINT8:
\r
3757 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3758 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3760 case RTAUDIO_SINT16:
\r
3761 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3762 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3764 case RTAUDIO_SINT24:
\r
3765 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3766 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3768 case RTAUDIO_SINT32:
\r
3769 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3770 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3772 case RTAUDIO_FLOAT32:
\r
3773 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3774 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3776 case RTAUDIO_FLOAT64:
\r
3777 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3778 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3782 // update "in" index
\r
3783 inIndex_ += bufferSize;
\r
3784 inIndex_ %= bufferSize_;
\r
3789 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3790 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3792 if ( !buffer || // incoming buffer is NULL
\r
3793 bufferSize == 0 || // incoming buffer has no data
\r
3794 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3799 unsigned int relInIndex = inIndex_;
\r
3800 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3801 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3802 relInIndex += bufferSize_;
\r
3805 // "out" index can begin at and end on the "in" index
\r
3806 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3807 return false; // not enough space between "out" index and "in" index
\r
3810 // copy buffer from internal to external
\r
3811 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3812 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3813 int fromOutSize = bufferSize - fromZeroSize;
\r
3817 case RTAUDIO_SINT8:
\r
3818 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3819 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3821 case RTAUDIO_SINT16:
\r
3822 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3823 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3825 case RTAUDIO_SINT24:
\r
3826 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3827 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3829 case RTAUDIO_SINT32:
\r
3830 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3831 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3833 case RTAUDIO_FLOAT32:
\r
3834 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3835 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3837 case RTAUDIO_FLOAT64:
\r
3838 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3839 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3843 // update "out" index
\r
3844 outIndex_ += bufferSize;
\r
3845 outIndex_ %= bufferSize_;
\r
3852 unsigned int bufferSize_;
\r
3853 unsigned int inIndex_;
\r
3854 unsigned int outIndex_;
\r
3857 //-----------------------------------------------------------------------------
\r
3859 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3860 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3861 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3862 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3863 // one rate and its multiple.
\r
3864 void convertBufferWasapi( char* outBuffer,
\r
3865 const char* inBuffer,
\r
3866 const unsigned int& channelCount,
\r
3867 const unsigned int& inSampleRate,
\r
3868 const unsigned int& outSampleRate,
\r
3869 const unsigned int& inSampleCount,
\r
3870 unsigned int& outSampleCount,
\r
3871 const RtAudioFormat& format )
\r
3873 // calculate the new outSampleCount and relative sampleStep
\r
3874 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3875 float sampleStep = 1.0f / sampleRatio;
\r
3876 float inSampleFraction = 0.0f;
\r
3878 outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );
\r
3880 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3881 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3883 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3887 case RTAUDIO_SINT8:
\r
3888 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3890 case RTAUDIO_SINT16:
\r
3891 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3893 case RTAUDIO_SINT24:
\r
3894 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3896 case RTAUDIO_SINT32:
\r
3897 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3899 case RTAUDIO_FLOAT32:
\r
3900 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3902 case RTAUDIO_FLOAT64:
\r
3903 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3907 // jump to next in sample
\r
3908 inSampleFraction += sampleStep;
\r
3912 //-----------------------------------------------------------------------------
\r
3914 // A structure to hold various information related to the WASAPI implementation.
\r
3915 struct WasapiHandle
\r
3917 IAudioClient* captureAudioClient;
\r
3918 IAudioClient* renderAudioClient;
\r
3919 IAudioCaptureClient* captureClient;
\r
3920 IAudioRenderClient* renderClient;
\r
3921 HANDLE captureEvent;
\r
3922 HANDLE renderEvent;
\r
3925 : captureAudioClient( NULL ),
\r
3926 renderAudioClient( NULL ),
\r
3927 captureClient( NULL ),
\r
3928 renderClient( NULL ),
\r
3929 captureEvent( NULL ),
\r
3930 renderEvent( NULL ) {}
\r
3933 //=============================================================================
\r
3935 RtApiWasapi::RtApiWasapi()
\r
3936 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3938 // WASAPI can run either apartment or multi-threaded
\r
3939 HRESULT hr = CoInitialize( NULL );
\r
3940 if ( !FAILED( hr ) )
\r
3941 coInitialized_ = true;
\r
3943 // Instantiate device enumerator
\r
3944 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3945 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3946 ( void** ) &deviceEnumerator_ );
\r
3948 if ( FAILED( hr ) ) {
\r
3949 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3950 error( RtAudioError::DRIVER_ERROR );
\r
3954 //-----------------------------------------------------------------------------
\r
3956 RtApiWasapi::~RtApiWasapi()
\r
3958 if ( stream_.state != STREAM_CLOSED )
\r
3961 SAFE_RELEASE( deviceEnumerator_ );
\r
3963 // If this object previously called CoInitialize()
\r
3964 if ( coInitialized_ )
\r
3968 //=============================================================================
\r
3970 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3972 unsigned int captureDeviceCount = 0;
\r
3973 unsigned int renderDeviceCount = 0;
\r
3975 IMMDeviceCollection* captureDevices = NULL;
\r
3976 IMMDeviceCollection* renderDevices = NULL;
\r
3978 // Count capture devices
\r
3979 errorText_.clear();
\r
3980 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3981 if ( FAILED( hr ) ) {
\r
3982 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3986 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3987 if ( FAILED( hr ) ) {
\r
3988 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3992 // Count render devices
\r
3993 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3994 if ( FAILED( hr ) ) {
\r
3995 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
3999 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4000 if ( FAILED( hr ) ) {
\r
4001 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
4006 // release all references
\r
4007 SAFE_RELEASE( captureDevices );
\r
4008 SAFE_RELEASE( renderDevices );
\r
4010 if ( errorText_.empty() )
\r
4011 return captureDeviceCount + renderDeviceCount;
\r
4013 error( RtAudioError::DRIVER_ERROR );
\r
4017 //-----------------------------------------------------------------------------
\r
4019 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
4021 RtAudio::DeviceInfo info;
\r
4022 unsigned int captureDeviceCount = 0;
\r
4023 unsigned int renderDeviceCount = 0;
\r
4024 std::string defaultDeviceName;
\r
4025 bool isCaptureDevice = false;
\r
4027 PROPVARIANT deviceNameProp;
\r
4028 PROPVARIANT defaultDeviceNameProp;
\r
4030 IMMDeviceCollection* captureDevices = NULL;
\r
4031 IMMDeviceCollection* renderDevices = NULL;
\r
4032 IMMDevice* devicePtr = NULL;
\r
4033 IMMDevice* defaultDevicePtr = NULL;
\r
4034 IAudioClient* audioClient = NULL;
\r
4035 IPropertyStore* devicePropStore = NULL;
\r
4036 IPropertyStore* defaultDevicePropStore = NULL;
\r
4038 WAVEFORMATEX* deviceFormat = NULL;
\r
4039 WAVEFORMATEX* closestMatchFormat = NULL;
\r
4042 info.probed = false;
\r
4044 // Count capture devices
\r
4045 errorText_.clear();
\r
4046 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4047 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4048 if ( FAILED( hr ) ) {
\r
4049 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
4053 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4054 if ( FAILED( hr ) ) {
\r
4055 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
4059 // Count render devices
\r
4060 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4061 if ( FAILED( hr ) ) {
\r
4062 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
4066 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4067 if ( FAILED( hr ) ) {
\r
4068 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
4072 // validate device index
\r
4073 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4074 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
4075 errorType = RtAudioError::INVALID_USE;
\r
4079 // determine whether index falls within capture or render devices
\r
4080 if ( device >= renderDeviceCount ) {
\r
4081 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4082 if ( FAILED( hr ) ) {
\r
4083 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4086 isCaptureDevice = true;
\r
4089 hr = renderDevices->Item( device, &devicePtr );
\r
4090 if ( FAILED( hr ) ) {
\r
4091 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4094 isCaptureDevice = false;
\r
4097 // get default device name
\r
4098 if ( isCaptureDevice ) {
\r
4099 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4100 if ( FAILED( hr ) ) {
\r
4101 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4106 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4107 if ( FAILED( hr ) ) {
\r
4108 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4113 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4114 if ( FAILED( hr ) ) {
\r
4115 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4118 PropVariantInit( &defaultDeviceNameProp );
\r
4120 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4121 if ( FAILED( hr ) ) {
\r
4122 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4126 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
\r
4129 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4130 if ( FAILED( hr ) ) {
\r
4131 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4135 PropVariantInit( &deviceNameProp );
\r
4137 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4138 if ( FAILED( hr ) ) {
\r
4139 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4143 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
\r
4146 if ( isCaptureDevice ) {
\r
4147 info.isDefaultInput = info.name == defaultDeviceName;
\r
4148 info.isDefaultOutput = false;
\r
4151 info.isDefaultInput = false;
\r
4152 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4156 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4157 if ( FAILED( hr ) ) {
\r
4158 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4162 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4163 if ( FAILED( hr ) ) {
\r
4164 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4168 if ( isCaptureDevice ) {
\r
4169 info.inputChannels = deviceFormat->nChannels;
\r
4170 info.outputChannels = 0;
\r
4171 info.duplexChannels = 0;
\r
4174 info.inputChannels = 0;
\r
4175 info.outputChannels = deviceFormat->nChannels;
\r
4176 info.duplexChannels = 0;
\r
4180 info.sampleRates.clear();
\r
4182 // allow support for all sample rates as we have a built-in sample rate converter
\r
4183 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4184 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4186 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
\r
4189 info.nativeFormats = 0;
\r
4191 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4192 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4193 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4195 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4196 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4198 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4199 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4202 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4203 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4204 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4206 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4207 info.nativeFormats |= RTAUDIO_SINT8;
\r
4209 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4210 info.nativeFormats |= RTAUDIO_SINT16;
\r
4212 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4213 info.nativeFormats |= RTAUDIO_SINT24;
\r
4215 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4216 info.nativeFormats |= RTAUDIO_SINT32;
\r
4221 info.probed = true;
\r
4224 // release all references
\r
4225 PropVariantClear( &deviceNameProp );
\r
4226 PropVariantClear( &defaultDeviceNameProp );
\r
4228 SAFE_RELEASE( captureDevices );
\r
4229 SAFE_RELEASE( renderDevices );
\r
4230 SAFE_RELEASE( devicePtr );
\r
4231 SAFE_RELEASE( defaultDevicePtr );
\r
4232 SAFE_RELEASE( audioClient );
\r
4233 SAFE_RELEASE( devicePropStore );
\r
4234 SAFE_RELEASE( defaultDevicePropStore );
\r
4236 CoTaskMemFree( deviceFormat );
\r
4237 CoTaskMemFree( closestMatchFormat );
\r
4239 if ( !errorText_.empty() )
\r
4240 error( errorType );
\r
4244 //-----------------------------------------------------------------------------
\r
4246 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4248 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4249 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4257 //-----------------------------------------------------------------------------
\r
4259 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4261 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4262 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4270 //-----------------------------------------------------------------------------
\r
4272 void RtApiWasapi::closeStream( void )
\r
4274 if ( stream_.state == STREAM_CLOSED ) {
\r
4275 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4276 error( RtAudioError::WARNING );
\r
4280 if ( stream_.state != STREAM_STOPPED )
\r
4283 // clean up stream memory
\r
4284 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4285 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4287 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4288 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4290 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4291 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4293 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4294 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4296 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4297 stream_.apiHandle = NULL;
\r
4299 for ( int i = 0; i < 2; i++ ) {
\r
4300 if ( stream_.userBuffer[i] ) {
\r
4301 free( stream_.userBuffer[i] );
\r
4302 stream_.userBuffer[i] = 0;
\r
4306 if ( stream_.deviceBuffer ) {
\r
4307 free( stream_.deviceBuffer );
\r
4308 stream_.deviceBuffer = 0;
\r
4311 // update stream state
\r
4312 stream_.state = STREAM_CLOSED;
\r
4315 //-----------------------------------------------------------------------------
\r
4317 void RtApiWasapi::startStream( void )
\r
4321 if ( stream_.state == STREAM_RUNNING ) {
\r
4322 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4323 error( RtAudioError::WARNING );
\r
4327 // update stream state
\r
4328 stream_.state = STREAM_RUNNING;
\r
4330 // create WASAPI stream thread
\r
4331 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4333 if ( !stream_.callbackInfo.thread ) {
\r
4334 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4335 error( RtAudioError::THREAD_ERROR );
\r
4338 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4339 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4343 //-----------------------------------------------------------------------------
\r
4345 void RtApiWasapi::stopStream( void )
\r
4349 if ( stream_.state == STREAM_STOPPED ) {
\r
4350 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4351 error( RtAudioError::WARNING );
\r
4355 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4356 stream_.state = STREAM_STOPPING;
\r
4358 // wait until stream thread is stopped
\r
4359 while( stream_.state != STREAM_STOPPED ) {
\r
4363 // Wait for the last buffer to play before stopping.
\r
4364 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4366 // stop capture client if applicable
\r
4367 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4368 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4369 if ( FAILED( hr ) ) {
\r
4370 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4371 error( RtAudioError::DRIVER_ERROR );
\r
4376 // stop render client if applicable
\r
4377 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4378 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4379 if ( FAILED( hr ) ) {
\r
4380 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4381 error( RtAudioError::DRIVER_ERROR );
\r
4386 // close thread handle
\r
4387 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4388 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4389 error( RtAudioError::THREAD_ERROR );
\r
4393 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4396 //-----------------------------------------------------------------------------
\r
4398 void RtApiWasapi::abortStream( void )
\r
4402 if ( stream_.state == STREAM_STOPPED ) {
\r
4403 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4404 error( RtAudioError::WARNING );
\r
4408 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4409 stream_.state = STREAM_STOPPING;
\r
4411 // wait until stream thread is stopped
\r
4412 while ( stream_.state != STREAM_STOPPED ) {
\r
4416 // stop capture client if applicable
\r
4417 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4418 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4419 if ( FAILED( hr ) ) {
\r
4420 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4421 error( RtAudioError::DRIVER_ERROR );
\r
4426 // stop render client if applicable
\r
4427 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4428 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4429 if ( FAILED( hr ) ) {
\r
4430 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4431 error( RtAudioError::DRIVER_ERROR );
\r
4436 // close thread handle
\r
4437 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4438 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4439 error( RtAudioError::THREAD_ERROR );
\r
4443 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4446 //-----------------------------------------------------------------------------
\r
4448 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4449 unsigned int firstChannel, unsigned int sampleRate,
\r
4450 RtAudioFormat format, unsigned int* bufferSize,
\r
4451 RtAudio::StreamOptions* options )
\r
4453 bool methodResult = FAILURE;
\r
4454 unsigned int captureDeviceCount = 0;
\r
4455 unsigned int renderDeviceCount = 0;
\r
4457 IMMDeviceCollection* captureDevices = NULL;
\r
4458 IMMDeviceCollection* renderDevices = NULL;
\r
4459 IMMDevice* devicePtr = NULL;
\r
4460 WAVEFORMATEX* deviceFormat = NULL;
\r
4461 unsigned int bufferBytes;
\r
4462 stream_.state = STREAM_STOPPED;
\r
4464 // create API Handle if not already created
\r
4465 if ( !stream_.apiHandle )
\r
4466 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4468 // Count capture devices
\r
4469 errorText_.clear();
\r
4470 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4471 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4472 if ( FAILED( hr ) ) {
\r
4473 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4477 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4478 if ( FAILED( hr ) ) {
\r
4479 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4483 // Count render devices
\r
4484 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4485 if ( FAILED( hr ) ) {
\r
4486 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4490 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4491 if ( FAILED( hr ) ) {
\r
4492 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4496 // validate device index
\r
4497 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4498 errorType = RtAudioError::INVALID_USE;
\r
4499 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4503 // determine whether index falls within capture or render devices
\r
4504 if ( device >= renderDeviceCount ) {
\r
4505 if ( mode != INPUT ) {
\r
4506 errorType = RtAudioError::INVALID_USE;
\r
4507 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4511 // retrieve captureAudioClient from devicePtr
\r
4512 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4514 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4515 if ( FAILED( hr ) ) {
\r
4516 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4520 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4521 NULL, ( void** ) &captureAudioClient );
\r
4522 if ( FAILED( hr ) ) {
\r
4523 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4527 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4528 if ( FAILED( hr ) ) {
\r
4529 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4533 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4534 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4537 if ( mode != OUTPUT ) {
\r
4538 errorType = RtAudioError::INVALID_USE;
\r
4539 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4543 // retrieve renderAudioClient from devicePtr
\r
4544 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4546 hr = renderDevices->Item( device, &devicePtr );
\r
4547 if ( FAILED( hr ) ) {
\r
4548 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4552 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4553 NULL, ( void** ) &renderAudioClient );
\r
4554 if ( FAILED( hr ) ) {
\r
4555 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4559 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4560 if ( FAILED( hr ) ) {
\r
4561 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4565 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4566 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4569 // fill stream data
\r
4570 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4571 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4572 stream_.mode = DUPLEX;
\r
4575 stream_.mode = mode;
\r
4578 stream_.device[mode] = device;
\r
4579 stream_.doByteSwap[mode] = false;
\r
4580 stream_.sampleRate = sampleRate;
\r
4581 stream_.bufferSize = *bufferSize;
\r
4582 stream_.nBuffers = 1;
\r
4583 stream_.nUserChannels[mode] = channels;
\r
4584 stream_.channelOffset[mode] = firstChannel;
\r
4585 stream_.userFormat = format;
\r
4586 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4588 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4589 stream_.userInterleaved = false;
\r
4591 stream_.userInterleaved = true;
\r
4592 stream_.deviceInterleaved[mode] = true;
\r
4594 // Set flags for buffer conversion.
\r
4595 stream_.doConvertBuffer[mode] = false;
\r
4596 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
\r
4597 stream_.nUserChannels != stream_.nDeviceChannels )
\r
4598 stream_.doConvertBuffer[mode] = true;
\r
4599 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4600 stream_.nUserChannels[mode] > 1 )
\r
4601 stream_.doConvertBuffer[mode] = true;
\r
4603 if ( stream_.doConvertBuffer[mode] )
\r
4604 setConvertInfo( mode, 0 );
\r
4606 // Allocate necessary internal buffers
\r
4607 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4609 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4610 if ( !stream_.userBuffer[mode] ) {
\r
4611 errorType = RtAudioError::MEMORY_ERROR;
\r
4612 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4616 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4617 stream_.callbackInfo.priority = 15;
\r
4619 stream_.callbackInfo.priority = 0;
\r
4621 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4622 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4624 methodResult = SUCCESS;
\r
4628 SAFE_RELEASE( captureDevices );
\r
4629 SAFE_RELEASE( renderDevices );
\r
4630 SAFE_RELEASE( devicePtr );
\r
4631 CoTaskMemFree( deviceFormat );
\r
4633 // if method failed, close the stream
\r
4634 if ( methodResult == FAILURE )
\r
4637 if ( !errorText_.empty() )
\r
4638 error( errorType );
\r
4639 return methodResult;
\r
4642 //=============================================================================
\r
4644 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4647 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4652 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4655 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4660 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4663 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4668 //-----------------------------------------------------------------------------
\r
4670 void RtApiWasapi::wasapiThread()
\r
4672 // as this is a new thread, we must CoInitialize it
\r
4673 CoInitialize( NULL );
\r
4677 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4678 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4679 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
\r
4680 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
\r
4681 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
\r
4682 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
\r
4684 WAVEFORMATEX* captureFormat = NULL;
\r
4685 WAVEFORMATEX* renderFormat = NULL;
\r
4686 float captureSrRatio = 0.0f;
\r
4687 float renderSrRatio = 0.0f;
\r
4688 WasapiBuffer captureBuffer;
\r
4689 WasapiBuffer renderBuffer;
\r
4691 // declare local stream variables
\r
4692 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
\r
4693 BYTE* streamBuffer = NULL;
\r
4694 unsigned long captureFlags = 0;
\r
4695 unsigned int bufferFrameCount = 0;
\r
4696 unsigned int numFramesPadding = 0;
\r
4697 unsigned int convBufferSize = 0;
\r
4698 bool callbackPushed = false;
\r
4699 bool callbackPulled = false;
\r
4700 bool callbackStopped = false;
\r
4701 int callbackResult = 0;
\r
4703 // convBuffer is used to store converted buffers between WASAPI and the user
\r
4704 char* convBuffer = NULL;
\r
4705 unsigned int convBuffSize = 0;
\r
4706 unsigned int deviceBuffSize = 0;
\r
4708 errorText_.clear();
\r
4709 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4711 // Attempt to assign "Pro Audio" characteristic to thread
\r
4712 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
\r
4714 DWORD taskIndex = 0;
\r
4715 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
\r
4716 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
\r
4717 FreeLibrary( AvrtDll );
\r
4720 // start capture stream if applicable
\r
4721 if ( captureAudioClient ) {
\r
4722 hr = captureAudioClient->GetMixFormat( &captureFormat );
\r
4723 if ( FAILED( hr ) ) {
\r
4724 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4728 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
\r
4730 // initialize capture stream according to desire buffer size
\r
4731 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
\r
4732 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
\r
4734 if ( !captureClient ) {
\r
4735 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4736 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4737 desiredBufferPeriod,
\r
4738 desiredBufferPeriod,
\r
4741 if ( FAILED( hr ) ) {
\r
4742 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
\r
4746 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
\r
4747 ( void** ) &captureClient );
\r
4748 if ( FAILED( hr ) ) {
\r
4749 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
\r
4753 // configure captureEvent to trigger on every available capture buffer
\r
4754 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4755 if ( !captureEvent ) {
\r
4756 errorType = RtAudioError::SYSTEM_ERROR;
\r
4757 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
\r
4761 hr = captureAudioClient->SetEventHandle( captureEvent );
\r
4762 if ( FAILED( hr ) ) {
\r
4763 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
\r
4767 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
\r
4768 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
\r
4771 unsigned int inBufferSize = 0;
\r
4772 hr = captureAudioClient->GetBufferSize( &inBufferSize );
\r
4773 if ( FAILED( hr ) ) {
\r
4774 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
\r
4778 // scale outBufferSize according to stream->user sample rate ratio
\r
4779 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
\r
4780 inBufferSize *= stream_.nDeviceChannels[INPUT];
\r
4782 // set captureBuffer size
\r
4783 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
\r
4785 // reset the capture stream
\r
4786 hr = captureAudioClient->Reset();
\r
4787 if ( FAILED( hr ) ) {
\r
4788 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
\r
4792 // start the capture stream
\r
4793 hr = captureAudioClient->Start();
\r
4794 if ( FAILED( hr ) ) {
\r
4795 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
\r
4800 // start render stream if applicable
\r
4801 if ( renderAudioClient ) {
\r
4802 hr = renderAudioClient->GetMixFormat( &renderFormat );
\r
4803 if ( FAILED( hr ) ) {
\r
4804 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4808 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
\r
4810 // initialize render stream according to desire buffer size
\r
4811 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
\r
4812 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
\r
4814 if ( !renderClient ) {
\r
4815 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4816 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4817 desiredBufferPeriod,
\r
4818 desiredBufferPeriod,
\r
4821 if ( FAILED( hr ) ) {
\r
4822 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
\r
4826 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
\r
4827 ( void** ) &renderClient );
\r
4828 if ( FAILED( hr ) ) {
\r
4829 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
\r
4833 // configure renderEvent to trigger on every available render buffer
\r
4834 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4835 if ( !renderEvent ) {
\r
4836 errorType = RtAudioError::SYSTEM_ERROR;
\r
4837 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
\r
4841 hr = renderAudioClient->SetEventHandle( renderEvent );
\r
4842 if ( FAILED( hr ) ) {
\r
4843 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
\r
4847 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
\r
4848 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
\r
4851 unsigned int outBufferSize = 0;
\r
4852 hr = renderAudioClient->GetBufferSize( &outBufferSize );
\r
4853 if ( FAILED( hr ) ) {
\r
4854 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
\r
4858 // scale inBufferSize according to user->stream sample rate ratio
\r
4859 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
\r
4860 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
\r
4862 // set renderBuffer size
\r
4863 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4865 // reset the render stream
\r
4866 hr = renderAudioClient->Reset();
\r
4867 if ( FAILED( hr ) ) {
\r
4868 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
\r
4872 // start the render stream
\r
4873 hr = renderAudioClient->Start();
\r
4874 if ( FAILED( hr ) ) {
\r
4875 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
\r
4880 if ( stream_.mode == INPUT ) {
\r
4881 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4882 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4884 else if ( stream_.mode == OUTPUT ) {
\r
4885 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4886 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4888 else if ( stream_.mode == DUPLEX ) {
\r
4889 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4890 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4891 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4892 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4895 convBuffer = ( char* ) malloc( convBuffSize );
\r
4896 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
\r
4897 if ( !convBuffer || !stream_.deviceBuffer ) {
\r
4898 errorType = RtAudioError::MEMORY_ERROR;
\r
4899 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
\r
4903 // stream process loop
\r
4904 while ( stream_.state != STREAM_STOPPING ) {
\r
4905 if ( !callbackPulled ) {
\r
4908 // 1. Pull callback buffer from inputBuffer
\r
4909 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
\r
4910 // Convert callback buffer to user format
\r
4912 if ( captureAudioClient ) {
\r
4913 // Pull callback buffer from inputBuffer
\r
4914 callbackPulled = captureBuffer.pullBuffer( convBuffer,
\r
4915 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
\r
4916 stream_.deviceFormat[INPUT] );
\r
4918 if ( callbackPulled ) {
\r
4919 // Convert callback buffer to user sample rate
\r
4920 convertBufferWasapi( stream_.deviceBuffer,
\r
4922 stream_.nDeviceChannels[INPUT],
\r
4923 captureFormat->nSamplesPerSec,
\r
4924 stream_.sampleRate,
\r
4925 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
\r
4927 stream_.deviceFormat[INPUT] );
\r
4929 if ( stream_.doConvertBuffer[INPUT] ) {
\r
4930 // Convert callback buffer to user format
\r
4931 convertBuffer( stream_.userBuffer[INPUT],
\r
4932 stream_.deviceBuffer,
\r
4933 stream_.convertInfo[INPUT] );
\r
4936 // no further conversion, simple copy deviceBuffer to userBuffer
\r
4937 memcpy( stream_.userBuffer[INPUT],
\r
4938 stream_.deviceBuffer,
\r
4939 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
\r
4944 // if there is no capture stream, set callbackPulled flag
\r
4945 callbackPulled = true;
\r
4948 // Execute Callback
\r
4949 // ================
\r
4950 // 1. Execute user callback method
\r
4951 // 2. Handle return value from callback
\r
4953 // if callback has not requested the stream to stop
\r
4954 if ( callbackPulled && !callbackStopped ) {
\r
4955 // Execute user callback method
\r
4956 callbackResult = callback( stream_.userBuffer[OUTPUT],
\r
4957 stream_.userBuffer[INPUT],
\r
4958 stream_.bufferSize,
\r
4960 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
\r
4961 stream_.callbackInfo.userData );
\r
4963 // Handle return value from callback
\r
4964 if ( callbackResult == 1 ) {
\r
4965 // instantiate a thread to stop this thread
\r
4966 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
\r
4967 if ( !threadHandle ) {
\r
4968 errorType = RtAudioError::THREAD_ERROR;
\r
4969 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
\r
4972 else if ( !CloseHandle( threadHandle ) ) {
\r
4973 errorType = RtAudioError::THREAD_ERROR;
\r
4974 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
\r
4978 callbackStopped = true;
\r
4980 else if ( callbackResult == 2 ) {
\r
4981 // instantiate a thread to stop this thread
\r
4982 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
\r
4983 if ( !threadHandle ) {
\r
4984 errorType = RtAudioError::THREAD_ERROR;
\r
4985 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
\r
4988 else if ( !CloseHandle( threadHandle ) ) {
\r
4989 errorType = RtAudioError::THREAD_ERROR;
\r
4990 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
\r
4994 callbackStopped = true;
\r
4999 // Callback Output
\r
5000 // ===============
\r
5001 // 1. Convert callback buffer to stream format
\r
5002 // 2. Convert callback buffer to stream sample rate and channel count
\r
5003 // 3. Push callback buffer into outputBuffer
\r
5005 if ( renderAudioClient && callbackPulled ) {
\r
5006 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
5007 // Convert callback buffer to stream format
\r
5008 convertBuffer( stream_.deviceBuffer,
\r
5009 stream_.userBuffer[OUTPUT],
\r
5010 stream_.convertInfo[OUTPUT] );
\r
5014 // Convert callback buffer to stream sample rate
\r
5015 convertBufferWasapi( convBuffer,
\r
5016 stream_.deviceBuffer,
\r
5017 stream_.nDeviceChannels[OUTPUT],
\r
5018 stream_.sampleRate,
\r
5019 renderFormat->nSamplesPerSec,
\r
5020 stream_.bufferSize,
\r
5022 stream_.deviceFormat[OUTPUT] );
\r
5024 // Push callback buffer into outputBuffer
\r
5025 callbackPushed = renderBuffer.pushBuffer( convBuffer,
\r
5026 convBufferSize * stream_.nDeviceChannels[OUTPUT],
\r
5027 stream_.deviceFormat[OUTPUT] );
\r
5030 // if there is no render stream, set callbackPushed flag
\r
5031 callbackPushed = true;
\r
5036 // 1. Get capture buffer from stream
\r
5037 // 2. Push capture buffer into inputBuffer
\r
5038 // 3. If 2. was successful: Release capture buffer
\r
5040 if ( captureAudioClient ) {
\r
5041 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
\r
5042 if ( !callbackPulled ) {
\r
5043 WaitForSingleObject( captureEvent, INFINITE );
\r
5046 // Get capture buffer from stream
\r
5047 hr = captureClient->GetBuffer( &streamBuffer,
\r
5048 &bufferFrameCount,
\r
5049 &captureFlags, NULL, NULL );
\r
5050 if ( FAILED( hr ) ) {
\r
5051 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
\r
5055 if ( bufferFrameCount != 0 ) {
\r
5056 // Push capture buffer into inputBuffer
\r
5057 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
\r
5058 bufferFrameCount * stream_.nDeviceChannels[INPUT],
\r
5059 stream_.deviceFormat[INPUT] ) )
\r
5061 // Release capture buffer
\r
5062 hr = captureClient->ReleaseBuffer( bufferFrameCount );
\r
5063 if ( FAILED( hr ) ) {
\r
5064 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5070 // Inform WASAPI that capture was unsuccessful
\r
5071 hr = captureClient->ReleaseBuffer( 0 );
\r
5072 if ( FAILED( hr ) ) {
\r
5073 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5080 // Inform WASAPI that capture was unsuccessful
\r
5081 hr = captureClient->ReleaseBuffer( 0 );
\r
5082 if ( FAILED( hr ) ) {
\r
5083 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5091 // 1. Get render buffer from stream
\r
5092 // 2. Pull next buffer from outputBuffer
\r
5093 // 3. If 2. was successful: Fill render buffer with next buffer
\r
5094 // Release render buffer
\r
5096 if ( renderAudioClient ) {
\r
5097 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
\r
5098 if ( callbackPulled && !callbackPushed ) {
\r
5099 WaitForSingleObject( renderEvent, INFINITE );
\r
5102 // Get render buffer from stream
\r
5103 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
\r
5104 if ( FAILED( hr ) ) {
\r
5105 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
\r
5109 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
\r
5110 if ( FAILED( hr ) ) {
\r
5111 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
\r
5115 bufferFrameCount -= numFramesPadding;
\r
5117 if ( bufferFrameCount != 0 ) {
\r
5118 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
\r
5119 if ( FAILED( hr ) ) {
\r
5120 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
\r
5124 // Pull next buffer from outputBuffer
\r
5125 // Fill render buffer with next buffer
\r
5126 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
\r
5127 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
\r
5128 stream_.deviceFormat[OUTPUT] ) )
\r
5130 // Release render buffer
\r
5131 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
\r
5132 if ( FAILED( hr ) ) {
\r
5133 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5139 // Inform WASAPI that render was unsuccessful
\r
5140 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5141 if ( FAILED( hr ) ) {
\r
5142 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5149 // Inform WASAPI that render was unsuccessful
\r
5150 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5151 if ( FAILED( hr ) ) {
\r
5152 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5158 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
\r
5159 if ( callbackPushed ) {
\r
5160 callbackPulled = false;
\r
5163 // tick stream time
\r
5164 RtApi::tickStreamTime();
\r
5169 CoTaskMemFree( captureFormat );
\r
5170 CoTaskMemFree( renderFormat );
\r
5172 free ( convBuffer );
\r
5176 // update stream state
\r
5177 stream_.state = STREAM_STOPPED;
\r
5179 if ( errorText_.empty() )
\r
5182 error( errorType );
\r
5185 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5189 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5191 // Modified by Robin Davies, October 2005
\r
5192 // - Improvements to DirectX pointer chasing.
\r
5193 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5194 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5195 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5196 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5198 #include <dsound.h>
\r
5199 #include <assert.h>
\r
5200 #include <algorithm>
\r
5202 #if defined(__MINGW32__)
\r
5203 // missing from latest mingw winapi
\r
5204 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5205 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5206 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5207 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5210 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5212 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5213 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5216 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5218 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5219 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5220 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5221 return pointer >= earlierPointer && pointer < laterPointer;
\r
5224 // A structure to hold various information related to the DirectSound
\r
5225 // API implementation.
\r
5227 unsigned int drainCounter; // Tracks callback counts when draining
\r
5228 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
5232 UINT bufferPointer[2];
\r
5233 DWORD dsBufferSize[2];
\r
5234 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
5238 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5241 // Declarations for utility functions, callbacks, and structures
\r
5242 // specific to the DirectSound implementation.
\r
5243 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5244 LPCTSTR description,
\r
5246 LPVOID lpContext );
\r
5248 static const char* getErrorString( int code );
\r
5250 static unsigned __stdcall callbackHandler( void *ptr );
\r
5259 : found(false) { validId[0] = false; validId[1] = false; }
\r
5262 struct DsProbeData {
\r
5264 std::vector<struct DsDevice>* dsDevices;
\r
5267 RtApiDs :: RtApiDs()
\r
5269 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5270 // accept whatever the mainline chose for a threading model.
\r
5271 coInitialized_ = false;
\r
5272 HRESULT hr = CoInitialize( NULL );
\r
5273 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5276 RtApiDs :: ~RtApiDs()
\r
5278 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5279 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5282 // The DirectSound default output is always the first device.
\r
5283 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5288 // The DirectSound default input is always the first input device,
\r
5289 // which is the first capture device enumerated.
\r
5290 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
5295 unsigned int RtApiDs :: getDeviceCount( void )
\r
5297 // Set query flag for previously found devices to false, so that we
\r
5298 // can check for any devices that have disappeared.
\r
5299 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5300 dsDevices[i].found = false;
\r
5302 // Query DirectSound devices.
\r
5303 struct DsProbeData probeInfo;
\r
5304 probeInfo.isInput = false;
\r
5305 probeInfo.dsDevices = &dsDevices;
\r
5306 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5307 if ( FAILED( result ) ) {
\r
5308 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5309 errorText_ = errorStream_.str();
\r
5310 error( RtAudioError::WARNING );
\r
5313 // Query DirectSoundCapture devices.
\r
5314 probeInfo.isInput = true;
\r
5315 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5316 if ( FAILED( result ) ) {
\r
5317 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5318 errorText_ = errorStream_.str();
\r
5319 error( RtAudioError::WARNING );
\r
5322 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
\r
5323 for ( unsigned int i=0; i<dsDevices.size(); ) {
\r
5324 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
\r
5328 return static_cast<unsigned int>(dsDevices.size());
\r
5331 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5333 RtAudio::DeviceInfo info;
\r
5334 info.probed = false;
\r
5336 if ( dsDevices.size() == 0 ) {
\r
5337 // Force a query of all devices
\r
5339 if ( dsDevices.size() == 0 ) {
\r
5340 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5341 error( RtAudioError::INVALID_USE );
\r
5346 if ( device >= dsDevices.size() ) {
\r
5347 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5348 error( RtAudioError::INVALID_USE );
\r
5353 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5355 LPDIRECTSOUND output;
\r
5357 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5358 if ( FAILED( result ) ) {
\r
5359 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5360 errorText_ = errorStream_.str();
\r
5361 error( RtAudioError::WARNING );
\r
5365 outCaps.dwSize = sizeof( outCaps );
\r
5366 result = output->GetCaps( &outCaps );
\r
5367 if ( FAILED( result ) ) {
\r
5368 output->Release();
\r
5369 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5370 errorText_ = errorStream_.str();
\r
5371 error( RtAudioError::WARNING );
\r
5375 // Get output channel information.
\r
5376 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5378 // Get sample rate information.
\r
5379 info.sampleRates.clear();
\r
5380 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5381 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5382 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
\r
5383 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5385 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
5386 info.preferredSampleRate = SAMPLE_RATES[k];
\r
5390 // Get format information.
\r
5391 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5392 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5394 output->Release();
\r
5396 if ( getDefaultOutputDevice() == device )
\r
5397 info.isDefaultOutput = true;
\r
5399 if ( dsDevices[ device ].validId[1] == false ) {
\r
5400 info.name = dsDevices[ device ].name;
\r
5401 info.probed = true;
\r
5407 LPDIRECTSOUNDCAPTURE input;
\r
5408 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5409 if ( FAILED( result ) ) {
\r
5410 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5411 errorText_ = errorStream_.str();
\r
5412 error( RtAudioError::WARNING );
\r
5417 inCaps.dwSize = sizeof( inCaps );
\r
5418 result = input->GetCaps( &inCaps );
\r
5419 if ( FAILED( result ) ) {
\r
5421 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5422 errorText_ = errorStream_.str();
\r
5423 error( RtAudioError::WARNING );
\r
5427 // Get input channel information.
\r
5428 info.inputChannels = inCaps.dwChannels;
\r
5430 // Get sample rate and format information.
\r
5431 std::vector<unsigned int> rates;
\r
5432 if ( inCaps.dwChannels >= 2 ) {
\r
5433 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5434 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5435 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5436 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5437 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5438 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5439 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5440 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5442 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5443 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5444 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5445 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5446 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5448 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5449 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5450 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5451 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5452 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5455 else if ( inCaps.dwChannels == 1 ) {
\r
5456 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5457 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5458 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5459 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5460 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5461 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5462 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5463 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5465 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5466 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5467 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5468 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5469 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5471 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5472 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5473 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5474 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5475 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5478 else info.inputChannels = 0; // technically, this would be an error
\r
5482 if ( info.inputChannels == 0 ) return info;
\r
5484 // Copy the supported rates to the info structure but avoid duplication.
\r
5486 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5488 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5489 if ( rates[i] == info.sampleRates[j] ) {
\r
5494 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5496 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5498 // If device opens for both playback and capture, we determine the channels.
\r
5499 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5500 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5502 if ( device == 0 ) info.isDefaultInput = true;
\r
5504 // Copy name and return.
\r
5505 info.name = dsDevices[ device ].name;
\r
5506 info.probed = true;
\r
5510 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5511 unsigned int firstChannel, unsigned int sampleRate,
\r
5512 RtAudioFormat format, unsigned int *bufferSize,
\r
5513 RtAudio::StreamOptions *options )
\r
5515 if ( channels + firstChannel > 2 ) {
\r
5516 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
5520 size_t nDevices = dsDevices.size();
\r
5521 if ( nDevices == 0 ) {
\r
5522 // This should not happen because a check is made before this function is called.
\r
5523 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
5527 if ( device >= nDevices ) {
\r
5528 // This should not happen because a check is made before this function is called.
\r
5529 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
5533 if ( mode == OUTPUT ) {
\r
5534 if ( dsDevices[ device ].validId[0] == false ) {
\r
5535 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
5536 errorText_ = errorStream_.str();
\r
5540 else { // mode == INPUT
\r
5541 if ( dsDevices[ device ].validId[1] == false ) {
\r
5542 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
5543 errorText_ = errorStream_.str();
\r
5548 // According to a note in PortAudio, using GetDesktopWindow()
\r
5549 // instead of GetForegroundWindow() is supposed to avoid problems
\r
5550 // that occur when the application's window is not the foreground
\r
5551 // window. Also, if the application window closes before the
\r
5552 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
5553 // problems when using GetDesktopWindow() but it seems fine now
\r
5554 // (January 2010). I'll leave it commented here.
\r
5555 // HWND hWnd = GetForegroundWindow();
\r
5556 HWND hWnd = GetDesktopWindow();
\r
5558 // Check the numberOfBuffers parameter and limit the lowest value to
\r
5559 // two. This is a judgement call and a value of two is probably too
\r
5560 // low for capture, but it should work for playback.
\r
5562 if ( options ) nBuffers = options->numberOfBuffers;
\r
5563 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
5564 if ( nBuffers < 2 ) nBuffers = 3;
\r
5566 // Check the lower range of the user-specified buffer size and set
\r
5567 // (arbitrarily) to a lower bound of 32.
\r
5568 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
5570 // Create the wave format structure. The data format setting will
\r
5571 // be determined later.
\r
5572 WAVEFORMATEX waveFormat;
\r
5573 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
5574 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
5575 waveFormat.nChannels = channels + firstChannel;
\r
5576 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
5578 // Determine the device buffer size. By default, we'll use the value
\r
5579 // defined above (32K), but we will grow it to make allowances for
\r
5580 // very large software buffer sizes.
\r
5581 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
5582 DWORD dsPointerLeadTime = 0;
\r
5584 void *ohandle = 0, *bhandle = 0;
\r
5586 if ( mode == OUTPUT ) {
\r
5588 LPDIRECTSOUND output;
\r
5589 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5590 if ( FAILED( result ) ) {
\r
5591 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5592 errorText_ = errorStream_.str();
\r
5597 outCaps.dwSize = sizeof( outCaps );
\r
5598 result = output->GetCaps( &outCaps );
\r
5599 if ( FAILED( result ) ) {
\r
5600 output->Release();
\r
5601 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
5602 errorText_ = errorStream_.str();
\r
5606 // Check channel information.
\r
5607 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
5608 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
5609 errorText_ = errorStream_.str();
\r
5613 // Check format information. Use 16-bit format unless not
\r
5614 // supported or user requests 8-bit.
\r
5615 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
5616 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
5617 waveFormat.wBitsPerSample = 16;
\r
5618 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5621 waveFormat.wBitsPerSample = 8;
\r
5622 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5624 stream_.userFormat = format;
\r
5626 // Update wave format structure and buffer information.
\r
5627 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5628 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5629 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5631 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5632 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5633 dsBufferSize *= 2;
\r
5635 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
5636 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
5637 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
5638 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
5639 if ( FAILED( result ) ) {
\r
5640 output->Release();
\r
5641 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
5642 errorText_ = errorStream_.str();
\r
5646 // Even though we will write to the secondary buffer, we need to
\r
5647 // access the primary buffer to set the correct output format
\r
5648 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
5649 // buffer description.
\r
5650 DSBUFFERDESC bufferDescription;
\r
5651 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5652 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5653 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
5655 // Obtain the primary buffer
\r
5656 LPDIRECTSOUNDBUFFER buffer;
\r
5657 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5658 if ( FAILED( result ) ) {
\r
5659 output->Release();
\r
5660 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
5661 errorText_ = errorStream_.str();
\r
5665 // Set the primary DS buffer sound format.
\r
5666 result = buffer->SetFormat( &waveFormat );
\r
5667 if ( FAILED( result ) ) {
\r
5668 output->Release();
\r
5669 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
5670 errorText_ = errorStream_.str();
\r
5674 // Setup the secondary DS buffer description.
\r
5675 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5676 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5677 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5678 DSBCAPS_GLOBALFOCUS |
\r
5679 DSBCAPS_GETCURRENTPOSITION2 |
\r
5680 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
5681 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5682 bufferDescription.lpwfxFormat = &waveFormat;
\r
5684 // Try to create the secondary DS buffer. If that doesn't work,
\r
5685 // try to use software mixing. Otherwise, there's a problem.
\r
5686 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5687 if ( FAILED( result ) ) {
\r
5688 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5689 DSBCAPS_GLOBALFOCUS |
\r
5690 DSBCAPS_GETCURRENTPOSITION2 |
\r
5691 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
5692 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5693 if ( FAILED( result ) ) {
\r
5694 output->Release();
\r
5695 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
5696 errorText_ = errorStream_.str();
\r
5701 // Get the buffer size ... might be different from what we specified.
\r
5703 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
5704 result = buffer->GetCaps( &dsbcaps );
\r
5705 if ( FAILED( result ) ) {
\r
5706 output->Release();
\r
5707 buffer->Release();
\r
5708 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5709 errorText_ = errorStream_.str();
\r
5713 dsBufferSize = dsbcaps.dwBufferBytes;
\r
5715 // Lock the DS buffer
\r
5718 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5719 if ( FAILED( result ) ) {
\r
5720 output->Release();
\r
5721 buffer->Release();
\r
5722 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
5723 errorText_ = errorStream_.str();
\r
5727 // Zero the DS buffer
\r
5728 ZeroMemory( audioPtr, dataLen );
\r
5730 // Unlock the DS buffer
\r
5731 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5732 if ( FAILED( result ) ) {
\r
5733 output->Release();
\r
5734 buffer->Release();
\r
5735 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
5736 errorText_ = errorStream_.str();
\r
5740 ohandle = (void *) output;
\r
5741 bhandle = (void *) buffer;
\r
5744 if ( mode == INPUT ) {
\r
5746 LPDIRECTSOUNDCAPTURE input;
\r
5747 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5748 if ( FAILED( result ) ) {
\r
5749 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5750 errorText_ = errorStream_.str();
\r
5755 inCaps.dwSize = sizeof( inCaps );
\r
5756 result = input->GetCaps( &inCaps );
\r
5757 if ( FAILED( result ) ) {
\r
5759 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
5760 errorText_ = errorStream_.str();
\r
5764 // Check channel information.
\r
5765 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
5766 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
5770 // Check format information. Use 16-bit format unless user
\r
5771 // requests 8-bit.
\r
5772 DWORD deviceFormats;
\r
5773 if ( channels + firstChannel == 2 ) {
\r
5774 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
5775 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5776 waveFormat.wBitsPerSample = 8;
\r
5777 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5779 else { // assume 16-bit is supported
\r
5780 waveFormat.wBitsPerSample = 16;
\r
5781 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5784 else { // channel == 1
\r
5785 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
5786 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5787 waveFormat.wBitsPerSample = 8;
\r
5788 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5790 else { // assume 16-bit is supported
\r
5791 waveFormat.wBitsPerSample = 16;
\r
5792 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5795 stream_.userFormat = format;
\r
5797 // Update wave format structure and buffer information.
\r
5798 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5799 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5800 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5802 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5803 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5804 dsBufferSize *= 2;
\r
5806 // Setup the secondary DS buffer description.
\r
5807 DSCBUFFERDESC bufferDescription;
\r
5808 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
5809 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
5810 bufferDescription.dwFlags = 0;
\r
5811 bufferDescription.dwReserved = 0;
\r
5812 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5813 bufferDescription.lpwfxFormat = &waveFormat;
\r
5815 // Create the capture buffer.
\r
5816 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
5817 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
5818 if ( FAILED( result ) ) {
\r
5820 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
5821 errorText_ = errorStream_.str();
\r
5825 // Get the buffer size ... might be different from what we specified.
\r
5826 DSCBCAPS dscbcaps;
\r
5827 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
5828 result = buffer->GetCaps( &dscbcaps );
\r
5829 if ( FAILED( result ) ) {
\r
5831 buffer->Release();
\r
5832 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5833 errorText_ = errorStream_.str();
\r
5837 dsBufferSize = dscbcaps.dwBufferBytes;
\r
5839 // NOTE: We could have a problem here if this is a duplex stream
\r
5840 // and the play and capture hardware buffer sizes are different
\r
5841 // (I'm actually not sure if that is a problem or not).
\r
5842 // Currently, we are not verifying that.
\r
5844 // Lock the capture buffer
\r
5847 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5848 if ( FAILED( result ) ) {
\r
5850 buffer->Release();
\r
5851 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
5852 errorText_ = errorStream_.str();
\r
5856 // Zero the buffer
\r
5857 ZeroMemory( audioPtr, dataLen );
\r
5859 // Unlock the buffer
\r
5860 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5861 if ( FAILED( result ) ) {
\r
5863 buffer->Release();
\r
5864 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
5865 errorText_ = errorStream_.str();
\r
5869 ohandle = (void *) input;
\r
5870 bhandle = (void *) buffer;
\r
5873 // Set various stream parameters
\r
5874 DsHandle *handle = 0;
\r
5875 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
5876 stream_.nUserChannels[mode] = channels;
\r
5877 stream_.bufferSize = *bufferSize;
\r
5878 stream_.channelOffset[mode] = firstChannel;
\r
5879 stream_.deviceInterleaved[mode] = true;
\r
5880 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
5881 else stream_.userInterleaved = true;
\r
5883 // Set flag for buffer conversion
\r
5884 stream_.doConvertBuffer[mode] = false;
\r
5885 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
5886 stream_.doConvertBuffer[mode] = true;
\r
5887 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
5888 stream_.doConvertBuffer[mode] = true;
\r
5889 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5890 stream_.nUserChannels[mode] > 1 )
\r
5891 stream_.doConvertBuffer[mode] = true;
\r
5893 // Allocate necessary internal buffers
\r
5894 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5895 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5896 if ( stream_.userBuffer[mode] == NULL ) {
\r
5897 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
5901 if ( stream_.doConvertBuffer[mode] ) {
\r
5903 bool makeBuffer = true;
\r
5904 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5905 if ( mode == INPUT ) {
\r
5906 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5907 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5908 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
5912 if ( makeBuffer ) {
\r
5913 bufferBytes *= *bufferSize;
\r
5914 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5915 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5916 if ( stream_.deviceBuffer == NULL ) {
\r
5917 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
5923 // Allocate our DsHandle structures for the stream.
\r
5924 if ( stream_.apiHandle == 0 ) {
\r
5926 handle = new DsHandle;
\r
5928 catch ( std::bad_alloc& ) {
\r
5929 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
5933 // Create a manual-reset event.
\r
5934 handle->condition = CreateEvent( NULL, // no security
\r
5935 TRUE, // manual-reset
\r
5936 FALSE, // non-signaled initially
\r
5937 NULL ); // unnamed
\r
5938 stream_.apiHandle = (void *) handle;
\r
5941 handle = (DsHandle *) stream_.apiHandle;
\r
5942 handle->id[mode] = ohandle;
\r
5943 handle->buffer[mode] = bhandle;
\r
5944 handle->dsBufferSize[mode] = dsBufferSize;
\r
5945 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
5947 stream_.device[mode] = device;
\r
5948 stream_.state = STREAM_STOPPED;
\r
5949 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
5950 // We had already set up an output stream.
\r
5951 stream_.mode = DUPLEX;
\r
5953 stream_.mode = mode;
\r
5954 stream_.nBuffers = nBuffers;
\r
5955 stream_.sampleRate = sampleRate;
\r
5957 // Setup the buffer conversion information structure.
\r
5958 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5960 // Setup the callback thread.
\r
5961 if ( stream_.callbackInfo.isRunning == false ) {
\r
5962 unsigned threadId;
\r
5963 stream_.callbackInfo.isRunning = true;
\r
5964 stream_.callbackInfo.object = (void *) this;
\r
5965 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
5966 &stream_.callbackInfo, 0, &threadId );
\r
5967 if ( stream_.callbackInfo.thread == 0 ) {
\r
5968 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
5972 // Boost DS thread priority
\r
5973 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
5979 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5980 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5981 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5982 if ( buffer ) buffer->Release();
\r
5983 object->Release();
\r
5985 if ( handle->buffer[1] ) {
\r
5986 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5987 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5988 if ( buffer ) buffer->Release();
\r
5989 object->Release();
\r
5991 CloseHandle( handle->condition );
\r
5993 stream_.apiHandle = 0;
\r
5996 for ( int i=0; i<2; i++ ) {
\r
5997 if ( stream_.userBuffer[i] ) {
\r
5998 free( stream_.userBuffer[i] );
\r
5999 stream_.userBuffer[i] = 0;
\r
6003 if ( stream_.deviceBuffer ) {
\r
6004 free( stream_.deviceBuffer );
\r
6005 stream_.deviceBuffer = 0;
\r
6008 stream_.state = STREAM_CLOSED;
\r
// Close the open stream: stop and join the callback thread, release the
// DirectSound playback/capture buffers and objects, close the condition
// event, free the user/device buffers, and reset the stream state to
// UNINITIALIZED/STREAM_CLOSED. Warns (no-op) if no stream is open.
// NOTE(review): several lines (closing braces, DsHandle guard, intermediate
// Stop() calls at original lines 6030/6039) are missing from this extract —
// verify against the complete file.
6012 void RtApiDs :: closeStream()
\r
6014 if ( stream_.state == STREAM_CLOSED ) {
\r
6015 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
6016 error( RtAudioError::WARNING );
\r
6020 // Stop the callback thread.
\r
// Clearing isRunning makes the callback loop exit; then join and close it.
6021 stream_.callbackInfo.isRunning = false;
\r
6022 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
6023 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
6025 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
// Release output-side DirectSound objects (index 0 = playback).
6027 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
6028 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
6029 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6032 buffer->Release();
\r
6034 object->Release();
\r
// Release input-side DirectSoundCapture objects (index 1 = capture).
6036 if ( handle->buffer[1] ) {
\r
6037 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
6038 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6041 buffer->Release();
\r
6043 object->Release();
\r
6045 CloseHandle( handle->condition );
\r
6047 stream_.apiHandle = 0;
\r
// Free per-direction user buffers and the shared device buffer.
6050 for ( int i=0; i<2; i++ ) {
\r
6051 if ( stream_.userBuffer[i] ) {
\r
6052 free( stream_.userBuffer[i] );
\r
6053 stream_.userBuffer[i] = 0;
\r
6057 if ( stream_.deviceBuffer ) {
\r
6058 free( stream_.deviceBuffer );
\r
6059 stream_.deviceBuffer = 0;
\r
6062 stream_.mode = UNINITIALIZED;
\r
6063 stream_.state = STREAM_CLOSED;
\r
// Start the stream: raise the scheduler timer resolution, set up the duplex
// preroll, start looping playback (Play) and/or capture (Start) on the
// DirectSound buffers, reset the drain bookkeeping, and mark the stream
// RUNNING. Reports SYSTEM_ERROR if any DirectSound call failed.
// NOTE(review): closing braces and a verifyStream()-style guard appear to be
// missing from this extract — verify against the complete file.
6066 void RtApiDs :: startStream()
\r
6069 if ( stream_.state == STREAM_RUNNING ) {
\r
6070 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
6071 error( RtAudioError::WARNING );
\r
6075 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6077 // Increase scheduler frequency on lesser windows (a side-effect of
\r
6078 // increasing timer accuracy). On greater windows (Win2K or later),
\r
6079 // this is already in effect.
\r
// Paired with timeEndPeriod( 1 ) in stopStream().
6080 timeBeginPeriod( 1 );
\r
6082 buffersRolling = false;
\r
6083 duplexPrerollBytes = 0;
\r
6085 if ( stream_.mode == DUPLEX ) {
\r
6086 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
// Sized from the capture (index 1) device format and channel count.
6087 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
6090 HRESULT result = 0;
\r
6091 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6093 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
// Looping play: the callback thread streams data into the circular buffer.
6094 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
6095 if ( FAILED( result ) ) {
\r
6096 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
6097 errorText_ = errorStream_.str();
\r
6102 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6104 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6105 result = buffer->Start( DSCBSTART_LOOPING );
\r
6106 if ( FAILED( result ) ) {
\r
6107 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
6108 errorText_ = errorStream_.str();
\r
// Reset the drain state and the manual-reset event used to signal drain
// completion (see stopStream / callbackEvent).
6113 handle->drainCounter = 0;
\r
6114 handle->internalDrain = false;
\r
6115 ResetEvent( handle->condition );
\r
6116 stream_.state = STREAM_RUNNING;
\r
6119 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream. For output, first lets the stream drain (blocks on the
// handle condition event unless a drain was already requested), then stops
// each DirectSound buffer and locks/zeroes/unlocks it so a later restart
// plays silence, resets the buffer pointers, reverts the scheduler timer
// resolution, and releases the stream mutex.
// NOTE(review): declarations of audioPtr/dataLen and several closing braces
// are missing from this extract — verify against the complete file.
6122 void RtApiDs :: stopStream()
\r
6125 if ( stream_.state == STREAM_STOPPED ) {
\r
6126 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
6127 error( RtAudioError::WARNING );
\r
6131 HRESULT result = 0;
\r
6134 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6135 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// If the callback has not already begun a drain, request one (drainCounter
// = 2) and block until the callback signals the condition event.
6136 if ( handle->drainCounter == 0 ) {
\r
6137 handle->drainCounter = 2;
\r
6138 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
6141 stream_.state = STREAM_STOPPED;
\r
6143 MUTEX_LOCK( &stream_.mutex );
\r
6145 // Stop the buffer and clear memory
\r
6146 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6147 result = buffer->Stop();
\r
6148 if ( FAILED( result ) ) {
\r
6149 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
6150 errorText_ = errorStream_.str();
\r
6154 // Lock the buffer and clear it so that if we start to play again,
\r
6155 // we won't have old data playing.
\r
6156 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6157 if ( FAILED( result ) ) {
\r
6158 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
6159 errorText_ = errorStream_.str();
\r
6163 // Zero the DS buffer
\r
6164 ZeroMemory( audioPtr, dataLen );
\r
6166 // Unlock the DS buffer
\r
6167 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6168 if ( FAILED( result ) ) {
\r
6169 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
6170 errorText_ = errorStream_.str();
\r
6174 // If we start playing again, we must begin at beginning of buffer.
\r
6175 handle->bufferPointer[0] = 0;
\r
6178 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6179 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6183 stream_.state = STREAM_STOPPED;
\r
// In DUPLEX mode the mutex was already taken in the output branch above;
// only lock here for pure-input streams.
6185 if ( stream_.mode != DUPLEX )
\r
6186 MUTEX_LOCK( &stream_.mutex );
\r
6188 result = buffer->Stop();
\r
6189 if ( FAILED( result ) ) {
\r
6190 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
6191 errorText_ = errorStream_.str();
\r
6195 // Lock the buffer and clear it so that if we start to play again,
\r
6196 // we won't have old data playing.
\r
6197 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6198 if ( FAILED( result ) ) {
\r
6199 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
6200 errorText_ = errorStream_.str();
\r
6204 // Zero the DS buffer
\r
6205 ZeroMemory( audioPtr, dataLen );
\r
6207 // Unlock the DS buffer
\r
6208 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6209 if ( FAILED( result ) ) {
\r
6210 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
6211 errorText_ = errorStream_.str();
\r
6215 // If we start recording again, we must begin at beginning of buffer.
\r
6216 handle->bufferPointer[1] = 0;
\r
6220 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
6221 MUTEX_UNLOCK( &stream_.mutex );
\r
6223 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the stream: set drainCounter to 2 so the callback skips the user
// callback and winds down immediately (see callbackEvent, which only invokes
// the user callback when drainCounter == 0). Presumably followed by a
// stopStream() call in the full source — the tail of this function is
// missing from this extract; TODO confirm.
6226 void RtApiDs :: abortStream()
\r
6229 if ( stream_.state == STREAM_STOPPED ) {
\r
6230 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
6231 error( RtAudioError::WARNING );
\r
6235 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6236 handle->drainCounter = 2;
\r
6241 void RtApiDs :: callbackEvent()
\r
6243 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6244 Sleep( 50 ); // sleep 50 milliseconds
\r
6248 if ( stream_.state == STREAM_CLOSED ) {
\r
6249 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6250 error( RtAudioError::WARNING );
\r
6254 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6255 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6257 // Check if we were draining the stream and signal is finished.
\r
6258 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6260 stream_.state = STREAM_STOPPING;
\r
6261 if ( handle->internalDrain == false )
\r
6262 SetEvent( handle->condition );
\r
6268 // Invoke user callback to get fresh output data UNLESS we are
\r
6269 // draining stream.
\r
6270 if ( handle->drainCounter == 0 ) {
\r
6271 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6272 double streamTime = getStreamTime();
\r
6273 RtAudioStreamStatus status = 0;
\r
6274 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6275 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6276 handle->xrun[0] = false;
\r
6278 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6279 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6280 handle->xrun[1] = false;
\r
6282 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6283 stream_.bufferSize, streamTime, status, info->userData );
\r
6284 if ( cbReturnValue == 2 ) {
\r
6285 stream_.state = STREAM_STOPPING;
\r
6286 handle->drainCounter = 2;
\r
6290 else if ( cbReturnValue == 1 ) {
\r
6291 handle->drainCounter = 1;
\r
6292 handle->internalDrain = true;
\r
6297 DWORD currentWritePointer, safeWritePointer;
\r
6298 DWORD currentReadPointer, safeReadPointer;
\r
6299 UINT nextWritePointer;
\r
6301 LPVOID buffer1 = NULL;
\r
6302 LPVOID buffer2 = NULL;
\r
6303 DWORD bufferSize1 = 0;
\r
6304 DWORD bufferSize2 = 0;
\r
6309 MUTEX_LOCK( &stream_.mutex );
\r
6310 if ( stream_.state == STREAM_STOPPED ) {
\r
6311 MUTEX_UNLOCK( &stream_.mutex );
\r
6315 if ( buffersRolling == false ) {
\r
6316 if ( stream_.mode == DUPLEX ) {
\r
6317 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6319 // It takes a while for the devices to get rolling. As a result,
\r
6320 // there's no guarantee that the capture and write device pointers
\r
6321 // will move in lockstep. Wait here for both devices to start
\r
6322 // rolling, and then set our buffer pointers accordingly.
\r
6323 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6324 // bytes later than the write buffer.
\r
6326 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6327 // take place between the two GetCurrentPosition calls... but I'm
\r
6328 // really not sure how to solve the problem. Temporarily boost to
\r
6329 // Realtime priority, maybe; but I'm not sure what priority the
\r
6330 // DirectSound service threads run at. We *should* be roughly
\r
6331 // within a ms or so of correct.
\r
6333 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6334 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6336 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6338 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6339 if ( FAILED( result ) ) {
\r
6340 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6341 errorText_ = errorStream_.str();
\r
6342 MUTEX_UNLOCK( &stream_.mutex );
\r
6343 error( RtAudioError::SYSTEM_ERROR );
\r
6346 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6347 if ( FAILED( result ) ) {
\r
6348 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6349 errorText_ = errorStream_.str();
\r
6350 MUTEX_UNLOCK( &stream_.mutex );
\r
6351 error( RtAudioError::SYSTEM_ERROR );
\r
6355 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6356 if ( FAILED( result ) ) {
\r
6357 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6358 errorText_ = errorStream_.str();
\r
6359 MUTEX_UNLOCK( &stream_.mutex );
\r
6360 error( RtAudioError::SYSTEM_ERROR );
\r
6363 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6364 if ( FAILED( result ) ) {
\r
6365 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6366 errorText_ = errorStream_.str();
\r
6367 MUTEX_UNLOCK( &stream_.mutex );
\r
6368 error( RtAudioError::SYSTEM_ERROR );
\r
6371 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6375 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6377 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6378 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6379 handle->bufferPointer[1] = safeReadPointer;
\r
6381 else if ( stream_.mode == OUTPUT ) {
\r
6383 // Set the proper nextWritePosition after initial startup.
\r
6384 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6385 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6386 if ( FAILED( result ) ) {
\r
6387 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6388 errorText_ = errorStream_.str();
\r
6389 MUTEX_UNLOCK( &stream_.mutex );
\r
6390 error( RtAudioError::SYSTEM_ERROR );
\r
6393 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6394 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6397 buffersRolling = true;
\r
6400 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6402 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6404 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6405 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6406 bufferBytes *= formatBytes( stream_.userFormat );
\r
6407 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6410 // Setup parameters and do buffer conversion if necessary.
\r
6411 if ( stream_.doConvertBuffer[0] ) {
\r
6412 buffer = stream_.deviceBuffer;
\r
6413 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6414 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6415 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6418 buffer = stream_.userBuffer[0];
\r
6419 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6420 bufferBytes *= formatBytes( stream_.userFormat );
\r
6423 // No byte swapping necessary in DirectSound implementation.
\r
6425 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6426 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6428 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6429 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6431 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6432 nextWritePointer = handle->bufferPointer[0];
\r
6434 DWORD endWrite, leadPointer;
\r
6436 // Find out where the read and "safe write" pointers are.
\r
6437 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6438 if ( FAILED( result ) ) {
\r
6439 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6440 errorText_ = errorStream_.str();
\r
6441 error( RtAudioError::SYSTEM_ERROR );
\r
6445 // We will copy our output buffer into the region between
\r
6446 // safeWritePointer and leadPointer. If leadPointer is not
\r
6447 // beyond the next endWrite position, wait until it is.
\r
6448 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6449 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6450 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6451 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6452 endWrite = nextWritePointer + bufferBytes;
\r
6454 // Check whether the entire write region is behind the play pointer.
\r
6455 if ( leadPointer >= endWrite ) break;
\r
6457 // If we are here, then we must wait until the leadPointer advances
\r
6458 // beyond the end of our next write region. We use the
\r
6459 // Sleep() function to suspend operation until that happens.
\r
6460 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6461 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6462 if ( millis < 1.0 ) millis = 1.0;
\r
6463 Sleep( (DWORD) millis );
\r
6466 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6467 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6468 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6469 handle->xrun[0] = true;
\r
6470 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6471 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6472 handle->bufferPointer[0] = nextWritePointer;
\r
6473 endWrite = nextWritePointer + bufferBytes;
\r
6476 // Lock free space in the buffer
\r
6477 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6478 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6479 if ( FAILED( result ) ) {
\r
6480 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6481 errorText_ = errorStream_.str();
\r
6482 MUTEX_UNLOCK( &stream_.mutex );
\r
6483 error( RtAudioError::SYSTEM_ERROR );
\r
6487 // Copy our buffer into the DS buffer
\r
6488 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6489 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6491 // Update our buffer offset and unlock sound buffer
\r
6492 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6493 if ( FAILED( result ) ) {
\r
6494 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6495 errorText_ = errorStream_.str();
\r
6496 MUTEX_UNLOCK( &stream_.mutex );
\r
6497 error( RtAudioError::SYSTEM_ERROR );
\r
6500 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6501 handle->bufferPointer[0] = nextWritePointer;
\r
6504 // Don't bother draining input
\r
6505 if ( handle->drainCounter ) {
\r
6506 handle->drainCounter++;
\r
6510 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6512 // Setup parameters.
\r
6513 if ( stream_.doConvertBuffer[1] ) {
\r
6514 buffer = stream_.deviceBuffer;
\r
6515 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6516 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6519 buffer = stream_.userBuffer[1];
\r
6520 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6521 bufferBytes *= formatBytes( stream_.userFormat );
\r
6524 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6525 long nextReadPointer = handle->bufferPointer[1];
\r
6526 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6528 // Find out where the write and "safe read" pointers are.
\r
6529 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6530 if ( FAILED( result ) ) {
\r
6531 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6532 errorText_ = errorStream_.str();
\r
6533 MUTEX_UNLOCK( &stream_.mutex );
\r
6534 error( RtAudioError::SYSTEM_ERROR );
\r
6538 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6539 DWORD endRead = nextReadPointer + bufferBytes;
\r
6541 // Handling depends on whether we are INPUT or DUPLEX.
\r
6542 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6543 // then a wait here will drag the write pointers into the forbidden zone.
\r
6545 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6546 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6547 // practical way to sync up the read and write pointers reliably, given the
\r
6548 // the very complex relationship between phase and increment of the read and write
\r
6551 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6552 // provide a pre-roll period of 0.5 seconds in which we return
\r
6553 // zeros from the read buffer while the pointers sync up.
\r
6555 if ( stream_.mode == DUPLEX ) {
\r
6556 if ( safeReadPointer < endRead ) {
\r
6557 if ( duplexPrerollBytes <= 0 ) {
\r
6558 // Pre-roll time over. Be more agressive.
\r
6559 int adjustment = endRead-safeReadPointer;
\r
6561 handle->xrun[1] = true;
\r
6563 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6564 // and perform fine adjustments later.
\r
6565 // - small adjustments: back off by twice as much.
\r
6566 if ( adjustment >= 2*bufferBytes )
\r
6567 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6569 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6571 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6575 // In pre=roll time. Just do it.
\r
6576 nextReadPointer = safeReadPointer - bufferBytes;
\r
6577 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6579 endRead = nextReadPointer + bufferBytes;
\r
6582 else { // mode == INPUT
\r
6583 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6584 // See comments for playback.
\r
6585 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6586 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6587 if ( millis < 1.0 ) millis = 1.0;
\r
6588 Sleep( (DWORD) millis );
\r
6590 // Wake up and find out where we are now.
\r
6591 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6592 if ( FAILED( result ) ) {
\r
6593 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6594 errorText_ = errorStream_.str();
\r
6595 MUTEX_UNLOCK( &stream_.mutex );
\r
6596 error( RtAudioError::SYSTEM_ERROR );
\r
6600 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6604 // Lock free space in the buffer
\r
6605 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6606 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6607 if ( FAILED( result ) ) {
\r
6608 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6609 errorText_ = errorStream_.str();
\r
6610 MUTEX_UNLOCK( &stream_.mutex );
\r
6611 error( RtAudioError::SYSTEM_ERROR );
\r
6615 if ( duplexPrerollBytes <= 0 ) {
\r
6616 // Copy our buffer into the DS buffer
\r
6617 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6618 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6621 memset( buffer, 0, bufferSize1 );
\r
6622 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6623 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6626 // Update our buffer offset and unlock sound buffer
\r
6627 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6628 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6629 if ( FAILED( result ) ) {
\r
6630 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6631 errorText_ = errorStream_.str();
\r
6632 MUTEX_UNLOCK( &stream_.mutex );
\r
6633 error( RtAudioError::SYSTEM_ERROR );
\r
6636 handle->bufferPointer[1] = nextReadPointer;
\r
6638 // No byte swapping necessary in DirectSound implementation.
\r
6640 // If necessary, convert 8-bit data from unsigned to signed.
\r
6641 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6642 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6644 // Do buffer conversion if necessary.
\r
6645 if ( stream_.doConvertBuffer[1] )
\r
6646 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6650 MUTEX_UNLOCK( &stream_.mutex );
\r
6651 RtApi::tickStreamTime();
\r
6654 // Definitions for utility functions and callbacks
\r
6655 // specific to the DirectSound implementation.
\r
6657 static unsigned __stdcall callbackHandler( void *ptr )
\r
6659 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6660 RtApiDs *object = (RtApiDs *) info->object;
\r
6661 bool* isRunning = &info->isRunning;
\r
6663 while ( *isRunning == true ) {
\r
6664 object->callbackEvent();
\r
6667 _endthreadex( 0 );
\r
// DirectSound device-enumeration callback, passed to
// DirectSound[Capture]Enumerate.  For each reported device it opens the
// device, checks its capabilities, and records a DsDevice entry (or updates
// an existing entry of the same name) in the vector supplied via lpContext.
// Returning TRUE continues enumeration.
// NOTE(review): this listing has gaps -- several structural lines (braces,
// else-branches, the caps/hr/device declarations) were dropped by the paste.
6671 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6672 LPCTSTR description,
6673 LPCTSTR /*module*/,
6674 LPVOID lpContext )
// Unpack the probe context: direction flag plus the shared device list.
6676 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6677 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6680 bool validDevice = false;
// Capture path: device is valid if it reports at least one channel and one
// supported format.
6681 if ( probeInfo.isInput == true ) {
6683 LPDIRECTSOUNDCAPTURE object;
6685 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
// A device that cannot be opened is silently skipped; enumeration continues.
6686 if ( hr != DS_OK ) return TRUE;
6688 caps.dwSize = sizeof(caps);
6689 hr = object->GetCaps( &caps );
6690 if ( hr == DS_OK ) {
6691 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
6692 validDevice = true;
6694 object->Release();
// Playback path: device is valid if it supports a primary mono or stereo buffer.
6698 LPDIRECTSOUND object;
6699 hr = DirectSoundCreate( lpguid, &object, NULL );
6700 if ( hr != DS_OK ) return TRUE;
6702 caps.dwSize = sizeof(caps);
6703 hr = object->GetCaps( &caps );
6704 if ( hr == DS_OK ) {
6705 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6706 validDevice = true;
6708 object->Release();
6711 // If good device, then save its name and guid.
6712 std::string name = convertCharPointerToStdString( description );
6713 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
// A NULL guid is how DirectSound reports the system default device.
6714 if ( lpguid == NULL )
6715 name = "Default Device";
6716 if ( validDevice ) {
// If a device of the same name already exists (found for the other
// direction), just attach this direction's guid to the existing entry.
// Index [1] holds the capture (input) id, index [0] the playback (output) id.
6717 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6718 if ( dsDevices[i].name == name ) {
6719 dsDevices[i].found = true;
6720 if ( probeInfo.isInput ) {
6721 dsDevices[i].id[1] = lpguid;
6722 dsDevices[i].validId[1] = true;
6725 dsDevices[i].id[0] = lpguid;
6726 dsDevices[i].validId[0] = true;
// Otherwise create a brand-new entry and append it to the list.
6733 device.name = name;
6734 device.found = true;
6735 if ( probeInfo.isInput ) {
6736 device.id[1] = lpguid;
6737 device.validId[1] = true;
6740 device.id[0] = lpguid;
6741 device.validId[0] = true;
6743 dsDevices.push_back( device );
\r
6749 static const char* getErrorString( int code )
\r
6753 case DSERR_ALLOCATED:
\r
6754 return "Already allocated";
\r
6756 case DSERR_CONTROLUNAVAIL:
\r
6757 return "Control unavailable";
\r
6759 case DSERR_INVALIDPARAM:
\r
6760 return "Invalid parameter";
\r
6762 case DSERR_INVALIDCALL:
\r
6763 return "Invalid call";
\r
6765 case DSERR_GENERIC:
\r
6766 return "Generic error";
\r
6768 case DSERR_PRIOLEVELNEEDED:
\r
6769 return "Priority level needed";
\r
6771 case DSERR_OUTOFMEMORY:
\r
6772 return "Out of memory";
\r
6774 case DSERR_BADFORMAT:
\r
6775 return "The sample rate or the channel format is not supported";
\r
6777 case DSERR_UNSUPPORTED:
\r
6778 return "Not supported";
\r
6780 case DSERR_NODRIVER:
\r
6781 return "No driver";
\r
6783 case DSERR_ALREADYINITIALIZED:
\r
6784 return "Already initialized";
\r
6786 case DSERR_NOAGGREGATION:
\r
6787 return "No aggregation";
\r
6789 case DSERR_BUFFERLOST:
\r
6790 return "Buffer lost";
\r
6792 case DSERR_OTHERAPPHASPRIO:
\r
6793 return "Another application already has priority";
\r
6795 case DSERR_UNINITIALIZED:
\r
6796 return "Uninitialized";
\r
6799 return "DirectSound unknown error";
\r
6802 //******************** End of __WINDOWS_DS__ *********************//
\r
6806 #if defined(__LINUX_ALSA__)
\r
6808 #include <alsa/asoundlib.h>
\r
6809 #include <unistd.h>
\r
6811 // A structure to hold various information related to the ALSA API
\r
6812 // implementation.
\r
6813 struct AlsaHandle {
\r
6814 snd_pcm_t *handles[2];
\r
6815 bool synchronized;
\r
6817 pthread_cond_t runnable_cv;
\r
6821 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6824 static void *alsaCallbackHandler( void * ptr );
\r
6826 RtApiAlsa :: RtApiAlsa()
\r
6828 // Nothing to do here.
\r
6831 RtApiAlsa :: ~RtApiAlsa()
\r
6833 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// Count available ALSA PCM devices: walks every sound card through the
// control interface, enumerating each card's PCM devices, then additionally
// checks whether the "default" virtual device can be opened.
// NOTE(review): this listing has gaps -- the statements that advance/count
// devices inside the loops and the final return were dropped by the paste.
6836 unsigned int RtApiAlsa :: getDeviceCount( void )
6838 unsigned nDevices = 0;
6839 int result, subdevice, card;
6841 snd_ctl_t *handle;
6843 // Count cards and devices
6845 snd_card_next( &card );
6846 while ( card >= 0 ) {
6847 sprintf( name, "hw:%d", card );
6848 result = snd_ctl_open( &handle, name, 0 );
// Failure to open a card's control interface is reported as a warning,
// not a fatal error.
6849 if ( result < 0 ) {
6850 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6851 errorText_ = errorStream_.str();
6852 error( RtAudioError::WARNING );
// Iterate this card's PCM devices via snd_ctl_pcm_next_device.
6857 result = snd_ctl_pcm_next_device( handle, &subdevice );
6858 if ( result < 0 ) {
6859 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6860 errorText_ = errorStream_.str();
6861 error( RtAudioError::WARNING );
// A negative subdevice index marks the end of this card's device list.
6864 if ( subdevice < 0 )
6869 snd_ctl_close( handle );
6870 snd_card_next( &card );
// The "default" device counts as one extra device when it can be opened.
6873 result = snd_ctl_open( &handle, "default", 0 );
6874 if (result == 0) {
6876 snd_ctl_close( handle );
\r
6882 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6884 RtAudio::DeviceInfo info;
\r
6885 info.probed = false;
\r
6887 unsigned nDevices = 0;
\r
6888 int result, subdevice, card;
\r
6890 snd_ctl_t *chandle;
\r
6892 // Count cards and devices
\r
6895 snd_card_next( &card );
\r
6896 while ( card >= 0 ) {
\r
6897 sprintf( name, "hw:%d", card );
\r
6898 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6899 if ( result < 0 ) {
\r
6900 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6901 errorText_ = errorStream_.str();
\r
6902 error( RtAudioError::WARNING );
\r
6907 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6908 if ( result < 0 ) {
\r
6909 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6910 errorText_ = errorStream_.str();
\r
6911 error( RtAudioError::WARNING );
\r
6914 if ( subdevice < 0 ) break;
\r
6915 if ( nDevices == device ) {
\r
6916 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6922 snd_ctl_close( chandle );
\r
6923 snd_card_next( &card );
\r
6926 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6927 if ( result == 0 ) {
\r
6928 if ( nDevices == device ) {
\r
6929 strcpy( name, "default" );
\r
6935 if ( nDevices == 0 ) {
\r
6936 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6937 error( RtAudioError::INVALID_USE );
\r
6941 if ( device >= nDevices ) {
\r
6942 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6943 error( RtAudioError::INVALID_USE );
\r
6949 // If a stream is already open, we cannot probe the stream devices.
\r
6950 // Thus, use the saved results.
\r
6951 if ( stream_.state != STREAM_CLOSED &&
\r
6952 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6953 snd_ctl_close( chandle );
\r
6954 if ( device >= devices_.size() ) {
\r
6955 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6956 error( RtAudioError::WARNING );
\r
6959 return devices_[ device ];
\r
6962 int openMode = SND_PCM_ASYNC;
\r
6963 snd_pcm_stream_t stream;
\r
6964 snd_pcm_info_t *pcminfo;
\r
6965 snd_pcm_info_alloca( &pcminfo );
\r
6966 snd_pcm_t *phandle;
\r
6967 snd_pcm_hw_params_t *params;
\r
6968 snd_pcm_hw_params_alloca( ¶ms );
\r
6970 // First try for playback unless default device (which has subdev -1)
\r
6971 stream = SND_PCM_STREAM_PLAYBACK;
\r
6972 snd_pcm_info_set_stream( pcminfo, stream );
\r
6973 if ( subdevice != -1 ) {
\r
6974 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6975 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6977 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6978 if ( result < 0 ) {
\r
6979 // Device probably doesn't support playback.
\r
6980 goto captureProbe;
\r
6984 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6985 if ( result < 0 ) {
\r
6986 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6987 errorText_ = errorStream_.str();
\r
6988 error( RtAudioError::WARNING );
\r
6989 goto captureProbe;
\r
6992 // The device is open ... fill the parameter structure.
\r
6993 result = snd_pcm_hw_params_any( phandle, params );
\r
6994 if ( result < 0 ) {
\r
6995 snd_pcm_close( phandle );
\r
6996 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6997 errorText_ = errorStream_.str();
\r
6998 error( RtAudioError::WARNING );
\r
6999 goto captureProbe;
\r
7002 // Get output channel information.
\r
7003 unsigned int value;
\r
7004 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7005 if ( result < 0 ) {
\r
7006 snd_pcm_close( phandle );
\r
7007 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
7008 errorText_ = errorStream_.str();
\r
7009 error( RtAudioError::WARNING );
\r
7010 goto captureProbe;
\r
7012 info.outputChannels = value;
\r
7013 snd_pcm_close( phandle );
\r
7016 stream = SND_PCM_STREAM_CAPTURE;
\r
7017 snd_pcm_info_set_stream( pcminfo, stream );
\r
7019 // Now try for capture unless default device (with subdev = -1)
\r
7020 if ( subdevice != -1 ) {
\r
7021 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
7022 snd_ctl_close( chandle );
\r
7023 if ( result < 0 ) {
\r
7024 // Device probably doesn't support capture.
\r
7025 if ( info.outputChannels == 0 ) return info;
\r
7026 goto probeParameters;
\r
7030 snd_ctl_close( chandle );
\r
7032 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7033 if ( result < 0 ) {
\r
7034 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7035 errorText_ = errorStream_.str();
\r
7036 error( RtAudioError::WARNING );
\r
7037 if ( info.outputChannels == 0 ) return info;
\r
7038 goto probeParameters;
\r
7041 // The device is open ... fill the parameter structure.
\r
7042 result = snd_pcm_hw_params_any( phandle, params );
\r
7043 if ( result < 0 ) {
\r
7044 snd_pcm_close( phandle );
\r
7045 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7046 errorText_ = errorStream_.str();
\r
7047 error( RtAudioError::WARNING );
\r
7048 if ( info.outputChannels == 0 ) return info;
\r
7049 goto probeParameters;
\r
7052 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7053 if ( result < 0 ) {
\r
7054 snd_pcm_close( phandle );
\r
7055 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
7056 errorText_ = errorStream_.str();
\r
7057 error( RtAudioError::WARNING );
\r
7058 if ( info.outputChannels == 0 ) return info;
\r
7059 goto probeParameters;
\r
7061 info.inputChannels = value;
\r
7062 snd_pcm_close( phandle );
\r
7064 // If device opens for both playback and capture, we determine the channels.
\r
7065 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
7066 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
7068 // ALSA doesn't provide default devices so we'll use the first available one.
\r
7069 if ( device == 0 && info.outputChannels > 0 )
\r
7070 info.isDefaultOutput = true;
\r
7071 if ( device == 0 && info.inputChannels > 0 )
\r
7072 info.isDefaultInput = true;
\r
7075 // At this point, we just need to figure out the supported data
\r
7076 // formats and sample rates. We'll proceed by opening the device in
\r
7077 // the direction with the maximum number of channels, or playback if
\r
7078 // they are equal. This might limit our sample rate options, but so
\r
7081 if ( info.outputChannels >= info.inputChannels )
\r
7082 stream = SND_PCM_STREAM_PLAYBACK;
\r
7084 stream = SND_PCM_STREAM_CAPTURE;
\r
7085 snd_pcm_info_set_stream( pcminfo, stream );
\r
7087 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7088 if ( result < 0 ) {
\r
7089 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7090 errorText_ = errorStream_.str();
\r
7091 error( RtAudioError::WARNING );
\r
7095 // The device is open ... fill the parameter structure.
\r
7096 result = snd_pcm_hw_params_any( phandle, params );
\r
7097 if ( result < 0 ) {
\r
7098 snd_pcm_close( phandle );
\r
7099 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7100 errorText_ = errorStream_.str();
\r
7101 error( RtAudioError::WARNING );
\r
7105 // Test our discrete set of sample rate values.
\r
7106 info.sampleRates.clear();
\r
7107 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7108 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
\r
7109 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7111 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
7112 info.preferredSampleRate = SAMPLE_RATES[i];
\r
7115 if ( info.sampleRates.size() == 0 ) {
\r
7116 snd_pcm_close( phandle );
\r
7117 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7118 errorText_ = errorStream_.str();
\r
7119 error( RtAudioError::WARNING );
\r
7123 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7124 snd_pcm_format_t format;
\r
7125 info.nativeFormats = 0;
\r
7126 format = SND_PCM_FORMAT_S8;
\r
7127 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7128 info.nativeFormats |= RTAUDIO_SINT8;
\r
7129 format = SND_PCM_FORMAT_S16;
\r
7130 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7131 info.nativeFormats |= RTAUDIO_SINT16;
\r
7132 format = SND_PCM_FORMAT_S24;
\r
7133 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7134 info.nativeFormats |= RTAUDIO_SINT24;
\r
7135 format = SND_PCM_FORMAT_S32;
\r
7136 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7137 info.nativeFormats |= RTAUDIO_SINT32;
\r
7138 format = SND_PCM_FORMAT_FLOAT;
\r
7139 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7140 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7141 format = SND_PCM_FORMAT_FLOAT64;
\r
7142 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7143 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7145 // Check that we have at least one supported format
\r
7146 if ( info.nativeFormats == 0 ) {
\r
7147 snd_pcm_close( phandle );
\r
7148 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7149 errorText_ = errorStream_.str();
\r
7150 error( RtAudioError::WARNING );
\r
7154 // Get the device name
\r
7156 result = snd_card_get_name( card, &cardname );
\r
7157 if ( result >= 0 ) {
\r
7158 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7163 // That's all ... close the device and return
\r
7164 snd_pcm_close( phandle );
\r
7165 info.probed = true;
\r
7169 void RtApiAlsa :: saveDeviceInfo( void )
\r
7173 unsigned int nDevices = getDeviceCount();
\r
7174 devices_.resize( nDevices );
\r
7175 for ( unsigned int i=0; i<nDevices; i++ )
\r
7176 devices_[i] = getDeviceInfo( i );
\r
7179 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7180 unsigned int firstChannel, unsigned int sampleRate,
\r
7181 RtAudioFormat format, unsigned int *bufferSize,
\r
7182 RtAudio::StreamOptions *options )
\r
7185 #if defined(__RTAUDIO_DEBUG__)
\r
7186 snd_output_t *out;
\r
7187 snd_output_stdio_attach(&out, stderr, 0);
\r
7190 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7192 unsigned nDevices = 0;
\r
7193 int result, subdevice, card;
\r
7195 snd_ctl_t *chandle;
\r
7197 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7198 snprintf(name, sizeof(name), "%s", "default");
\r
7200 // Count cards and devices
\r
7202 snd_card_next( &card );
\r
7203 while ( card >= 0 ) {
\r
7204 sprintf( name, "hw:%d", card );
\r
7205 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7206 if ( result < 0 ) {
\r
7207 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7208 errorText_ = errorStream_.str();
\r
7213 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7214 if ( result < 0 ) break;
\r
7215 if ( subdevice < 0 ) break;
\r
7216 if ( nDevices == device ) {
\r
7217 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7218 snd_ctl_close( chandle );
\r
7223 snd_ctl_close( chandle );
\r
7224 snd_card_next( &card );
\r
7227 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7228 if ( result == 0 ) {
\r
7229 if ( nDevices == device ) {
\r
7230 strcpy( name, "default" );
\r
7236 if ( nDevices == 0 ) {
\r
7237 // This should not happen because a check is made before this function is called.
\r
7238 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7242 if ( device >= nDevices ) {
\r
7243 // This should not happen because a check is made before this function is called.
\r
7244 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7251 // The getDeviceInfo() function will not work for a device that is
\r
7252 // already open. Thus, we'll probe the system before opening a
\r
7253 // stream and save the results for use by getDeviceInfo().
\r
7254 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7255 this->saveDeviceInfo();
\r
7257 snd_pcm_stream_t stream;
\r
7258 if ( mode == OUTPUT )
\r
7259 stream = SND_PCM_STREAM_PLAYBACK;
\r
7261 stream = SND_PCM_STREAM_CAPTURE;
\r
7263 snd_pcm_t *phandle;
\r
7264 int openMode = SND_PCM_ASYNC;
\r
7265 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7266 if ( result < 0 ) {
\r
7267 if ( mode == OUTPUT )
\r
7268 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7270 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7271 errorText_ = errorStream_.str();
\r
7275 // Fill the parameter structure.
\r
7276 snd_pcm_hw_params_t *hw_params;
\r
7277 snd_pcm_hw_params_alloca( &hw_params );
\r
7278 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7279 if ( result < 0 ) {
\r
7280 snd_pcm_close( phandle );
\r
7281 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7282 errorText_ = errorStream_.str();
\r
7286 #if defined(__RTAUDIO_DEBUG__)
\r
7287 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7288 snd_pcm_hw_params_dump( hw_params, out );
\r
7291 // Set access ... check user preference.
\r
7292 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7293 stream_.userInterleaved = false;
\r
7294 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7295 if ( result < 0 ) {
\r
7296 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7297 stream_.deviceInterleaved[mode] = true;
\r
7300 stream_.deviceInterleaved[mode] = false;
\r
7303 stream_.userInterleaved = true;
\r
7304 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7305 if ( result < 0 ) {
\r
7306 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7307 stream_.deviceInterleaved[mode] = false;
\r
7310 stream_.deviceInterleaved[mode] = true;
\r
7313 if ( result < 0 ) {
\r
7314 snd_pcm_close( phandle );
\r
7315 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7316 errorText_ = errorStream_.str();
\r
7320 // Determine how to set the device format.
\r
7321 stream_.userFormat = format;
\r
7322 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7324 if ( format == RTAUDIO_SINT8 )
\r
7325 deviceFormat = SND_PCM_FORMAT_S8;
\r
7326 else if ( format == RTAUDIO_SINT16 )
\r
7327 deviceFormat = SND_PCM_FORMAT_S16;
\r
7328 else if ( format == RTAUDIO_SINT24 )
\r
7329 deviceFormat = SND_PCM_FORMAT_S24;
\r
7330 else if ( format == RTAUDIO_SINT32 )
\r
7331 deviceFormat = SND_PCM_FORMAT_S32;
\r
7332 else if ( format == RTAUDIO_FLOAT32 )
\r
7333 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7334 else if ( format == RTAUDIO_FLOAT64 )
\r
7335 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7337 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7338 stream_.deviceFormat[mode] = format;
\r
7342 // The user requested format is not natively supported by the device.
\r
7343 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7344 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7345 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7349 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7350 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7351 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7355 deviceFormat = SND_PCM_FORMAT_S32;
\r
7356 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7357 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7361 deviceFormat = SND_PCM_FORMAT_S24;
\r
7362 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7363 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7367 deviceFormat = SND_PCM_FORMAT_S16;
\r
7368 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7369 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7373 deviceFormat = SND_PCM_FORMAT_S8;
\r
7374 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7375 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7379 // If we get here, no supported format was found.
\r
7380 snd_pcm_close( phandle );
\r
7381 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7382 errorText_ = errorStream_.str();
\r
7386 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7387 if ( result < 0 ) {
\r
7388 snd_pcm_close( phandle );
\r
7389 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7390 errorText_ = errorStream_.str();
\r
7394 // Determine whether byte-swaping is necessary.
\r
7395 stream_.doByteSwap[mode] = false;
\r
7396 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7397 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7398 if ( result == 0 )
\r
7399 stream_.doByteSwap[mode] = true;
\r
7400 else if (result < 0) {
\r
7401 snd_pcm_close( phandle );
\r
7402 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7403 errorText_ = errorStream_.str();
\r
7408 // Set the sample rate.
\r
7409 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7410 if ( result < 0 ) {
\r
7411 snd_pcm_close( phandle );
\r
7412 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7413 errorText_ = errorStream_.str();
\r
7417 // Determine the number of channels for this device. We support a possible
\r
7418 // minimum device channel number > than the value requested by the user.
\r
7419 stream_.nUserChannels[mode] = channels;
\r
7420 unsigned int value;
\r
7421 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7422 unsigned int deviceChannels = value;
\r
7423 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7424 snd_pcm_close( phandle );
\r
7425 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7426 errorText_ = errorStream_.str();
\r
7430 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7431 if ( result < 0 ) {
\r
7432 snd_pcm_close( phandle );
\r
7433 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7434 errorText_ = errorStream_.str();
\r
7437 deviceChannels = value;
\r
7438 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7439 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7441 // Set the device channels.
\r
7442 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7443 if ( result < 0 ) {
\r
7444 snd_pcm_close( phandle );
\r
7445 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7446 errorText_ = errorStream_.str();
\r
7450 // Set the buffer (or period) size.
\r
7452 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7453 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7454 if ( result < 0 ) {
\r
7455 snd_pcm_close( phandle );
\r
7456 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7457 errorText_ = errorStream_.str();
\r
7460 *bufferSize = periodSize;
\r
7462 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7463 unsigned int periods = 0;
\r
7464 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7465 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7466 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7467 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7468 if ( result < 0 ) {
\r
7469 snd_pcm_close( phandle );
\r
7470 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7471 errorText_ = errorStream_.str();
\r
7475 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7476 // MUST be the same in both directions!
\r
7477 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7478 snd_pcm_close( phandle );
\r
7479 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7480 errorText_ = errorStream_.str();
\r
7484 stream_.bufferSize = *bufferSize;
\r
7486 // Install the hardware configuration
\r
7487 result = snd_pcm_hw_params( phandle, hw_params );
\r
7488 if ( result < 0 ) {
\r
7489 snd_pcm_close( phandle );
\r
7490 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7491 errorText_ = errorStream_.str();
\r
7495 #if defined(__RTAUDIO_DEBUG__)
\r
7496 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7497 snd_pcm_hw_params_dump( hw_params, out );
\r
7500 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7501 snd_pcm_sw_params_t *sw_params = NULL;
\r
7502 snd_pcm_sw_params_alloca( &sw_params );
\r
7503 snd_pcm_sw_params_current( phandle, sw_params );
\r
7504 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7505 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7506 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7508 // The following two settings were suggested by Theo Veenker
\r
7509 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7510 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7512 // here are two options for a fix
\r
7513 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7514 snd_pcm_uframes_t val;
\r
7515 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7516 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7518 result = snd_pcm_sw_params( phandle, sw_params );
\r
7519 if ( result < 0 ) {
\r
7520 snd_pcm_close( phandle );
\r
7521 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7522 errorText_ = errorStream_.str();
\r
7526 #if defined(__RTAUDIO_DEBUG__)
\r
7527 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7528 snd_pcm_sw_params_dump( sw_params, out );
\r
7531 // Set flags for buffer conversion
\r
7532 stream_.doConvertBuffer[mode] = false;
\r
7533 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7534 stream_.doConvertBuffer[mode] = true;
\r
7535 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7536 stream_.doConvertBuffer[mode] = true;
\r
7537 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7538 stream_.nUserChannels[mode] > 1 )
\r
7539 stream_.doConvertBuffer[mode] = true;
\r
7541 // Allocate the ApiHandle if necessary and then save.
\r
7542 AlsaHandle *apiInfo = 0;
\r
7543 if ( stream_.apiHandle == 0 ) {
\r
7545 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7547 catch ( std::bad_alloc& ) {
\r
7548 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7552 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7553 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7557 stream_.apiHandle = (void *) apiInfo;
\r
7558 apiInfo->handles[0] = 0;
\r
7559 apiInfo->handles[1] = 0;
\r
7562 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7564 apiInfo->handles[mode] = phandle;
\r
7567 // Allocate necessary internal buffers.
\r
7568 unsigned long bufferBytes;
\r
7569 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7570 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7571 if ( stream_.userBuffer[mode] == NULL ) {
\r
7572 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7576 if ( stream_.doConvertBuffer[mode] ) {
\r
7578 bool makeBuffer = true;
\r
7579 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7580 if ( mode == INPUT ) {
\r
7581 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7582 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7583 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7587 if ( makeBuffer ) {
\r
7588 bufferBytes *= *bufferSize;
\r
7589 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7590 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7591 if ( stream_.deviceBuffer == NULL ) {
\r
7592 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7598 stream_.sampleRate = sampleRate;
\r
7599 stream_.nBuffers = periods;
\r
7600 stream_.device[mode] = device;
\r
7601 stream_.state = STREAM_STOPPED;
\r
7603 // Setup the buffer conversion information structure.
\r
7604 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7606 // Setup thread if necessary.
\r
7607 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7608 // We had already set up an output stream.
\r
7609 stream_.mode = DUPLEX;
\r
7610 // Link the streams if possible.
\r
7611 apiInfo->synchronized = false;
\r
7612 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7613 apiInfo->synchronized = true;
\r
7615 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7616 error( RtAudioError::WARNING );
\r
7620 stream_.mode = mode;
\r
7622 // Setup callback thread.
\r
7623 stream_.callbackInfo.object = (void *) this;
\r
7625 // Set the thread attributes for joinable and realtime scheduling
\r
7626 // priority (optional). The higher priority will only take affect
\r
7627 // if the program is run as root or suid. Note, under Linux
\r
7628 // processes with CAP_SYS_NICE privilege, a user can change
\r
7629 // scheduling policy and priority (thus need not be root). See
\r
7630 // POSIX "capabilities".
\r
7631 pthread_attr_t attr;
\r
7632 pthread_attr_init( &attr );
\r
7633 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7635 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7636 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7637 // We previously attempted to increase the audio callback priority
\r
7638 // to SCHED_RR here via the attributes. However, while no errors
\r
7639 // were reported in doing so, it did not work. So, now this is
\r
7640 // done in the alsaCallbackHandler function.
\r
7641 stream_.callbackInfo.doRealtime = true;
\r
7642 int priority = options->priority;
\r
7643 int min = sched_get_priority_min( SCHED_RR );
\r
7644 int max = sched_get_priority_max( SCHED_RR );
\r
7645 if ( priority < min ) priority = min;
\r
7646 else if ( priority > max ) priority = max;
\r
7647 stream_.callbackInfo.priority = priority;
\r
7651 stream_.callbackInfo.isRunning = true;
\r
7652 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7653 pthread_attr_destroy( &attr );
\r
7655 stream_.callbackInfo.isRunning = false;
\r
7656 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7665 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7666 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7667 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7669 stream_.apiHandle = 0;
\r
7672 if ( phandle) snd_pcm_close( phandle );
\r
7674 for ( int i=0; i<2; i++ ) {
\r
7675 if ( stream_.userBuffer[i] ) {
\r
7676 free( stream_.userBuffer[i] );
\r
7677 stream_.userBuffer[i] = 0;
\r
7681 if ( stream_.deviceBuffer ) {
\r
7682 free( stream_.deviceBuffer );
\r
7683 stream_.deviceBuffer = 0;
\r
7686 stream_.state = STREAM_CLOSED;
\r
7690 void RtApiAlsa :: closeStream()
\r
7692 if ( stream_.state == STREAM_CLOSED ) {
\r
7693 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7694 error( RtAudioError::WARNING );
\r
7698 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7699 stream_.callbackInfo.isRunning = false;
\r
7700 MUTEX_LOCK( &stream_.mutex );
\r
7701 if ( stream_.state == STREAM_STOPPED ) {
\r
7702 apiInfo->runnable = true;
\r
7703 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7705 MUTEX_UNLOCK( &stream_.mutex );
\r
7706 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7708 if ( stream_.state == STREAM_RUNNING ) {
\r
7709 stream_.state = STREAM_STOPPED;
\r
7710 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7711 snd_pcm_drop( apiInfo->handles[0] );
\r
7712 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7713 snd_pcm_drop( apiInfo->handles[1] );
\r
7717 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7718 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7719 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7721 stream_.apiHandle = 0;
\r
7724 for ( int i=0; i<2; i++ ) {
\r
7725 if ( stream_.userBuffer[i] ) {
\r
7726 free( stream_.userBuffer[i] );
\r
7727 stream_.userBuffer[i] = 0;
\r
7731 if ( stream_.deviceBuffer ) {
\r
7732 free( stream_.deviceBuffer );
\r
7733 stream_.deviceBuffer = 0;
\r
7736 stream_.mode = UNINITIALIZED;
\r
7737 stream_.state = STREAM_CLOSED;
\r
7740 void RtApiAlsa :: startStream()
\r
7742 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7745 if ( stream_.state == STREAM_RUNNING ) {
\r
7746 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7747 error( RtAudioError::WARNING );
\r
7751 MUTEX_LOCK( &stream_.mutex );
\r
7754 snd_pcm_state_t state;
\r
7755 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7756 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7757 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7758 state = snd_pcm_state( handle[0] );
\r
7759 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7760 result = snd_pcm_prepare( handle[0] );
\r
7761 if ( result < 0 ) {
\r
7762 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7763 errorText_ = errorStream_.str();
\r
7769 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7770 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7771 state = snd_pcm_state( handle[1] );
\r
7772 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7773 result = snd_pcm_prepare( handle[1] );
\r
7774 if ( result < 0 ) {
\r
7775 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7776 errorText_ = errorStream_.str();
\r
7782 stream_.state = STREAM_RUNNING;
\r
7785 apiInfo->runnable = true;
\r
7786 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7787 MUTEX_UNLOCK( &stream_.mutex );
\r
7789 if ( result >= 0 ) return;
\r
7790 error( RtAudioError::SYSTEM_ERROR );
\r
7793 void RtApiAlsa :: stopStream()
\r
7796 if ( stream_.state == STREAM_STOPPED ) {
\r
7797 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7798 error( RtAudioError::WARNING );
\r
7802 stream_.state = STREAM_STOPPED;
\r
7803 MUTEX_LOCK( &stream_.mutex );
\r
7806 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7807 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7808 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7809 if ( apiInfo->synchronized )
\r
7810 result = snd_pcm_drop( handle[0] );
\r
7812 result = snd_pcm_drain( handle[0] );
\r
7813 if ( result < 0 ) {
\r
7814 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7815 errorText_ = errorStream_.str();
\r
7820 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7821 result = snd_pcm_drop( handle[1] );
\r
7822 if ( result < 0 ) {
\r
7823 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7824 errorText_ = errorStream_.str();
\r
7830 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7831 MUTEX_UNLOCK( &stream_.mutex );
\r
7833 if ( result >= 0 ) return;
\r
7834 error( RtAudioError::SYSTEM_ERROR );
\r
7837 void RtApiAlsa :: abortStream()
\r
7840 if ( stream_.state == STREAM_STOPPED ) {
\r
7841 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7842 error( RtAudioError::WARNING );
\r
7846 stream_.state = STREAM_STOPPED;
\r
7847 MUTEX_LOCK( &stream_.mutex );
\r
7850 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7851 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7852 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7853 result = snd_pcm_drop( handle[0] );
\r
7854 if ( result < 0 ) {
\r
7855 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7856 errorText_ = errorStream_.str();
\r
7861 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7862 result = snd_pcm_drop( handle[1] );
\r
7863 if ( result < 0 ) {
\r
7864 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7865 errorText_ = errorStream_.str();
\r
7871 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7872 MUTEX_UNLOCK( &stream_.mutex );
\r
7874 if ( result >= 0 ) return;
\r
7875 error( RtAudioError::SYSTEM_ERROR );
\r
7878 void RtApiAlsa :: callbackEvent()
\r
7880 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7881 if ( stream_.state == STREAM_STOPPED ) {
\r
7882 MUTEX_LOCK( &stream_.mutex );
\r
7883 while ( !apiInfo->runnable )
\r
7884 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7886 if ( stream_.state != STREAM_RUNNING ) {
\r
7887 MUTEX_UNLOCK( &stream_.mutex );
\r
7890 MUTEX_UNLOCK( &stream_.mutex );
\r
7893 if ( stream_.state == STREAM_CLOSED ) {
\r
7894 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7895 error( RtAudioError::WARNING );
\r
7899 int doStopStream = 0;
\r
7900 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7901 double streamTime = getStreamTime();
\r
7902 RtAudioStreamStatus status = 0;
\r
7903 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7904 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7905 apiInfo->xrun[0] = false;
\r
7907 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7908 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7909 apiInfo->xrun[1] = false;
\r
7911 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7912 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7914 if ( doStopStream == 2 ) {
\r
7919 MUTEX_LOCK( &stream_.mutex );
\r
7921 // The state might change while waiting on a mutex.
\r
7922 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7927 snd_pcm_t **handle;
\r
7928 snd_pcm_sframes_t frames;
\r
7929 RtAudioFormat format;
\r
7930 handle = (snd_pcm_t **) apiInfo->handles;
\r
7932 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7934 // Setup parameters.
\r
7935 if ( stream_.doConvertBuffer[1] ) {
\r
7936 buffer = stream_.deviceBuffer;
\r
7937 channels = stream_.nDeviceChannels[1];
\r
7938 format = stream_.deviceFormat[1];
\r
7941 buffer = stream_.userBuffer[1];
\r
7942 channels = stream_.nUserChannels[1];
\r
7943 format = stream_.userFormat;
\r
7946 // Read samples from device in interleaved/non-interleaved format.
\r
7947 if ( stream_.deviceInterleaved[1] )
\r
7948 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
7950 void *bufs[channels];
\r
7951 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7952 for ( int i=0; i<channels; i++ )
\r
7953 bufs[i] = (void *) (buffer + (i * offset));
\r
7954 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
7957 if ( result < (int) stream_.bufferSize ) {
\r
7958 // Either an error or overrun occured.
\r
7959 if ( result == -EPIPE ) {
\r
7960 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7961 if ( state == SND_PCM_STATE_XRUN ) {
\r
7962 apiInfo->xrun[1] = true;
\r
7963 result = snd_pcm_prepare( handle[1] );
\r
7964 if ( result < 0 ) {
\r
7965 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7966 errorText_ = errorStream_.str();
\r
7970 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7971 errorText_ = errorStream_.str();
\r
7975 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7976 errorText_ = errorStream_.str();
\r
7978 error( RtAudioError::WARNING );
\r
7982 // Do byte swapping if necessary.
\r
7983 if ( stream_.doByteSwap[1] )
\r
7984 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
7986 // Do buffer conversion if necessary.
\r
7987 if ( stream_.doConvertBuffer[1] )
\r
7988 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7990 // Check stream latency
\r
7991 result = snd_pcm_delay( handle[1], &frames );
\r
7992 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
7997 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7999 // Setup parameters and do buffer conversion if necessary.
\r
8000 if ( stream_.doConvertBuffer[0] ) {
\r
8001 buffer = stream_.deviceBuffer;
\r
8002 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
8003 channels = stream_.nDeviceChannels[0];
\r
8004 format = stream_.deviceFormat[0];
\r
8007 buffer = stream_.userBuffer[0];
\r
8008 channels = stream_.nUserChannels[0];
\r
8009 format = stream_.userFormat;
\r
8012 // Do byte swapping if necessary.
\r
8013 if ( stream_.doByteSwap[0] )
\r
8014 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
8016 // Write samples to device in interleaved/non-interleaved format.
\r
8017 if ( stream_.deviceInterleaved[0] )
\r
8018 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
8020 void *bufs[channels];
\r
8021 size_t offset = stream_.bufferSize * formatBytes( format );
\r
8022 for ( int i=0; i<channels; i++ )
\r
8023 bufs[i] = (void *) (buffer + (i * offset));
\r
8024 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
8027 if ( result < (int) stream_.bufferSize ) {
\r
8028 // Either an error or underrun occured.
\r
8029 if ( result == -EPIPE ) {
\r
8030 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
8031 if ( state == SND_PCM_STATE_XRUN ) {
\r
8032 apiInfo->xrun[0] = true;
\r
8033 result = snd_pcm_prepare( handle[0] );
\r
8034 if ( result < 0 ) {
\r
8035 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
8036 errorText_ = errorStream_.str();
\r
8039 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
\r
8042 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8043 errorText_ = errorStream_.str();
\r
8047 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
8048 errorText_ = errorStream_.str();
\r
8050 error( RtAudioError::WARNING );
\r
8054 // Check stream latency
\r
8055 result = snd_pcm_delay( handle[0], &frames );
\r
8056 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
8060 MUTEX_UNLOCK( &stream_.mutex );
\r
8062 RtApi::tickStreamTime();
\r
8063 if ( doStopStream == 1 ) this->stopStream();
\r
8066 static void *alsaCallbackHandler( void *ptr )
\r
8068 CallbackInfo *info = (CallbackInfo *) ptr;
\r
8069 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
8070 bool *isRunning = &info->isRunning;
\r
8072 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
8073 if ( &info->doRealtime ) {
\r
8074 pthread_t tID = pthread_self(); // ID of this thread
\r
8075 sched_param prio = { info->priority }; // scheduling priority of thread
\r
8076 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
8080 while ( *isRunning == true ) {
\r
8081 pthread_testcancel();
\r
8082 object->callbackEvent();
\r
8085 pthread_exit( NULL );
\r
8088 //******************** End of __LINUX_ALSA__ *********************//
\r
8091 #if defined(__LINUX_PULSE__)
\r
8093 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8094 // and Tristan Matthews.
\r
8096 #include <pulse/error.h>
\r
8097 #include <pulse/simple.h>
\r
8100 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
\r
8101 44100, 48000, 96000, 0};
\r
8103 struct rtaudio_pa_format_mapping_t {
\r
8104 RtAudioFormat rtaudio_format;
\r
8105 pa_sample_format_t pa_format;
\r
8108 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8109 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8110 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8111 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8112 {0, PA_SAMPLE_INVALID}};
\r
8114 struct PulseAudioHandle {
\r
8115 pa_simple *s_play;
\r
8118 pthread_cond_t runnable_cv;
\r
8120 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8123 RtApiPulse::~RtApiPulse()
\r
8125 if ( stream_.state != STREAM_CLOSED )
\r
8129 unsigned int RtApiPulse::getDeviceCount( void )
\r
8134 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8136 RtAudio::DeviceInfo info;
\r
8137 info.probed = true;
\r
8138 info.name = "PulseAudio";
\r
8139 info.outputChannels = 2;
\r
8140 info.inputChannels = 2;
\r
8141 info.duplexChannels = 2;
\r
8142 info.isDefaultOutput = true;
\r
8143 info.isDefaultInput = true;
\r
8145 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8146 info.sampleRates.push_back( *sr );
\r
8148 info.preferredSampleRate = 48000;
\r
8149 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8154 static void *pulseaudio_callback( void * user )
\r
8156 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8157 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8158 volatile bool *isRunning = &cbi->isRunning;
\r
8160 while ( *isRunning ) {
\r
8161 pthread_testcancel();
\r
8162 context->callbackEvent();
\r
8165 pthread_exit( NULL );
\r
8168 void RtApiPulse::closeStream( void )
\r
8170 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8172 stream_.callbackInfo.isRunning = false;
\r
8174 MUTEX_LOCK( &stream_.mutex );
\r
8175 if ( stream_.state == STREAM_STOPPED ) {
\r
8176 pah->runnable = true;
\r
8177 pthread_cond_signal( &pah->runnable_cv );
\r
8179 MUTEX_UNLOCK( &stream_.mutex );
\r
8181 pthread_join( pah->thread, 0 );
\r
8182 if ( pah->s_play ) {
\r
8183 pa_simple_flush( pah->s_play, NULL );
\r
8184 pa_simple_free( pah->s_play );
\r
8187 pa_simple_free( pah->s_rec );
\r
8189 pthread_cond_destroy( &pah->runnable_cv );
\r
8191 stream_.apiHandle = 0;
\r
8194 if ( stream_.userBuffer[0] ) {
\r
8195 free( stream_.userBuffer[0] );
\r
8196 stream_.userBuffer[0] = 0;
\r
8198 if ( stream_.userBuffer[1] ) {
\r
8199 free( stream_.userBuffer[1] );
\r
8200 stream_.userBuffer[1] = 0;
\r
8203 stream_.state = STREAM_CLOSED;
\r
8204 stream_.mode = UNINITIALIZED;
\r
8207 void RtApiPulse::callbackEvent( void )
\r
8209 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8211 if ( stream_.state == STREAM_STOPPED ) {
\r
8212 MUTEX_LOCK( &stream_.mutex );
\r
8213 while ( !pah->runnable )
\r
8214 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8216 if ( stream_.state != STREAM_RUNNING ) {
\r
8217 MUTEX_UNLOCK( &stream_.mutex );
\r
8220 MUTEX_UNLOCK( &stream_.mutex );
\r
8223 if ( stream_.state == STREAM_CLOSED ) {
\r
8224 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8225 "this shouldn't happen!";
\r
8226 error( RtAudioError::WARNING );
\r
8230 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8231 double streamTime = getStreamTime();
\r
8232 RtAudioStreamStatus status = 0;
\r
8233 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8234 stream_.bufferSize, streamTime, status,
\r
8235 stream_.callbackInfo.userData );
\r
8237 if ( doStopStream == 2 ) {
\r
8242 MUTEX_LOCK( &stream_.mutex );
\r
8243 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8244 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8246 if ( stream_.state != STREAM_RUNNING )
\r
8251 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8252 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8253 convertBuffer( stream_.deviceBuffer,
\r
8254 stream_.userBuffer[OUTPUT],
\r
8255 stream_.convertInfo[OUTPUT] );
\r
8256 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8257 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8259 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8260 formatBytes( stream_.userFormat );
\r
8262 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8263 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8264 pa_strerror( pa_error ) << ".";
\r
8265 errorText_ = errorStream_.str();
\r
8266 error( RtAudioError::WARNING );
\r
8270 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8271 if ( stream_.doConvertBuffer[INPUT] )
\r
8272 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8273 formatBytes( stream_.deviceFormat[INPUT] );
\r
8275 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8276 formatBytes( stream_.userFormat );
\r
8278 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8279 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8280 pa_strerror( pa_error ) << ".";
\r
8281 errorText_ = errorStream_.str();
\r
8282 error( RtAudioError::WARNING );
\r
8284 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8285 convertBuffer( stream_.userBuffer[INPUT],
\r
8286 stream_.deviceBuffer,
\r
8287 stream_.convertInfo[INPUT] );
\r
8292 MUTEX_UNLOCK( &stream_.mutex );
\r
8293 RtApi::tickStreamTime();
\r
8295 if ( doStopStream == 1 )
\r
8299 void RtApiPulse::startStream( void )
\r
8301 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8303 if ( stream_.state == STREAM_CLOSED ) {
\r
8304 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8305 error( RtAudioError::INVALID_USE );
\r
8308 if ( stream_.state == STREAM_RUNNING ) {
\r
8309 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8310 error( RtAudioError::WARNING );
\r
8314 MUTEX_LOCK( &stream_.mutex );
\r
8316 stream_.state = STREAM_RUNNING;
\r
8318 pah->runnable = true;
\r
8319 pthread_cond_signal( &pah->runnable_cv );
\r
8320 MUTEX_UNLOCK( &stream_.mutex );
\r
8323 void RtApiPulse::stopStream( void )
\r
8325 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8327 if ( stream_.state == STREAM_CLOSED ) {
\r
8328 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8329 error( RtAudioError::INVALID_USE );
\r
8332 if ( stream_.state == STREAM_STOPPED ) {
\r
8333 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8334 error( RtAudioError::WARNING );
\r
8338 stream_.state = STREAM_STOPPED;
\r
8339 MUTEX_LOCK( &stream_.mutex );
\r
8341 if ( pah && pah->s_play ) {
\r
8343 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8344 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8345 pa_strerror( pa_error ) << ".";
\r
8346 errorText_ = errorStream_.str();
\r
8347 MUTEX_UNLOCK( &stream_.mutex );
\r
8348 error( RtAudioError::SYSTEM_ERROR );
\r
8353 stream_.state = STREAM_STOPPED;
\r
8354 MUTEX_UNLOCK( &stream_.mutex );
\r
8357 void RtApiPulse::abortStream( void )
\r
8359 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8361 if ( stream_.state == STREAM_CLOSED ) {
\r
8362 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8363 error( RtAudioError::INVALID_USE );
\r
8366 if ( stream_.state == STREAM_STOPPED ) {
\r
8367 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8368 error( RtAudioError::WARNING );
\r
8372 stream_.state = STREAM_STOPPED;
\r
8373 MUTEX_LOCK( &stream_.mutex );
\r
8375 if ( pah && pah->s_play ) {
\r
8377 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8378 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8379 pa_strerror( pa_error ) << ".";
\r
8380 errorText_ = errorStream_.str();
\r
8381 MUTEX_UNLOCK( &stream_.mutex );
\r
8382 error( RtAudioError::SYSTEM_ERROR );
\r
8387 stream_.state = STREAM_STOPPED;
\r
8388 MUTEX_UNLOCK( &stream_.mutex );
\r
8391 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8392 unsigned int channels, unsigned int firstChannel,
\r
8393 unsigned int sampleRate, RtAudioFormat format,
\r
8394 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8396 PulseAudioHandle *pah = 0;
\r
8397 unsigned long bufferBytes = 0;
\r
8398 pa_sample_spec ss;
\r
8400 if ( device != 0 ) return false;
\r
8401 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8402 if ( channels != 1 && channels != 2 ) {
\r
8403 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8406 ss.channels = channels;
\r
8408 if ( firstChannel != 0 ) return false;
\r
8410 bool sr_found = false;
\r
8411 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8412 if ( sampleRate == *sr ) {
\r
8414 stream_.sampleRate = sampleRate;
\r
8415 ss.rate = sampleRate;
\r
8419 if ( !sr_found ) {
\r
8420 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8424 bool sf_found = 0;
\r
8425 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8426 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8427 if ( format == sf->rtaudio_format ) {
\r
8429 stream_.userFormat = sf->rtaudio_format;
\r
8430 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8431 ss.format = sf->pa_format;
\r
8435 if ( !sf_found ) { // Use internal data format conversion.
\r
8436 stream_.userFormat = format;
\r
8437 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8438 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8441 // Set other stream parameters.
\r
8442 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8443 else stream_.userInterleaved = true;
\r
8444 stream_.deviceInterleaved[mode] = true;
\r
8445 stream_.nBuffers = 1;
\r
8446 stream_.doByteSwap[mode] = false;
\r
8447 stream_.nUserChannels[mode] = channels;
\r
8448 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8449 stream_.channelOffset[mode] = 0;
\r
8450 std::string streamName = "RtAudio";
\r
8452 // Set flags for buffer conversion.
\r
8453 stream_.doConvertBuffer[mode] = false;
\r
8454 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8455 stream_.doConvertBuffer[mode] = true;
\r
8456 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8457 stream_.doConvertBuffer[mode] = true;
\r
8459 // Allocate necessary internal buffers.
\r
8460 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8461 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8462 if ( stream_.userBuffer[mode] == NULL ) {
\r
8463 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8466 stream_.bufferSize = *bufferSize;
\r
8468 if ( stream_.doConvertBuffer[mode] ) {
\r
8470 bool makeBuffer = true;
\r
8471 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8472 if ( mode == INPUT ) {
\r
8473 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8474 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8475 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8479 if ( makeBuffer ) {
\r
8480 bufferBytes *= *bufferSize;
\r
8481 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8482 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8483 if ( stream_.deviceBuffer == NULL ) {
\r
8484 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8490 stream_.device[mode] = device;
\r
8492 // Setup the buffer conversion information structure.
\r
8493 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8495 if ( !stream_.apiHandle ) {
\r
8496 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8498 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8502 stream_.apiHandle = pah;
\r
8503 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8504 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8508 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8511 if ( options && !options->streamName.empty() ) streamName = options->streamName;
\r
8514 pa_buffer_attr buffer_attr;
\r
8515 buffer_attr.fragsize = bufferBytes;
\r
8516 buffer_attr.maxlength = -1;
\r
8518 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8519 if ( !pah->s_rec ) {
\r
8520 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8525 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8526 if ( !pah->s_play ) {
\r
8527 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8535 if ( stream_.mode == UNINITIALIZED )
\r
8536 stream_.mode = mode;
\r
8537 else if ( stream_.mode == mode )
\r
8540 stream_.mode = DUPLEX;
\r
8542 if ( !stream_.callbackInfo.isRunning ) {
\r
8543 stream_.callbackInfo.object = this;
\r
8544 stream_.callbackInfo.isRunning = true;
\r
8545 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8546 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8551 stream_.state = STREAM_STOPPED;
\r
8555 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8556 pthread_cond_destroy( &pah->runnable_cv );
\r
8558 stream_.apiHandle = 0;
\r
8561 for ( int i=0; i<2; i++ ) {
\r
8562 if ( stream_.userBuffer[i] ) {
\r
8563 free( stream_.userBuffer[i] );
\r
8564 stream_.userBuffer[i] = 0;
\r
8568 if ( stream_.deviceBuffer ) {
\r
8569 free( stream_.deviceBuffer );
\r
8570 stream_.deviceBuffer = 0;
\r
8576 //******************** End of __LINUX_PULSE__ *********************//
\r
8579 #if defined(__LINUX_OSS__)
\r
8581 #include <unistd.h>
\r
8582 #include <sys/ioctl.h>
\r
8583 #include <unistd.h>
\r
8584 #include <fcntl.h>
\r
8585 #include <sys/soundcard.h>
\r
8586 #include <errno.h>
\r
8589 static void *ossCallbackHandler(void * ptr);
\r
8591 // A structure to hold various information related to the OSS API
\r
8592 // implementation.
\r
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];               // device ids (0 = output, 1 = input)
  bool xrun[2];            // over/underrun flags per direction
  bool triggered;          // whether the device trigger has been issued
  pthread_cond_t runnable; // wakes the callback thread on start/close

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
8603 RtApiOss :: RtApiOss()
\r
8605 // Nothing to do here.
\r
8608 RtApiOss :: ~RtApiOss()
\r
8610 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8613 unsigned int RtApiOss :: getDeviceCount( void )
\r
8615 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8616 if ( mixerfd == -1 ) {
\r
8617 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8618 error( RtAudioError::WARNING );
\r
8622 oss_sysinfo sysinfo;
\r
8623 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8625 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8626 error( RtAudioError::WARNING );
\r
8631 return sysinfo.numaudios;
\r
8634 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8636 RtAudio::DeviceInfo info;
\r
8637 info.probed = false;
\r
8639 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8640 if ( mixerfd == -1 ) {
\r
8641 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8642 error( RtAudioError::WARNING );
\r
8646 oss_sysinfo sysinfo;
\r
8647 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8648 if ( result == -1 ) {
\r
8650 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8651 error( RtAudioError::WARNING );
\r
8655 unsigned nDevices = sysinfo.numaudios;
\r
8656 if ( nDevices == 0 ) {
\r
8658 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8659 error( RtAudioError::INVALID_USE );
\r
8663 if ( device >= nDevices ) {
\r
8665 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8666 error( RtAudioError::INVALID_USE );
\r
8670 oss_audioinfo ainfo;
\r
8671 ainfo.dev = device;
\r
8672 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8674 if ( result == -1 ) {
\r
8675 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8676 errorText_ = errorStream_.str();
\r
8677 error( RtAudioError::WARNING );
\r
8682 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8683 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8684 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8685 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8686 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8689 // Probe data formats ... do for input
\r
8690 unsigned long mask = ainfo.iformats;
\r
8691 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8692 info.nativeFormats |= RTAUDIO_SINT16;
\r
8693 if ( mask & AFMT_S8 )
\r
8694 info.nativeFormats |= RTAUDIO_SINT8;
\r
8695 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8696 info.nativeFormats |= RTAUDIO_SINT32;
\r
8697 if ( mask & AFMT_FLOAT )
\r
8698 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8699 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8700 info.nativeFormats |= RTAUDIO_SINT24;
\r
8702 // Check that we have at least one supported format
\r
8703 if ( info.nativeFormats == 0 ) {
\r
8704 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8705 errorText_ = errorStream_.str();
\r
8706 error( RtAudioError::WARNING );
\r
8710 // Probe the supported sample rates.
\r
8711 info.sampleRates.clear();
\r
8712 if ( ainfo.nrates ) {
\r
8713 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8714 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8715 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8716 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8718 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8719 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8727 // Check min and max rate values;
\r
8728 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8729 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
\r
8730 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8732 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8733 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8738 if ( info.sampleRates.size() == 0 ) {
\r
8739 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8740 errorText_ = errorStream_.str();
\r
8741 error( RtAudioError::WARNING );
\r
8744 info.probed = true;
\r
8745 info.name = ainfo.name;
\r
8752 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8753 unsigned int firstChannel, unsigned int sampleRate,
\r
8754 RtAudioFormat format, unsigned int *bufferSize,
\r
8755 RtAudio::StreamOptions *options )
\r
8757 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8758 if ( mixerfd == -1 ) {
\r
8759 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8763 oss_sysinfo sysinfo;
\r
8764 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8765 if ( result == -1 ) {
\r
8767 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8771 unsigned nDevices = sysinfo.numaudios;
\r
8772 if ( nDevices == 0 ) {
\r
8773 // This should not happen because a check is made before this function is called.
\r
8775 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8779 if ( device >= nDevices ) {
\r
8780 // This should not happen because a check is made before this function is called.
\r
8782 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8786 oss_audioinfo ainfo;
\r
8787 ainfo.dev = device;
\r
8788 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8790 if ( result == -1 ) {
\r
8791 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8792 errorText_ = errorStream_.str();
\r
8796 // Check if device supports input or output
\r
8797 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8798 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8799 if ( mode == OUTPUT )
\r
8800 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8802 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8803 errorText_ = errorStream_.str();
\r
8808 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8809 if ( mode == OUTPUT )
\r
8810 flags |= O_WRONLY;
\r
8811 else { // mode == INPUT
\r
8812 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8813 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8814 close( handle->id[0] );
\r
8815 handle->id[0] = 0;
\r
8816 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8817 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8818 errorText_ = errorStream_.str();
\r
8821 // Check that the number previously set channels is the same.
\r
8822 if ( stream_.nUserChannels[0] != channels ) {
\r
8823 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8824 errorText_ = errorStream_.str();
\r
8830 flags |= O_RDONLY;
\r
8833 // Set exclusive access if specified.
\r
8834 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8836 // Try to open the device.
\r
8838 fd = open( ainfo.devnode, flags, 0 );
\r
8840 if ( errno == EBUSY )
\r
8841 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8843 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8844 errorText_ = errorStream_.str();
\r
8848 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8850 if ( flags | O_RDWR ) {
\r
8851 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8852 if ( result == -1) {
\r
8853 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8854 errorText_ = errorStream_.str();
\r
8860 // Check the device channel support.
\r
8861 stream_.nUserChannels[mode] = channels;
\r
8862 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8864 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8865 errorText_ = errorStream_.str();
\r
8869 // Set the number of channels.
\r
8870 int deviceChannels = channels + firstChannel;
\r
8871 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8872 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8874 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8875 errorText_ = errorStream_.str();
\r
8878 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8880 // Get the data format mask
\r
8882 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8883 if ( result == -1 ) {
\r
8885 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8886 errorText_ = errorStream_.str();
\r
8890 // Determine how to set the device format.
\r
8891 stream_.userFormat = format;
\r
8892 int deviceFormat = -1;
\r
8893 stream_.doByteSwap[mode] = false;
\r
8894 if ( format == RTAUDIO_SINT8 ) {
\r
8895 if ( mask & AFMT_S8 ) {
\r
8896 deviceFormat = AFMT_S8;
\r
8897 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8900 else if ( format == RTAUDIO_SINT16 ) {
\r
8901 if ( mask & AFMT_S16_NE ) {
\r
8902 deviceFormat = AFMT_S16_NE;
\r
8903 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8905 else if ( mask & AFMT_S16_OE ) {
\r
8906 deviceFormat = AFMT_S16_OE;
\r
8907 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8908 stream_.doByteSwap[mode] = true;
\r
8911 else if ( format == RTAUDIO_SINT24 ) {
\r
8912 if ( mask & AFMT_S24_NE ) {
\r
8913 deviceFormat = AFMT_S24_NE;
\r
8914 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8916 else if ( mask & AFMT_S24_OE ) {
\r
8917 deviceFormat = AFMT_S24_OE;
\r
8918 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8919 stream_.doByteSwap[mode] = true;
\r
8922 else if ( format == RTAUDIO_SINT32 ) {
\r
8923 if ( mask & AFMT_S32_NE ) {
\r
8924 deviceFormat = AFMT_S32_NE;
\r
8925 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8927 else if ( mask & AFMT_S32_OE ) {
\r
8928 deviceFormat = AFMT_S32_OE;
\r
8929 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8930 stream_.doByteSwap[mode] = true;
\r
8934 if ( deviceFormat == -1 ) {
\r
8935 // The user requested format is not natively supported by the device.
\r
8936 if ( mask & AFMT_S16_NE ) {
\r
8937 deviceFormat = AFMT_S16_NE;
\r
8938 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8940 else if ( mask & AFMT_S32_NE ) {
\r
8941 deviceFormat = AFMT_S32_NE;
\r
8942 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8944 else if ( mask & AFMT_S24_NE ) {
\r
8945 deviceFormat = AFMT_S24_NE;
\r
8946 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8948 else if ( mask & AFMT_S16_OE ) {
\r
8949 deviceFormat = AFMT_S16_OE;
\r
8950 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8951 stream_.doByteSwap[mode] = true;
\r
8953 else if ( mask & AFMT_S32_OE ) {
\r
8954 deviceFormat = AFMT_S32_OE;
\r
8955 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8956 stream_.doByteSwap[mode] = true;
\r
8958 else if ( mask & AFMT_S24_OE ) {
\r
8959 deviceFormat = AFMT_S24_OE;
\r
8960 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8961 stream_.doByteSwap[mode] = true;
\r
8963 else if ( mask & AFMT_S8) {
\r
8964 deviceFormat = AFMT_S8;
\r
8965 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8969 if ( stream_.deviceFormat[mode] == 0 ) {
\r
8970 // This really shouldn't happen ...
\r
8972 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8973 errorText_ = errorStream_.str();
\r
8977 // Set the data format.
\r
8978 int temp = deviceFormat;
\r
8979 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
8980 if ( result == -1 || deviceFormat != temp ) {
\r
8982 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
8983 errorText_ = errorStream_.str();
\r
8987 // Attempt to set the buffer size. According to OSS, the minimum
\r
8988 // number of buffers is two. The supposed minimum buffer size is 16
\r
8989 // bytes, so that will be our lower bound. The argument to this
\r
8990 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
8991 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
8992 // We'll check the actual value used near the end of the setup
\r
8994 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
8995 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
8997 if ( options ) buffers = options->numberOfBuffers;
\r
8998 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
8999 if ( buffers < 2 ) buffers = 3;
\r
9000 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
9001 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
9002 if ( result == -1 ) {
\r
9004 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
9005 errorText_ = errorStream_.str();
\r
9008 stream_.nBuffers = buffers;
\r
9010 // Save buffer size (in sample frames).
\r
9011 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
9012 stream_.bufferSize = *bufferSize;
\r
9014 // Set the sample rate.
\r
9015 int srate = sampleRate;
\r
9016 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
9017 if ( result == -1 ) {
\r
9019 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
9020 errorText_ = errorStream_.str();
\r
9024 // Verify the sample rate setup worked.
\r
9025 if ( abs( srate - sampleRate ) > 100 ) {
\r
9027 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
9028 errorText_ = errorStream_.str();
\r
9031 stream_.sampleRate = sampleRate;
\r
9033 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
9034 // We're doing duplex setup here.
\r
9035 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
9036 stream_.nDeviceChannels[0] = deviceChannels;
\r
9039 // Set interleaving parameters.
\r
9040 stream_.userInterleaved = true;
\r
9041 stream_.deviceInterleaved[mode] = true;
\r
9042 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
9043 stream_.userInterleaved = false;
\r
9045 // Set flags for buffer conversion
\r
9046 stream_.doConvertBuffer[mode] = false;
\r
9047 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
9048 stream_.doConvertBuffer[mode] = true;
\r
9049 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
9050 stream_.doConvertBuffer[mode] = true;
\r
9051 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
9052 stream_.nUserChannels[mode] > 1 )
\r
9053 stream_.doConvertBuffer[mode] = true;
\r
9055 // Allocate the stream handles if necessary and then save.
\r
9056 if ( stream_.apiHandle == 0 ) {
\r
9058 handle = new OssHandle;
\r
9060 catch ( std::bad_alloc& ) {
\r
9061 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
9065 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
9066 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
9070 stream_.apiHandle = (void *) handle;
\r
9073 handle = (OssHandle *) stream_.apiHandle;
\r
9075 handle->id[mode] = fd;
\r
9077 // Allocate necessary internal buffers.
\r
9078 unsigned long bufferBytes;
\r
9079 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
9080 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
9081 if ( stream_.userBuffer[mode] == NULL ) {
\r
9082 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
9086 if ( stream_.doConvertBuffer[mode] ) {
\r
9088 bool makeBuffer = true;
\r
9089 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
9090 if ( mode == INPUT ) {
\r
9091 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
9092 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
9093 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9097 if ( makeBuffer ) {
\r
9098 bufferBytes *= *bufferSize;
\r
9099 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9100 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9101 if ( stream_.deviceBuffer == NULL ) {
\r
9102 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9108 stream_.device[mode] = device;
\r
9109 stream_.state = STREAM_STOPPED;
\r
9111 // Setup the buffer conversion information structure.
\r
9112 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9114 // Setup thread if necessary.
\r
9115 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9116 // We had already set up an output stream.
\r
9117 stream_.mode = DUPLEX;
\r
9118 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9121 stream_.mode = mode;
\r
9123 // Setup callback thread.
\r
9124 stream_.callbackInfo.object = (void *) this;
\r
9126 // Set the thread attributes for joinable and realtime scheduling
\r
9127 // priority. The higher priority will only take affect if the
\r
9128 // program is run as root or suid.
\r
9129 pthread_attr_t attr;
\r
9130 pthread_attr_init( &attr );
\r
9131 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9132 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9133 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9134 struct sched_param param;
\r
9135 int priority = options->priority;
\r
9136 int min = sched_get_priority_min( SCHED_RR );
\r
9137 int max = sched_get_priority_max( SCHED_RR );
\r
9138 if ( priority < min ) priority = min;
\r
9139 else if ( priority > max ) priority = max;
\r
9140 param.sched_priority = priority;
\r
9141 pthread_attr_setschedparam( &attr, ¶m );
\r
9142 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9145 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9147 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9150 stream_.callbackInfo.isRunning = true;
\r
9151 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9152 pthread_attr_destroy( &attr );
\r
9154 stream_.callbackInfo.isRunning = false;
\r
9155 errorText_ = "RtApiOss::error creating callback thread!";
\r
9164 pthread_cond_destroy( &handle->runnable );
\r
9165 if ( handle->id[0] ) close( handle->id[0] );
\r
9166 if ( handle->id[1] ) close( handle->id[1] );
\r
9168 stream_.apiHandle = 0;
\r
9171 for ( int i=0; i<2; i++ ) {
\r
9172 if ( stream_.userBuffer[i] ) {
\r
9173 free( stream_.userBuffer[i] );
\r
9174 stream_.userBuffer[i] = 0;
\r
9178 if ( stream_.deviceBuffer ) {
\r
9179 free( stream_.deviceBuffer );
\r
9180 stream_.deviceBuffer = 0;
\r
9186 void RtApiOss :: closeStream()
\r
9188 if ( stream_.state == STREAM_CLOSED ) {
\r
9189 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9190 error( RtAudioError::WARNING );
\r
9194 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9195 stream_.callbackInfo.isRunning = false;
\r
9196 MUTEX_LOCK( &stream_.mutex );
\r
9197 if ( stream_.state == STREAM_STOPPED )
\r
9198 pthread_cond_signal( &handle->runnable );
\r
9199 MUTEX_UNLOCK( &stream_.mutex );
\r
9200 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9202 if ( stream_.state == STREAM_RUNNING ) {
\r
9203 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9204 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9206 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9207 stream_.state = STREAM_STOPPED;
\r
9211 pthread_cond_destroy( &handle->runnable );
\r
9212 if ( handle->id[0] ) close( handle->id[0] );
\r
9213 if ( handle->id[1] ) close( handle->id[1] );
\r
9215 stream_.apiHandle = 0;
\r
9218 for ( int i=0; i<2; i++ ) {
\r
9219 if ( stream_.userBuffer[i] ) {
\r
9220 free( stream_.userBuffer[i] );
\r
9221 stream_.userBuffer[i] = 0;
\r
9225 if ( stream_.deviceBuffer ) {
\r
9226 free( stream_.deviceBuffer );
\r
9227 stream_.deviceBuffer = 0;
\r
9230 stream_.mode = UNINITIALIZED;
\r
9231 stream_.state = STREAM_CLOSED;
\r
9234 void RtApiOss :: startStream()
\r
9237 if ( stream_.state == STREAM_RUNNING ) {
\r
9238 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9239 error( RtAudioError::WARNING );
\r
9243 MUTEX_LOCK( &stream_.mutex );
\r
9245 stream_.state = STREAM_RUNNING;
\r
9247 // No need to do anything else here ... OSS automatically starts
\r
9248 // when fed samples.
\r
9250 MUTEX_UNLOCK( &stream_.mutex );
\r
9252 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9253 pthread_cond_signal( &handle->runnable );
\r
9256 void RtApiOss :: stopStream()
\r
9259 if ( stream_.state == STREAM_STOPPED ) {
\r
9260 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9261 error( RtAudioError::WARNING );
\r
9265 MUTEX_LOCK( &stream_.mutex );
\r
9267 // The state might change while waiting on a mutex.
\r
9268 if ( stream_.state == STREAM_STOPPED ) {
\r
9269 MUTEX_UNLOCK( &stream_.mutex );
\r
9274 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9275 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9277 // Flush the output with zeros a few times.
\r
9280 RtAudioFormat format;
\r
9282 if ( stream_.doConvertBuffer[0] ) {
\r
9283 buffer = stream_.deviceBuffer;
\r
9284 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9285 format = stream_.deviceFormat[0];
\r
9288 buffer = stream_.userBuffer[0];
\r
9289 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9290 format = stream_.userFormat;
\r
9293 memset( buffer, 0, samples * formatBytes(format) );
\r
9294 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9295 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9296 if ( result == -1 ) {
\r
9297 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9298 error( RtAudioError::WARNING );
\r
9302 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9303 if ( result == -1 ) {
\r
9304 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9305 errorText_ = errorStream_.str();
\r
9308 handle->triggered = false;
\r
9311 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9312 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9313 if ( result == -1 ) {
\r
9314 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9315 errorText_ = errorStream_.str();
\r
9321 stream_.state = STREAM_STOPPED;
\r
9322 MUTEX_UNLOCK( &stream_.mutex );
\r
9324 if ( result != -1 ) return;
\r
9325 error( RtAudioError::SYSTEM_ERROR );
\r
9328 void RtApiOss :: abortStream()
\r
9331 if ( stream_.state == STREAM_STOPPED ) {
\r
9332 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9333 error( RtAudioError::WARNING );
\r
9337 MUTEX_LOCK( &stream_.mutex );
\r
9339 // The state might change while waiting on a mutex.
\r
9340 if ( stream_.state == STREAM_STOPPED ) {
\r
9341 MUTEX_UNLOCK( &stream_.mutex );
\r
9346 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9347 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9348 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9349 if ( result == -1 ) {
\r
9350 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9351 errorText_ = errorStream_.str();
\r
9354 handle->triggered = false;
\r
9357 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9358 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9359 if ( result == -1 ) {
\r
9360 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9361 errorText_ = errorStream_.str();
\r
9367 stream_.state = STREAM_STOPPED;
\r
9368 MUTEX_UNLOCK( &stream_.mutex );
\r
9370 if ( result != -1 ) return;
\r
9371 error( RtAudioError::SYSTEM_ERROR );
\r
9374 void RtApiOss :: callbackEvent()
\r
9376 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9377 if ( stream_.state == STREAM_STOPPED ) {
\r
9378 MUTEX_LOCK( &stream_.mutex );
\r
9379 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9380 if ( stream_.state != STREAM_RUNNING ) {
\r
9381 MUTEX_UNLOCK( &stream_.mutex );
\r
9384 MUTEX_UNLOCK( &stream_.mutex );
\r
9387 if ( stream_.state == STREAM_CLOSED ) {
\r
9388 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9389 error( RtAudioError::WARNING );
\r
9393 // Invoke user callback to get fresh output data.
\r
9394 int doStopStream = 0;
\r
9395 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9396 double streamTime = getStreamTime();
\r
9397 RtAudioStreamStatus status = 0;
\r
9398 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9399 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9400 handle->xrun[0] = false;
\r
9402 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9403 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9404 handle->xrun[1] = false;
\r
9406 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9407 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9408 if ( doStopStream == 2 ) {
\r
9409 this->abortStream();
\r
9413 MUTEX_LOCK( &stream_.mutex );
\r
9415 // The state might change while waiting on a mutex.
\r
9416 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9421 RtAudioFormat format;
\r
9423 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9425 // Setup parameters and do buffer conversion if necessary.
\r
9426 if ( stream_.doConvertBuffer[0] ) {
\r
9427 buffer = stream_.deviceBuffer;
\r
9428 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9429 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9430 format = stream_.deviceFormat[0];
\r
9433 buffer = stream_.userBuffer[0];
\r
9434 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9435 format = stream_.userFormat;
\r
9438 // Do byte swapping if necessary.
\r
9439 if ( stream_.doByteSwap[0] )
\r
9440 byteSwapBuffer( buffer, samples, format );
\r
9442 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9444 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9445 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9446 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9447 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9448 handle->triggered = true;
\r
9451 // Write samples to device.
\r
9452 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9454 if ( result == -1 ) {
\r
9455 // We'll assume this is an underrun, though there isn't a
\r
9456 // specific means for determining that.
\r
9457 handle->xrun[0] = true;
\r
9458 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9459 error( RtAudioError::WARNING );
\r
9460 // Continue on to input section.
\r
9464 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9466 // Setup parameters.
\r
9467 if ( stream_.doConvertBuffer[1] ) {
\r
9468 buffer = stream_.deviceBuffer;
\r
9469 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9470 format = stream_.deviceFormat[1];
\r
9473 buffer = stream_.userBuffer[1];
\r
9474 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9475 format = stream_.userFormat;
\r
9478 // Read samples from device.
\r
9479 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9481 if ( result == -1 ) {
\r
9482 // We'll assume this is an overrun, though there isn't a
\r
9483 // specific means for determining that.
\r
9484 handle->xrun[1] = true;
\r
9485 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9486 error( RtAudioError::WARNING );
\r
9490 // Do byte swapping if necessary.
\r
9491 if ( stream_.doByteSwap[1] )
\r
9492 byteSwapBuffer( buffer, samples, format );
\r
9494 // Do buffer conversion if necessary.
\r
9495 if ( stream_.doConvertBuffer[1] )
\r
9496 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9500 MUTEX_UNLOCK( &stream_.mutex );
\r
9502 RtApi::tickStreamTime();
\r
9503 if ( doStopStream == 1 ) this->stopStream();
\r
9506 static void *ossCallbackHandler( void *ptr )
\r
9508 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9509 RtApiOss *object = (RtApiOss *) info->object;
\r
9510 bool *isRunning = &info->isRunning;
\r
9512 while ( *isRunning == true ) {
\r
9513 pthread_testcancel();
\r
9514 object->callbackEvent();
\r
9517 pthread_exit( NULL );
\r
9520 //******************** End of __LINUX_OSS__ *********************//
\r
9524 // *************************************************** //
\r
9526 // Protected common (OS-independent) RtAudio methods.
\r
9528 // *************************************************** //
\r
9530 // This method can be modified to control the behavior of error
\r
9531 // message printing.
\r
9532 void RtApi :: error( RtAudioError::Type type )
\r
9534 errorStream_.str(""); // clear the ostringstream
\r
9536 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9537 if ( errorCallback ) {
\r
9538 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9540 if ( firstErrorOccurred_ )
\r
9543 firstErrorOccurred_ = true;
\r
9544 const std::string errorMessage = errorText_;
\r
9546 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9547 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9551 errorCallback( type, errorMessage );
\r
9552 firstErrorOccurred_ = false;
\r
9556 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9557 std::cerr << '\n' << errorText_ << "\n\n";
\r
9558 else if ( type != RtAudioError::WARNING )
\r
9559 throw( RtAudioError( errorText_, type ) );
\r
9562 void RtApi :: verifyStream()
\r
9564 if ( stream_.state == STREAM_CLOSED ) {
\r
9565 errorText_ = "RtApi:: a stream is not open!";
\r
9566 error( RtAudioError::INVALID_USE );
\r
9570 void RtApi :: clearStreamInfo()
\r
9572 stream_.mode = UNINITIALIZED;
\r
9573 stream_.state = STREAM_CLOSED;
\r
9574 stream_.sampleRate = 0;
\r
9575 stream_.bufferSize = 0;
\r
9576 stream_.nBuffers = 0;
\r
9577 stream_.userFormat = 0;
\r
9578 stream_.userInterleaved = true;
\r
9579 stream_.streamTime = 0.0;
\r
9580 stream_.apiHandle = 0;
\r
9581 stream_.deviceBuffer = 0;
\r
9582 stream_.callbackInfo.callback = 0;
\r
9583 stream_.callbackInfo.userData = 0;
\r
9584 stream_.callbackInfo.isRunning = false;
\r
9585 stream_.callbackInfo.errorCallback = 0;
\r
9586 for ( int i=0; i<2; i++ ) {
\r
9587 stream_.device[i] = 11111;
\r
9588 stream_.doConvertBuffer[i] = false;
\r
9589 stream_.deviceInterleaved[i] = true;
\r
9590 stream_.doByteSwap[i] = false;
\r
9591 stream_.nUserChannels[i] = 0;
\r
9592 stream_.nDeviceChannels[i] = 0;
\r
9593 stream_.channelOffset[i] = 0;
\r
9594 stream_.deviceFormat[i] = 0;
\r
9595 stream_.latency[i] = 0;
\r
9596 stream_.userBuffer[i] = 0;
\r
9597 stream_.convertInfo[i].channels = 0;
\r
9598 stream_.convertInfo[i].inJump = 0;
\r
9599 stream_.convertInfo[i].outJump = 0;
\r
9600 stream_.convertInfo[i].inFormat = 0;
\r
9601 stream_.convertInfo[i].outFormat = 0;
\r
9602 stream_.convertInfo[i].inOffset.clear();
\r
9603 stream_.convertInfo[i].outOffset.clear();
\r
9607 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9609 if ( format == RTAUDIO_SINT16 )
\r
9611 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9613 else if ( format == RTAUDIO_FLOAT64 )
\r
9615 else if ( format == RTAUDIO_SINT24 )
\r
9617 else if ( format == RTAUDIO_SINT8 )
\r
9620 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9621 error( RtAudioError::WARNING );
\r
9626 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9628 if ( mode == INPUT ) { // convert device to user buffer
\r
9629 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9630 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9631 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9632 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9634 else { // convert user to device buffer
\r
9635 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9636 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9637 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9638 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9641 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9642 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9644 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9646 // Set up the interleave/deinterleave offsets.
\r
9647 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9648 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9649 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9650 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9651 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9652 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9653 stream_.convertInfo[mode].inJump = 1;
\r
9657 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9658 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9659 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9660 stream_.convertInfo[mode].outJump = 1;
\r
9664 else { // no (de)interleaving
\r
9665 if ( stream_.userInterleaved ) {
\r
9666 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9667 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9668 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9672 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9673 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9674 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9675 stream_.convertInfo[mode].inJump = 1;
\r
9676 stream_.convertInfo[mode].outJump = 1;
\r
9681 // Add channel offset.
\r
9682 if ( firstChannel > 0 ) {
\r
9683 if ( stream_.deviceInterleaved[mode] ) {
\r
9684 if ( mode == OUTPUT ) {
\r
9685 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9686 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9689 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9690 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9694 if ( mode == OUTPUT ) {
\r
9695 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9696 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9699 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9700 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9706 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9708 // This function does format conversion, input/output channel compensation, and
\r
9709 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9710 // the lower three bytes of a 32-bit integer.
\r
9712 // Clear our device buffer when in/out duplex device channels are different
\r
9713 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9714 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9715 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9718 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9720 Float64 *out = (Float64 *)outBuffer;
\r
9722 if (info.inFormat == RTAUDIO_SINT8) {
\r
9723 signed char *in = (signed char *)inBuffer;
\r
9724 scale = 1.0 / 127.5;
\r
9725 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9726 for (j=0; j<info.channels; j++) {
\r
9727 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9728 out[info.outOffset[j]] += 0.5;
\r
9729 out[info.outOffset[j]] *= scale;
\r
9731 in += info.inJump;
\r
9732 out += info.outJump;
\r
9735 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9736 Int16 *in = (Int16 *)inBuffer;
\r
9737 scale = 1.0 / 32767.5;
\r
9738 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9739 for (j=0; j<info.channels; j++) {
\r
9740 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9741 out[info.outOffset[j]] += 0.5;
\r
9742 out[info.outOffset[j]] *= scale;
\r
9744 in += info.inJump;
\r
9745 out += info.outJump;
\r
9748 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9749 Int24 *in = (Int24 *)inBuffer;
\r
9750 scale = 1.0 / 8388607.5;
\r
9751 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9752 for (j=0; j<info.channels; j++) {
\r
9753 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9754 out[info.outOffset[j]] += 0.5;
\r
9755 out[info.outOffset[j]] *= scale;
\r
9757 in += info.inJump;
\r
9758 out += info.outJump;
\r
9761 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9762 Int32 *in = (Int32 *)inBuffer;
\r
9763 scale = 1.0 / 2147483647.5;
\r
9764 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9765 for (j=0; j<info.channels; j++) {
\r
9766 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9767 out[info.outOffset[j]] += 0.5;
\r
9768 out[info.outOffset[j]] *= scale;
\r
9770 in += info.inJump;
\r
9771 out += info.outJump;
\r
9774 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9775 Float32 *in = (Float32 *)inBuffer;
\r
9776 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9777 for (j=0; j<info.channels; j++) {
\r
9778 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9780 in += info.inJump;
\r
9781 out += info.outJump;
\r
9784 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9785 // Channel compensation and/or (de)interleaving only.
\r
9786 Float64 *in = (Float64 *)inBuffer;
\r
9787 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9788 for (j=0; j<info.channels; j++) {
\r
9789 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9791 in += info.inJump;
\r
9792 out += info.outJump;
\r
9796 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9798 Float32 *out = (Float32 *)outBuffer;
\r
9800 if (info.inFormat == RTAUDIO_SINT8) {
\r
9801 signed char *in = (signed char *)inBuffer;
\r
9802 scale = (Float32) ( 1.0 / 127.5 );
\r
9803 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9804 for (j=0; j<info.channels; j++) {
\r
9805 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9806 out[info.outOffset[j]] += 0.5;
\r
9807 out[info.outOffset[j]] *= scale;
\r
9809 in += info.inJump;
\r
9810 out += info.outJump;
\r
9813 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9814 Int16 *in = (Int16 *)inBuffer;
\r
9815 scale = (Float32) ( 1.0 / 32767.5 );
\r
9816 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9817 for (j=0; j<info.channels; j++) {
\r
9818 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9819 out[info.outOffset[j]] += 0.5;
\r
9820 out[info.outOffset[j]] *= scale;
\r
9822 in += info.inJump;
\r
9823 out += info.outJump;
\r
9826 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9827 Int24 *in = (Int24 *)inBuffer;
\r
9828 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9829 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9830 for (j=0; j<info.channels; j++) {
\r
9831 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9832 out[info.outOffset[j]] += 0.5;
\r
9833 out[info.outOffset[j]] *= scale;
\r
9835 in += info.inJump;
\r
9836 out += info.outJump;
\r
9839 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9840 Int32 *in = (Int32 *)inBuffer;
\r
9841 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9842 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9843 for (j=0; j<info.channels; j++) {
\r
9844 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9845 out[info.outOffset[j]] += 0.5;
\r
9846 out[info.outOffset[j]] *= scale;
\r
9848 in += info.inJump;
\r
9849 out += info.outJump;
\r
9852 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9853 // Channel compensation and/or (de)interleaving only.
\r
9854 Float32 *in = (Float32 *)inBuffer;
\r
9855 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9856 for (j=0; j<info.channels; j++) {
\r
9857 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9859 in += info.inJump;
\r
9860 out += info.outJump;
\r
9863 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9864 Float64 *in = (Float64 *)inBuffer;
\r
9865 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9866 for (j=0; j<info.channels; j++) {
\r
9867 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9869 in += info.inJump;
\r
9870 out += info.outJump;
\r
9874 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9875 Int32 *out = (Int32 *)outBuffer;
\r
9876 if (info.inFormat == RTAUDIO_SINT8) {
\r
9877 signed char *in = (signed char *)inBuffer;
\r
9878 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9879 for (j=0; j<info.channels; j++) {
\r
9880 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9881 out[info.outOffset[j]] <<= 24;
\r
9883 in += info.inJump;
\r
9884 out += info.outJump;
\r
9887 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9888 Int16 *in = (Int16 *)inBuffer;
\r
9889 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9890 for (j=0; j<info.channels; j++) {
\r
9891 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9892 out[info.outOffset[j]] <<= 16;
\r
9894 in += info.inJump;
\r
9895 out += info.outJump;
\r
9898 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9899 Int24 *in = (Int24 *)inBuffer;
\r
9900 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9901 for (j=0; j<info.channels; j++) {
\r
9902 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9903 out[info.outOffset[j]] <<= 8;
\r
9905 in += info.inJump;
\r
9906 out += info.outJump;
\r
9909 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9910 // Channel compensation and/or (de)interleaving only.
\r
9911 Int32 *in = (Int32 *)inBuffer;
\r
9912 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9913 for (j=0; j<info.channels; j++) {
\r
9914 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9916 in += info.inJump;
\r
9917 out += info.outJump;
\r
9920 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9921 Float32 *in = (Float32 *)inBuffer;
\r
9922 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9923 for (j=0; j<info.channels; j++) {
\r
9924 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9926 in += info.inJump;
\r
9927 out += info.outJump;
\r
9930 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9931 Float64 *in = (Float64 *)inBuffer;
\r
9932 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9933 for (j=0; j<info.channels; j++) {
\r
9934 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9936 in += info.inJump;
\r
9937 out += info.outJump;
\r
9941 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9942 Int24 *out = (Int24 *)outBuffer;
\r
9943 if (info.inFormat == RTAUDIO_SINT8) {
\r
9944 signed char *in = (signed char *)inBuffer;
\r
9945 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9946 for (j=0; j<info.channels; j++) {
\r
9947 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9948 //out[info.outOffset[j]] <<= 16;
\r
9950 in += info.inJump;
\r
9951 out += info.outJump;
\r
9954 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9955 Int16 *in = (Int16 *)inBuffer;
\r
9956 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9957 for (j=0; j<info.channels; j++) {
\r
9958 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9959 //out[info.outOffset[j]] <<= 8;
\r
9961 in += info.inJump;
\r
9962 out += info.outJump;
\r
9965 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9966 // Channel compensation and/or (de)interleaving only.
\r
9967 Int24 *in = (Int24 *)inBuffer;
\r
9968 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9969 for (j=0; j<info.channels; j++) {
\r
9970 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9972 in += info.inJump;
\r
9973 out += info.outJump;
\r
9976 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9977 Int32 *in = (Int32 *)inBuffer;
\r
9978 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9979 for (j=0; j<info.channels; j++) {
\r
9980 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9981 //out[info.outOffset[j]] >>= 8;
\r
9983 in += info.inJump;
\r
9984 out += info.outJump;
\r
9987 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9988 Float32 *in = (Float32 *)inBuffer;
\r
9989 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9990 for (j=0; j<info.channels; j++) {
\r
9991 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9993 in += info.inJump;
\r
9994 out += info.outJump;
\r
9997 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9998 Float64 *in = (Float64 *)inBuffer;
\r
9999 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10000 for (j=0; j<info.channels; j++) {
\r
10001 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
10003 in += info.inJump;
\r
10004 out += info.outJump;
\r
10008 else if (info.outFormat == RTAUDIO_SINT16) {
\r
10009 Int16 *out = (Int16 *)outBuffer;
\r
10010 if (info.inFormat == RTAUDIO_SINT8) {
\r
10011 signed char *in = (signed char *)inBuffer;
\r
10012 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10013 for (j=0; j<info.channels; j++) {
\r
10014 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
10015 out[info.outOffset[j]] <<= 8;
\r
10017 in += info.inJump;
\r
10018 out += info.outJump;
\r
10021 else if (info.inFormat == RTAUDIO_SINT16) {
\r
10022 // Channel compensation and/or (de)interleaving only.
\r
10023 Int16 *in = (Int16 *)inBuffer;
\r
10024 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10025 for (j=0; j<info.channels; j++) {
\r
10026 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10028 in += info.inJump;
\r
10029 out += info.outJump;
\r
10032 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10033 Int24 *in = (Int24 *)inBuffer;
\r
10034 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10035 for (j=0; j<info.channels; j++) {
\r
10036 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
10038 in += info.inJump;
\r
10039 out += info.outJump;
\r
10042 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10043 Int32 *in = (Int32 *)inBuffer;
\r
10044 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10045 for (j=0; j<info.channels; j++) {
\r
10046 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
10048 in += info.inJump;
\r
10049 out += info.outJump;
\r
10052 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10053 Float32 *in = (Float32 *)inBuffer;
\r
10054 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10055 for (j=0; j<info.channels; j++) {
\r
10056 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10058 in += info.inJump;
\r
10059 out += info.outJump;
\r
10062 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10063 Float64 *in = (Float64 *)inBuffer;
\r
10064 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10065 for (j=0; j<info.channels; j++) {
\r
10066 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10068 in += info.inJump;
\r
10069 out += info.outJump;
\r
10073 else if (info.outFormat == RTAUDIO_SINT8) {
\r
10074 signed char *out = (signed char *)outBuffer;
\r
10075 if (info.inFormat == RTAUDIO_SINT8) {
\r
10076 // Channel compensation and/or (de)interleaving only.
\r
10077 signed char *in = (signed char *)inBuffer;
\r
10078 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10079 for (j=0; j<info.channels; j++) {
\r
10080 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10082 in += info.inJump;
\r
10083 out += info.outJump;
\r
10086 if (info.inFormat == RTAUDIO_SINT16) {
\r
10087 Int16 *in = (Int16 *)inBuffer;
\r
10088 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10089 for (j=0; j<info.channels; j++) {
\r
10090 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10092 in += info.inJump;
\r
10093 out += info.outJump;
\r
10096 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10097 Int24 *in = (Int24 *)inBuffer;
\r
10098 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10099 for (j=0; j<info.channels; j++) {
\r
10100 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10102 in += info.inJump;
\r
10103 out += info.outJump;
\r
10106 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10107 Int32 *in = (Int32 *)inBuffer;
\r
10108 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10109 for (j=0; j<info.channels; j++) {
\r
10110 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10112 in += info.inJump;
\r
10113 out += info.outJump;
\r
10116 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10117 Float32 *in = (Float32 *)inBuffer;
\r
10118 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10119 for (j=0; j<info.channels; j++) {
\r
10120 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10122 in += info.inJump;
\r
10123 out += info.outJump;
\r
10126 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10127 Float64 *in = (Float64 *)inBuffer;
\r
10128 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10129 for (j=0; j<info.channels; j++) {
\r
10130 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10132 in += info.inJump;
\r
10133 out += info.outJump;
\r
10139 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10140 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10141 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10143 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10145 register char val;
\r
10146 register char *ptr;
\r
10149 if ( format == RTAUDIO_SINT16 ) {
\r
10150 for ( unsigned int i=0; i<samples; i++ ) {
\r
10151 // Swap 1st and 2nd bytes.
\r
10153 *(ptr) = *(ptr+1);
\r
10156 // Increment 2 bytes.
\r
10160 else if ( format == RTAUDIO_SINT32 ||
\r
10161 format == RTAUDIO_FLOAT32 ) {
\r
10162 for ( unsigned int i=0; i<samples; i++ ) {
\r
10163 // Swap 1st and 4th bytes.
\r
10165 *(ptr) = *(ptr+3);
\r
10168 // Swap 2nd and 3rd bytes.
\r
10171 *(ptr) = *(ptr+1);
\r
10174 // Increment 3 more bytes.
\r
10178 else if ( format == RTAUDIO_SINT24 ) {
\r
10179 for ( unsigned int i=0; i<samples; i++ ) {
\r
10180 // Swap 1st and 3rd bytes.
\r
10182 *(ptr) = *(ptr+2);
\r
10185 // Increment 2 more bytes.
\r
10189 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10190 for ( unsigned int i=0; i<samples; i++ ) {
\r
10191 // Swap 1st and 8th bytes
\r
10193 *(ptr) = *(ptr+7);
\r
10196 // Swap 2nd and 7th bytes
\r
10199 *(ptr) = *(ptr+5);
\r
10202 // Swap 3rd and 6th bytes
\r
10205 *(ptr) = *(ptr+3);
\r
10208 // Swap 4th and 5th bytes
\r
10211 *(ptr) = *(ptr+1);
\r
10214 // Increment 5 more bytes.
\r
10220 // Indentation settings for Vim and Emacs
\r
10222 // Local Variables:
\r
10223 // c-basic-offset: 2
\r
10224 // indent-tabs-mode: nil
\r
10227 // vim: et sts=2 sw=2
\r