/************************************************************************/
/*! \class RtAudio
    \brief Realtime audio i/o C++ classes.

    RtAudio provides a common API (Application Programming Interface)
    for realtime audio input/output across Linux (native ALSA, Jack,
    and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
    (DirectSound, ASIO and WASAPI) operating systems.

    RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/

    RtAudio: realtime audio i/o C++ classes
    Copyright (c) 2001-2014 Gary P. Scavone

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation files
    (the "Software"), to deal in the Software without restriction,
    including without limitation the rights to use, copy, modify, merge,
    publish, distribute, sublicense, and/or sell copies of the Software,
    and to permit persons to whom the Software is furnished to do so,
    subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    Any person wishing to distribute modifications to the Software is
    asked to send the modifications to the original developer so that
    they can be incorporated into the canonical version.  This is,
    however, not a binding provision of this license.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
    ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
    CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/************************************************************************/

// RtAudio: Version 4.1.1
#include "RtAudio.h"
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
#include <algorithm>
50 // Static variable definitions.
\r
51 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
52 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
53 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
54 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform mutex abstraction: Windows critical sections vs. pthread mutexes.
// The final #else branch supplies harmless dummies for the no-API build.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

// Pass-through conversion for narrow (already multibyte) device-name strings.
static std::string convertCharPointerToStdString(const char *text)
{
  return std::string(text);
}

// Convert a null-terminated wide string to a UTF-8 std::string.
static std::string convertCharPointerToStdString(const wchar_t *text)
{
  // First call queries the required buffer length (includes the terminating null).
  int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
  std::string s( length-1, '\0' );
  WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
  return s;
}

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
89 // *************************************************** //
\r
91 // RtAudio definitions.
\r
93 // *************************************************** //
\r
95 std::string RtAudio :: getVersion( void ) throw()
\r
97 return RTAUDIO_VERSION;
\r
100 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
104 // The order here will control the order of RtAudio's API search in
\r
105 // the constructor.
\r
106 #if defined(__UNIX_JACK__)
\r
107 apis.push_back( UNIX_JACK );
\r
109 #if defined(__LINUX_ALSA__)
\r
110 apis.push_back( LINUX_ALSA );
\r
112 #if defined(__LINUX_PULSE__)
\r
113 apis.push_back( LINUX_PULSE );
\r
115 #if defined(__LINUX_OSS__)
\r
116 apis.push_back( LINUX_OSS );
\r
118 #if defined(__WINDOWS_ASIO__)
\r
119 apis.push_back( WINDOWS_ASIO );
\r
121 #if defined(__WINDOWS_WASAPI__)
\r
122 apis.push_back( WINDOWS_WASAPI );
\r
124 #if defined(__WINDOWS_DS__)
\r
125 apis.push_back( WINDOWS_DS );
\r
127 #if defined(__MACOSX_CORE__)
\r
128 apis.push_back( MACOSX_CORE );
\r
130 #if defined(__RTAUDIO_DUMMY__)
\r
131 apis.push_back( RTAUDIO_DUMMY );
\r
135 void RtAudio :: openRtApi( RtAudio::Api api )
\r
141 #if defined(__UNIX_JACK__)
\r
142 if ( api == UNIX_JACK )
\r
143 rtapi_ = new RtApiJack();
\r
145 #if defined(__LINUX_ALSA__)
\r
146 if ( api == LINUX_ALSA )
\r
147 rtapi_ = new RtApiAlsa();
\r
149 #if defined(__LINUX_PULSE__)
\r
150 if ( api == LINUX_PULSE )
\r
151 rtapi_ = new RtApiPulse();
\r
153 #if defined(__LINUX_OSS__)
\r
154 if ( api == LINUX_OSS )
\r
155 rtapi_ = new RtApiOss();
\r
157 #if defined(__WINDOWS_ASIO__)
\r
158 if ( api == WINDOWS_ASIO )
\r
159 rtapi_ = new RtApiAsio();
\r
161 #if defined(__WINDOWS_WASAPI__)
\r
162 if ( api == WINDOWS_WASAPI )
\r
163 rtapi_ = new RtApiWasapi();
\r
165 #if defined(__WINDOWS_DS__)
\r
166 if ( api == WINDOWS_DS )
\r
167 rtapi_ = new RtApiDs();
\r
169 #if defined(__MACOSX_CORE__)
\r
170 if ( api == MACOSX_CORE )
\r
171 rtapi_ = new RtApiCore();
\r
173 #if defined(__RTAUDIO_DUMMY__)
\r
174 if ( api == RTAUDIO_DUMMY )
\r
175 rtapi_ = new RtApiDummy();
\r
179 RtAudio :: RtAudio( RtAudio::Api api )
\r
183 if ( api != UNSPECIFIED ) {
\r
184 // Attempt to open the specified API.
\r
186 if ( rtapi_ ) return;
\r
188 // No compiled support for specified API value. Issue a debug
\r
189 // warning and continue as if no API was specified.
\r
190 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
193 // Iterate through the compiled APIs and return as soon as we find
\r
194 // one with at least one device or we reach the end of the list.
\r
195 std::vector< RtAudio::Api > apis;
\r
196 getCompiledApi( apis );
\r
197 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
198 openRtApi( apis[i] );
\r
199 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
\r
202 if ( rtapi_ ) return;
\r
204 // It should not be possible to get here because the preprocessor
\r
205 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
206 // API-specific definitions are passed to the compiler. But just in
\r
207 // case something weird happens, we'll thow an error.
\r
208 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
209 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
212 RtAudio :: ~RtAudio() throw()
\r
218 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
219 RtAudio::StreamParameters *inputParameters,
\r
220 RtAudioFormat format, unsigned int sampleRate,
\r
221 unsigned int *bufferFrames,
\r
222 RtAudioCallback callback, void *userData,
\r
223 RtAudio::StreamOptions *options,
\r
224 RtAudioErrorCallback errorCallback )
\r
226 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
227 sampleRate, bufferFrames, callback,
\r
228 userData, options, errorCallback );
\r
231 // *************************************************** //
\r
233 // Public RtApi definitions (see end of file for
\r
234 // private or protected utility functions).
\r
236 // *************************************************** //
\r
240 stream_.state = STREAM_CLOSED;
\r
241 stream_.mode = UNINITIALIZED;
\r
242 stream_.apiHandle = 0;
\r
243 stream_.userBuffer[0] = 0;
\r
244 stream_.userBuffer[1] = 0;
\r
245 MUTEX_INITIALIZE( &stream_.mutex );
\r
246 showWarnings_ = true;
\r
247 firstErrorOccurred_ = false;
\r
252 MUTEX_DESTROY( &stream_.mutex );
\r
255 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
256 RtAudio::StreamParameters *iParams,
\r
257 RtAudioFormat format, unsigned int sampleRate,
\r
258 unsigned int *bufferFrames,
\r
259 RtAudioCallback callback, void *userData,
\r
260 RtAudio::StreamOptions *options,
\r
261 RtAudioErrorCallback errorCallback )
\r
263 if ( stream_.state != STREAM_CLOSED ) {
\r
264 errorText_ = "RtApi::openStream: a stream is already open!";
\r
265 error( RtAudioError::INVALID_USE );
\r
269 // Clear stream information potentially left from a previously open stream.
\r
272 if ( oParams && oParams->nChannels < 1 ) {
\r
273 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
274 error( RtAudioError::INVALID_USE );
\r
278 if ( iParams && iParams->nChannels < 1 ) {
\r
279 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
280 error( RtAudioError::INVALID_USE );
\r
284 if ( oParams == NULL && iParams == NULL ) {
\r
285 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
286 error( RtAudioError::INVALID_USE );
\r
290 if ( formatBytes(format) == 0 ) {
\r
291 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
292 error( RtAudioError::INVALID_USE );
\r
296 unsigned int nDevices = getDeviceCount();
\r
297 unsigned int oChannels = 0;
\r
299 oChannels = oParams->nChannels;
\r
300 if ( oParams->deviceId >= nDevices ) {
\r
301 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
302 error( RtAudioError::INVALID_USE );
\r
307 unsigned int iChannels = 0;
\r
309 iChannels = iParams->nChannels;
\r
310 if ( iParams->deviceId >= nDevices ) {
\r
311 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
312 error( RtAudioError::INVALID_USE );
\r
319 if ( oChannels > 0 ) {
\r
321 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
322 sampleRate, format, bufferFrames, options );
\r
323 if ( result == false ) {
\r
324 error( RtAudioError::SYSTEM_ERROR );
\r
329 if ( iChannels > 0 ) {
\r
331 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
332 sampleRate, format, bufferFrames, options );
\r
333 if ( result == false ) {
\r
334 if ( oChannels > 0 ) closeStream();
\r
335 error( RtAudioError::SYSTEM_ERROR );
\r
340 stream_.callbackInfo.callback = (void *) callback;
\r
341 stream_.callbackInfo.userData = userData;
\r
342 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
344 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
345 stream_.state = STREAM_STOPPED;
\r
348 unsigned int RtApi :: getDefaultInputDevice( void )
\r
350 // Should be implemented in subclasses if possible.
\r
354 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
356 // Should be implemented in subclasses if possible.
\r
360 void RtApi :: closeStream( void )
\r
362 // MUST be implemented in subclasses!
\r
366 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
367 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
368 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
369 RtAudio::StreamOptions * /*options*/ )
\r
371 // MUST be implemented in subclasses!
\r
375 void RtApi :: tickStreamTime( void )
\r
377 // Subclasses that do not provide their own implementation of
\r
378 // getStreamTime should call this function once per buffer I/O to
\r
379 // provide basic stream time support.
\r
381 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
383 #if defined( HAVE_GETTIMEOFDAY )
\r
384 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
388 long RtApi :: getStreamLatency( void )
\r
392 long totalLatency = 0;
\r
393 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
394 totalLatency = stream_.latency[0];
\r
395 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
396 totalLatency += stream_.latency[1];
\r
398 return totalLatency;
\r
401 double RtApi :: getStreamTime( void )
\r
405 #if defined( HAVE_GETTIMEOFDAY )
\r
406 // Return a very accurate estimate of the stream time by
\r
407 // adding in the elapsed time since the last tick.
\r
408 struct timeval then;
\r
409 struct timeval now;
\r
411 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
412 return stream_.streamTime;
\r
414 gettimeofday( &now, NULL );
\r
415 then = stream_.lastTickTimestamp;
\r
416 return stream_.streamTime +
\r
417 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
418 (then.tv_sec + 0.000001 * then.tv_usec));
\r
420 return stream_.streamTime;
\r
424 void RtApi :: setStreamTime( double time )
\r
429 stream_.streamTime = time;
\r
432 unsigned int RtApi :: getStreamSampleRate( void )
\r
436 return stream_.sampleRate;
\r
440 // *************************************************** //
\r
442 // OS/API-specific methods.
\r
444 // *************************************************** //
\r
446 #if defined(__MACOSX_CORE__)
\r
448 // The OS X CoreAudio API is designed to use a separate callback
\r
449 // procedure for each of its audio devices. A single RtAudio duplex
\r
450 // stream using two different devices is supported here, though it
\r
451 // cannot be guaranteed to always behave correctly because we cannot
\r
452 // synchronize these two callbacks.
\r
454 // A property listener is installed for over/underrun information.
\r
455 // However, no functionality is currently provided to allow property
\r
456 // listeners to trigger user handlers because it is unclear what could
\r
457 // be done if a critical stream parameter (buffer size, sample rate,
\r
458 // device disconnect) notification arrived. The listeners entail
\r
459 // quite a bit of extra code and most likely, a user program wouldn't
\r
460 // be prepared for the result anyway. However, we do provide a flag
\r
461 // to the client callback function to inform of an over/underrun.
\r
463 // A structure to hold various information related to the CoreAudio API
\r
465 struct CoreHandle {
\r
466 AudioDeviceID id[2]; // device ids
\r
467 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
468 AudioDeviceIOProcID procId[2];
\r
470 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
471 UInt32 nStreams[2]; // number of streams to use
\r
473 char *deviceBuffer;
\r
474 pthread_cond_t condition;
\r
475 int drainCounter; // Tracks callback counts when draining
\r
476 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
479 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
482 RtApiCore:: RtApiCore()
\r
484 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
485 // This is a largely undocumented but absolutely necessary
\r
486 // requirement starting with OS-X 10.6. If not called, queries and
\r
487 // updates to various audio device properties are not handled
\r
489 CFRunLoopRef theRunLoop = NULL;
\r
490 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
491 kAudioObjectPropertyScopeGlobal,
\r
492 kAudioObjectPropertyElementMaster };
\r
493 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
494 if ( result != noErr ) {
\r
495 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
496 error( RtAudioError::WARNING );
\r
501 RtApiCore :: ~RtApiCore()
\r
503 // The subclass destructor gets called before the base class
\r
504 // destructor, so close an existing stream before deallocating
\r
505 // apiDeviceId memory.
\r
506 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
509 unsigned int RtApiCore :: getDeviceCount( void )
\r
511 // Find out how many audio devices there are, if any.
\r
513 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
514 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
515 if ( result != noErr ) {
\r
516 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
517 error( RtAudioError::WARNING );
\r
521 return dataSize / sizeof( AudioDeviceID );
\r
524 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
526 unsigned int nDevices = getDeviceCount();
\r
527 if ( nDevices <= 1 ) return 0;
\r
530 UInt32 dataSize = sizeof( AudioDeviceID );
\r
531 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
532 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
533 if ( result != noErr ) {
\r
534 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
535 error( RtAudioError::WARNING );
\r
539 dataSize *= nDevices;
\r
540 AudioDeviceID deviceList[ nDevices ];
\r
541 property.mSelector = kAudioHardwarePropertyDevices;
\r
542 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
543 if ( result != noErr ) {
\r
544 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
545 error( RtAudioError::WARNING );
\r
549 for ( unsigned int i=0; i<nDevices; i++ )
\r
550 if ( id == deviceList[i] ) return i;
\r
552 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
553 error( RtAudioError::WARNING );
\r
557 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
559 unsigned int nDevices = getDeviceCount();
\r
560 if ( nDevices <= 1 ) return 0;
\r
563 UInt32 dataSize = sizeof( AudioDeviceID );
\r
564 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
565 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
566 if ( result != noErr ) {
\r
567 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
568 error( RtAudioError::WARNING );
\r
572 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
573 AudioDeviceID deviceList[ nDevices ];
\r
574 property.mSelector = kAudioHardwarePropertyDevices;
\r
575 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
576 if ( result != noErr ) {
\r
577 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
578 error( RtAudioError::WARNING );
\r
582 for ( unsigned int i=0; i<nDevices; i++ )
\r
583 if ( id == deviceList[i] ) return i;
\r
585 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
586 error( RtAudioError::WARNING );
\r
590 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
592 RtAudio::DeviceInfo info;
\r
593 info.probed = false;
\r
596 unsigned int nDevices = getDeviceCount();
\r
597 if ( nDevices == 0 ) {
\r
598 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
599 error( RtAudioError::INVALID_USE );
\r
603 if ( device >= nDevices ) {
\r
604 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
605 error( RtAudioError::INVALID_USE );
\r
609 AudioDeviceID deviceList[ nDevices ];
\r
610 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
611 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
612 kAudioObjectPropertyScopeGlobal,
\r
613 kAudioObjectPropertyElementMaster };
\r
614 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
615 0, NULL, &dataSize, (void *) &deviceList );
\r
616 if ( result != noErr ) {
\r
617 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
618 error( RtAudioError::WARNING );
\r
622 AudioDeviceID id = deviceList[ device ];
\r
624 // Get the device name.
\r
626 CFStringRef cfname;
\r
627 dataSize = sizeof( CFStringRef );
\r
628 property.mSelector = kAudioObjectPropertyManufacturer;
\r
629 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
630 if ( result != noErr ) {
\r
631 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
632 errorText_ = errorStream_.str();
\r
633 error( RtAudioError::WARNING );
\r
637 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
638 int length = CFStringGetLength(cfname);
\r
639 char *mname = (char *)malloc(length * 3 + 1);
\r
640 #if defined( UNICODE ) || defined( _UNICODE )
\r
641 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
643 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
645 info.name.append( (const char *)mname, strlen(mname) );
\r
646 info.name.append( ": " );
\r
647 CFRelease( cfname );
\r
650 property.mSelector = kAudioObjectPropertyName;
\r
651 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
652 if ( result != noErr ) {
\r
653 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
654 errorText_ = errorStream_.str();
\r
655 error( RtAudioError::WARNING );
\r
659 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
660 length = CFStringGetLength(cfname);
\r
661 char *name = (char *)malloc(length * 3 + 1);
\r
662 #if defined( UNICODE ) || defined( _UNICODE )
\r
663 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
665 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
667 info.name.append( (const char *)name, strlen(name) );
\r
668 CFRelease( cfname );
\r
671 // Get the output stream "configuration".
\r
672 AudioBufferList *bufferList = nil;
\r
673 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
674 property.mScope = kAudioDevicePropertyScopeOutput;
\r
675 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
677 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
678 if ( result != noErr || dataSize == 0 ) {
\r
679 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
680 errorText_ = errorStream_.str();
\r
681 error( RtAudioError::WARNING );
\r
685 // Allocate the AudioBufferList.
\r
686 bufferList = (AudioBufferList *) malloc( dataSize );
\r
687 if ( bufferList == NULL ) {
\r
688 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
689 error( RtAudioError::WARNING );
\r
693 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
694 if ( result != noErr || dataSize == 0 ) {
\r
695 free( bufferList );
\r
696 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
697 errorText_ = errorStream_.str();
\r
698 error( RtAudioError::WARNING );
\r
702 // Get output channel information.
\r
703 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
704 for ( i=0; i<nStreams; i++ )
\r
705 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
706 free( bufferList );
\r
708 // Get the input stream "configuration".
\r
709 property.mScope = kAudioDevicePropertyScopeInput;
\r
710 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
711 if ( result != noErr || dataSize == 0 ) {
\r
712 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
713 errorText_ = errorStream_.str();
\r
714 error( RtAudioError::WARNING );
\r
718 // Allocate the AudioBufferList.
\r
719 bufferList = (AudioBufferList *) malloc( dataSize );
\r
720 if ( bufferList == NULL ) {
\r
721 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
722 error( RtAudioError::WARNING );
\r
726 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
727 if (result != noErr || dataSize == 0) {
\r
728 free( bufferList );
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 // Get input channel information.
\r
736 nStreams = bufferList->mNumberBuffers;
\r
737 for ( i=0; i<nStreams; i++ )
\r
738 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
739 free( bufferList );
\r
741 // If device opens for both playback and capture, we determine the channels.
\r
742 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
743 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
745 // Probe the device sample rates.
\r
746 bool isInput = false;
\r
747 if ( info.outputChannels == 0 ) isInput = true;
\r
749 // Determine the supported sample rates.
\r
750 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
751 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
752 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
753 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
754 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
755 errorText_ = errorStream_.str();
\r
756 error( RtAudioError::WARNING );
\r
760 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
761 AudioValueRange rangeList[ nRanges ];
\r
762 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
763 if ( result != kAudioHardwareNoError ) {
\r
764 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
765 errorText_ = errorStream_.str();
\r
766 error( RtAudioError::WARNING );
\r
770 // The sample rate reporting mechanism is a bit of a mystery. It
\r
771 // seems that it can either return individual rates or a range of
\r
772 // rates. I assume that if the min / max range values are the same,
\r
773 // then that represents a single supported rate and if the min / max
\r
774 // range values are different, the device supports an arbitrary
\r
775 // range of values (though there might be multiple ranges, so we'll
\r
776 // use the most conservative range).
\r
777 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
778 bool haveValueRange = false;
\r
779 info.sampleRates.clear();
\r
780 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
781 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
\r
782 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
\r
783 info.sampleRates.push_back( tmpSr );
\r
785 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
\r
786 info.preferredSampleRate = tmpSr;
\r
789 haveValueRange = true;
\r
790 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
791 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
795 if ( haveValueRange ) {
\r
796 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
797 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
\r
798 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
800 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
801 info.preferredSampleRate = SAMPLE_RATES[k];
\r
806 // Sort and remove any redundant values
\r
807 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
808 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
810 if ( info.sampleRates.size() == 0 ) {
\r
811 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
812 errorText_ = errorStream_.str();
\r
813 error( RtAudioError::WARNING );
\r
817 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
818 // Thus, any other "physical" formats supported by the device are of
\r
819 // no interest to the client.
\r
820 info.nativeFormats = RTAUDIO_FLOAT32;
\r
822 if ( info.outputChannels > 0 )
\r
823 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
824 if ( info.inputChannels > 0 )
\r
825 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
827 info.probed = true;
\r
831 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
832 const AudioTimeStamp* /*inNow*/,
\r
833 const AudioBufferList* inInputData,
\r
834 const AudioTimeStamp* /*inInputTime*/,
\r
835 AudioBufferList* outOutputData,
\r
836 const AudioTimeStamp* /*inOutputTime*/,
\r
837 void* infoPointer )
\r
839 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
841 RtApiCore *object = (RtApiCore *) info->object;
\r
842 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
843 return kAudioHardwareUnspecifiedError;
\r
845 return kAudioHardwareNoError;
\r
848 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
850 const AudioObjectPropertyAddress properties[],
\r
851 void* handlePointer )
\r
853 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
854 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
855 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
856 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
857 handle->xrun[1] = true;
\r
859 handle->xrun[0] = true;
\r
863 return kAudioHardwareNoError;
\r
866 static OSStatus rateListener( AudioObjectID inDevice,
\r
867 UInt32 /*nAddresses*/,
\r
868 const AudioObjectPropertyAddress /*properties*/[],
\r
869 void* ratePointer )
\r
871 Float64 *rate = (Float64 *) ratePointer;
\r
872 UInt32 dataSize = sizeof( Float64 );
\r
873 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
874 kAudioObjectPropertyScopeGlobal,
\r
875 kAudioObjectPropertyElementMaster };
\r
876 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
877 return kAudioHardwareNoError;
\r
880 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
881 unsigned int firstChannel, unsigned int sampleRate,
\r
882 RtAudioFormat format, unsigned int *bufferSize,
\r
883 RtAudio::StreamOptions *options )
\r
886 unsigned int nDevices = getDeviceCount();
\r
887 if ( nDevices == 0 ) {
\r
888 // This should not happen because a check is made before this function is called.
\r
889 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
893 if ( device >= nDevices ) {
\r
894 // This should not happen because a check is made before this function is called.
\r
895 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
899 AudioDeviceID deviceList[ nDevices ];
\r
900 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
901 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
902 kAudioObjectPropertyScopeGlobal,
\r
903 kAudioObjectPropertyElementMaster };
\r
904 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
905 0, NULL, &dataSize, (void *) &deviceList );
\r
906 if ( result != noErr ) {
\r
907 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
911 AudioDeviceID id = deviceList[ device ];
\r
913 // Setup for stream mode.
\r
914 bool isInput = false;
\r
915 if ( mode == INPUT ) {
\r
917 property.mScope = kAudioDevicePropertyScopeInput;
\r
920 property.mScope = kAudioDevicePropertyScopeOutput;
\r
922 // Get the stream "configuration".
\r
923 AudioBufferList *bufferList = nil;
\r
925 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
926 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
927 if ( result != noErr || dataSize == 0 ) {
\r
928 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
929 errorText_ = errorStream_.str();
\r
933 // Allocate the AudioBufferList.
\r
934 bufferList = (AudioBufferList *) malloc( dataSize );
\r
935 if ( bufferList == NULL ) {
\r
936 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
940 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
941 if (result != noErr || dataSize == 0) {
\r
942 free( bufferList );
\r
943 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
944 errorText_ = errorStream_.str();
\r
948 // Search for one or more streams that contain the desired number of
\r
949 // channels. CoreAudio devices can have an arbitrary number of
\r
950 // streams and each stream can have an arbitrary number of channels.
\r
951 // For each stream, a single buffer of interleaved samples is
\r
952 // provided. RtAudio prefers the use of one stream of interleaved
\r
953 // data or multiple consecutive single-channel streams. However, we
\r
954 // now support multiple consecutive multi-channel streams of
\r
955 // interleaved data as well.
\r
956 UInt32 iStream, offsetCounter = firstChannel;
\r
957 UInt32 nStreams = bufferList->mNumberBuffers;
\r
958 bool monoMode = false;
\r
959 bool foundStream = false;
\r
961 // First check that the device supports the requested number of
\r
963 UInt32 deviceChannels = 0;
\r
964 for ( iStream=0; iStream<nStreams; iStream++ )
\r
965 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
967 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
968 free( bufferList );
\r
969 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
970 errorText_ = errorStream_.str();
\r
974 // Look for a single stream meeting our needs.
\r
975 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
976 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
977 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
978 if ( streamChannels >= channels + offsetCounter ) {
\r
979 firstStream = iStream;
\r
980 channelOffset = offsetCounter;
\r
981 foundStream = true;
\r
984 if ( streamChannels > offsetCounter ) break;
\r
985 offsetCounter -= streamChannels;
\r
988 // If we didn't find a single stream above, then we should be able
\r
989 // to meet the channel specification with multiple streams.
\r
990 if ( foundStream == false ) {
\r
992 offsetCounter = firstChannel;
\r
993 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
994 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
995 if ( streamChannels > offsetCounter ) break;
\r
996 offsetCounter -= streamChannels;
\r
999 firstStream = iStream;
\r
1000 channelOffset = offsetCounter;
\r
1001 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
1003 if ( streamChannels > 1 ) monoMode = false;
\r
1004 while ( channelCounter > 0 ) {
\r
1005 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
1006 if ( streamChannels > 1 ) monoMode = false;
\r
1007 channelCounter -= streamChannels;
\r
1012 free( bufferList );
\r
1014 // Determine the buffer size.
\r
1015 AudioValueRange bufferRange;
\r
1016 dataSize = sizeof( AudioValueRange );
\r
1017 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
1018 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
1020 if ( result != noErr ) {
\r
1021 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
1022 errorText_ = errorStream_.str();
\r
1026 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1027 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
1028 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1030 // Set the buffer size. For multiple streams, I'm assuming we only
\r
1031 // need to make this setting for the master channel.
\r
1032 UInt32 theSize = (UInt32) *bufferSize;
\r
1033 dataSize = sizeof( UInt32 );
\r
1034 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1035 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1037 if ( result != noErr ) {
\r
1038 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1039 errorText_ = errorStream_.str();
\r
1043 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1044 // MUST be the same in both directions!
\r
1045 *bufferSize = theSize;
\r
1046 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1047 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1048 errorText_ = errorStream_.str();
\r
1052 stream_.bufferSize = *bufferSize;
\r
1053 stream_.nBuffers = 1;
\r
1055 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1056 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1058 dataSize = sizeof( hog_pid );
\r
1059 property.mSelector = kAudioDevicePropertyHogMode;
\r
1060 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1061 if ( result != noErr ) {
\r
1062 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1063 errorText_ = errorStream_.str();
\r
1067 if ( hog_pid != getpid() ) {
\r
1068 hog_pid = getpid();
\r
1069 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1070 if ( result != noErr ) {
\r
1071 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1072 errorText_ = errorStream_.str();
\r
1078 // Check and if necessary, change the sample rate for the device.
\r
1079 Float64 nominalRate;
\r
1080 dataSize = sizeof( Float64 );
\r
1081 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1082 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1083 if ( result != noErr ) {
\r
1084 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1085 errorText_ = errorStream_.str();
\r
1089 // Only change the sample rate if off by more than 1 Hz.
\r
1090 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1092 // Set a property listener for the sample rate change
\r
1093 Float64 reportedRate = 0.0;
\r
1094 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1095 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1096 if ( result != noErr ) {
\r
1097 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1098 errorText_ = errorStream_.str();
\r
1102 nominalRate = (Float64) sampleRate;
\r
1103 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1104 if ( result != noErr ) {
\r
1105 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1106 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1107 errorText_ = errorStream_.str();
\r
1111 // Now wait until the reported nominal rate is what we just set.
\r
1112 UInt32 microCounter = 0;
\r
1113 while ( reportedRate != nominalRate ) {
\r
1114 microCounter += 5000;
\r
1115 if ( microCounter > 5000000 ) break;
\r
1119 // Remove the property listener.
\r
1120 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1122 if ( microCounter > 5000000 ) {
\r
1123 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1124 errorText_ = errorStream_.str();
\r
1129 // Now set the stream format for all streams. Also, check the
\r
1130 // physical format of the device and change that if necessary.
\r
1131 AudioStreamBasicDescription description;
\r
1132 dataSize = sizeof( AudioStreamBasicDescription );
\r
1133 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1134 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1135 if ( result != noErr ) {
\r
1136 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1137 errorText_ = errorStream_.str();
\r
1141 // Set the sample rate and data format id. However, only make the
\r
1142 // change if the sample rate is not within 1.0 of the desired
\r
1143 // rate and the format is not linear pcm.
\r
1144 bool updateFormat = false;
\r
1145 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1146 description.mSampleRate = (Float64) sampleRate;
\r
1147 updateFormat = true;
\r
1150 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1151 description.mFormatID = kAudioFormatLinearPCM;
\r
1152 updateFormat = true;
\r
1155 if ( updateFormat ) {
\r
1156 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1157 if ( result != noErr ) {
\r
1158 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1159 errorText_ = errorStream_.str();
\r
1164 // Now check the physical format.
\r
1165 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1166 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1167 if ( result != noErr ) {
\r
1168 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1169 errorText_ = errorStream_.str();
\r
1173 //std::cout << "Current physical stream format:" << std::endl;
\r
1174 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1175 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1176 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1177 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1179 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1180 description.mFormatID = kAudioFormatLinearPCM;
\r
1181 //description.mSampleRate = (Float64) sampleRate;
\r
1182 AudioStreamBasicDescription testDescription = description;
\r
1183 UInt32 formatFlags;
\r
1185 // We'll try higher bit rates first and then work our way down.
\r
1186 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1187 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1188 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1189 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1190 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1191 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1192 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1193 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1194 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1196 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1198 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1200 bool setPhysicalFormat = false;
\r
1201 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1202 testDescription = description;
\r
1203 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1204 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1205 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1206 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1208 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1209 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1210 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1211 if ( result == noErr ) {
\r
1212 setPhysicalFormat = true;
\r
1213 //std::cout << "Updated physical stream format:" << std::endl;
\r
1214 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1215 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1216 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1217 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1222 if ( !setPhysicalFormat ) {
\r
1223 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1224 errorText_ = errorStream_.str();
\r
1227 } // done setting virtual/physical formats.
\r
1229 // Get the stream / device latency.
\r
1231 dataSize = sizeof( UInt32 );
\r
1232 property.mSelector = kAudioDevicePropertyLatency;
\r
1233 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1234 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1235 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1237 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1238 errorText_ = errorStream_.str();
\r
1239 error( RtAudioError::WARNING );
\r
1243 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1244 // always be presented in native-endian format, so we should never
\r
1245 // need to byte swap.
\r
1246 stream_.doByteSwap[mode] = false;
\r
1248 // From the CoreAudio documentation, PCM data must be supplied as
\r
1250 stream_.userFormat = format;
\r
1251 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1253 if ( streamCount == 1 )
\r
1254 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1255 else // multiple streams
\r
1256 stream_.nDeviceChannels[mode] = channels;
\r
1257 stream_.nUserChannels[mode] = channels;
\r
1258 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1259 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1260 else stream_.userInterleaved = true;
\r
1261 stream_.deviceInterleaved[mode] = true;
\r
1262 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1264 // Set flags for buffer conversion.
\r
1265 stream_.doConvertBuffer[mode] = false;
\r
1266 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1267 stream_.doConvertBuffer[mode] = true;
\r
1268 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1269 stream_.doConvertBuffer[mode] = true;
\r
1270 if ( streamCount == 1 ) {
\r
1271 if ( stream_.nUserChannels[mode] > 1 &&
\r
1272 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1273 stream_.doConvertBuffer[mode] = true;
\r
1275 else if ( monoMode && stream_.userInterleaved )
\r
1276 stream_.doConvertBuffer[mode] = true;
\r
1278 // Allocate our CoreHandle structure for the stream.
\r
1279 CoreHandle *handle = 0;
\r
1280 if ( stream_.apiHandle == 0 ) {
\r
1282 handle = new CoreHandle;
\r
1284 catch ( std::bad_alloc& ) {
\r
1285 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1289 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1290 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1293 stream_.apiHandle = (void *) handle;
\r
1296 handle = (CoreHandle *) stream_.apiHandle;
\r
1297 handle->iStream[mode] = firstStream;
\r
1298 handle->nStreams[mode] = streamCount;
\r
1299 handle->id[mode] = id;
\r
1301 // Allocate necessary internal buffers.
\r
1302 unsigned long bufferBytes;
\r
1303 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1304 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1305 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1306 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1307 if ( stream_.userBuffer[mode] == NULL ) {
\r
1308 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1312 // If possible, we will make use of the CoreAudio stream buffers as
\r
1313 // "device buffers". However, we can't do this if using multiple
\r
1315 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1317 bool makeBuffer = true;
\r
1318 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1319 if ( mode == INPUT ) {
\r
1320 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1321 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1322 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1326 if ( makeBuffer ) {
\r
1327 bufferBytes *= *bufferSize;
\r
1328 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1329 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1330 if ( stream_.deviceBuffer == NULL ) {
\r
1331 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1337 stream_.sampleRate = sampleRate;
\r
1338 stream_.device[mode] = device;
\r
1339 stream_.state = STREAM_STOPPED;
\r
1340 stream_.callbackInfo.object = (void *) this;
\r
1342 // Setup the buffer conversion information structure.
\r
1343 if ( stream_.doConvertBuffer[mode] ) {
\r
1344 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1345 else setConvertInfo( mode, channelOffset );
\r
1348 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1349 // Only one callback procedure per device.
\r
1350 stream_.mode = DUPLEX;
\r
1352 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1353 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1355 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1356 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1358 if ( result != noErr ) {
\r
1359 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1360 errorText_ = errorStream_.str();
\r
1363 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1364 stream_.mode = DUPLEX;
\r
1366 stream_.mode = mode;
\r
1369 // Setup the device property listener for over/underload.
\r
1370 property.mSelector = kAudioDeviceProcessorOverload;
\r
1371 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1372 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1378 pthread_cond_destroy( &handle->condition );
\r
1380 stream_.apiHandle = 0;
\r
1383 for ( int i=0; i<2; i++ ) {
\r
1384 if ( stream_.userBuffer[i] ) {
\r
1385 free( stream_.userBuffer[i] );
\r
1386 stream_.userBuffer[i] = 0;
\r
1390 if ( stream_.deviceBuffer ) {
\r
1391 free( stream_.deviceBuffer );
\r
1392 stream_.deviceBuffer = 0;
\r
1395 stream_.state = STREAM_CLOSED;
\r
1399 void RtApiCore :: closeStream( void )
\r
1401 if ( stream_.state == STREAM_CLOSED ) {
\r
1402 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1403 error( RtAudioError::WARNING );
\r
1407 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1408 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1409 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1411 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
1412 kAudioObjectPropertyScopeGlobal,
\r
1413 kAudioObjectPropertyElementMaster };
\r
1415 property.mSelector = kAudioDeviceProcessorOverload;
\r
1416 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1417 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
\r
1418 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
\r
1419 error( RtAudioError::WARNING );
\r
1422 if ( stream_.state == STREAM_RUNNING )
\r
1423 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1424 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1425 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1427 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1428 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1432 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1434 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
1435 kAudioObjectPropertyScopeGlobal,
\r
1436 kAudioObjectPropertyElementMaster };
\r
1438 property.mSelector = kAudioDeviceProcessorOverload;
\r
1439 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1440 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
\r
1441 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
\r
1442 error( RtAudioError::WARNING );
\r
1445 if ( stream_.state == STREAM_RUNNING )
\r
1446 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1447 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1448 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1450 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1451 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1455 for ( int i=0; i<2; i++ ) {
\r
1456 if ( stream_.userBuffer[i] ) {
\r
1457 free( stream_.userBuffer[i] );
\r
1458 stream_.userBuffer[i] = 0;
\r
1462 if ( stream_.deviceBuffer ) {
\r
1463 free( stream_.deviceBuffer );
\r
1464 stream_.deviceBuffer = 0;
\r
1467 // Destroy pthread condition variable.
\r
1468 pthread_cond_destroy( &handle->condition );
\r
1470 stream_.apiHandle = 0;
\r
1472 stream_.mode = UNINITIALIZED;
\r
1473 stream_.state = STREAM_CLOSED;
\r
1476 void RtApiCore :: startStream( void )
\r
1479 if ( stream_.state == STREAM_RUNNING ) {
\r
1480 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1481 error( RtAudioError::WARNING );
\r
1485 OSStatus result = noErr;
\r
1486 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1487 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1489 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1490 if ( result != noErr ) {
\r
1491 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1492 errorText_ = errorStream_.str();
\r
1497 if ( stream_.mode == INPUT ||
\r
1498 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1500 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1501 if ( result != noErr ) {
\r
1502 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1503 errorText_ = errorStream_.str();
\r
1508 handle->drainCounter = 0;
\r
1509 handle->internalDrain = false;
\r
1510 stream_.state = STREAM_RUNNING;
\r
1513 if ( result == noErr ) return;
\r
1514 error( RtAudioError::SYSTEM_ERROR );
\r
1517 void RtApiCore :: stopStream( void )
\r
1520 if ( stream_.state == STREAM_STOPPED ) {
\r
1521 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1522 error( RtAudioError::WARNING );
\r
1526 OSStatus result = noErr;
\r
1527 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1528 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1530 if ( handle->drainCounter == 0 ) {
\r
1531 handle->drainCounter = 2;
\r
1532 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1535 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1536 if ( result != noErr ) {
\r
1537 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1538 errorText_ = errorStream_.str();
\r
1543 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1545 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1546 if ( result != noErr ) {
\r
1547 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1548 errorText_ = errorStream_.str();
\r
1553 stream_.state = STREAM_STOPPED;
\r
1556 if ( result == noErr ) return;
\r
1557 error( RtAudioError::SYSTEM_ERROR );
\r
1560 void RtApiCore :: abortStream( void )
\r
1563 if ( stream_.state == STREAM_STOPPED ) {
\r
1564 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1565 error( RtAudioError::WARNING );
\r
1569 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1570 handle->drainCounter = 2;
\r
1575 // This function will be called by a spawned thread when the user
\r
1576 // callback function signals that the stream should be stopped or
\r
1577 // aborted. It is better to handle it this way because the
\r
1578 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1579 // function is called.
\r
1580 static void *coreStopStream( void *ptr )
\r
1582 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1583 RtApiCore *object = (RtApiCore *) info->object;
\r
1585 object->stopStream();
\r
1586 pthread_exit( NULL );
\r
1589 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1590 const AudioBufferList *inBufferList,
\r
1591 const AudioBufferList *outBufferList )
\r
1593 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1594 if ( stream_.state == STREAM_CLOSED ) {
\r
1595 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1596 error( RtAudioError::WARNING );
\r
1600 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1601 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1603 // Check if we were draining the stream and signal is finished.
\r
1604 if ( handle->drainCounter > 3 ) {
\r
1605 ThreadHandle threadId;
\r
1607 stream_.state = STREAM_STOPPING;
\r
1608 if ( handle->internalDrain == true )
\r
1609 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1610 else // external call to stopStream()
\r
1611 pthread_cond_signal( &handle->condition );
\r
1615 AudioDeviceID outputDevice = handle->id[0];
\r
1617 // Invoke user callback to get fresh output data UNLESS we are
\r
1618 // draining stream or duplex mode AND the input/output devices are
\r
1619 // different AND this function is called for the input device.
\r
1620 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1621 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1622 double streamTime = getStreamTime();
\r
1623 RtAudioStreamStatus status = 0;
\r
1624 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1625 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1626 handle->xrun[0] = false;
\r
1628 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1629 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1630 handle->xrun[1] = false;
\r
1633 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1634 stream_.bufferSize, streamTime, status, info->userData );
\r
1635 if ( cbReturnValue == 2 ) {
\r
1636 stream_.state = STREAM_STOPPING;
\r
1637 handle->drainCounter = 2;
\r
1641 else if ( cbReturnValue == 1 ) {
\r
1642 handle->drainCounter = 1;
\r
1643 handle->internalDrain = true;
\r
1647 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1649 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1651 if ( handle->nStreams[0] == 1 ) {
\r
1652 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1654 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1656 else { // fill multiple streams with zeros
\r
1657 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1658 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1660 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1664 else if ( handle->nStreams[0] == 1 ) {
\r
1665 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1666 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1667 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1669 else { // copy from user buffer
\r
1670 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1671 stream_.userBuffer[0],
\r
1672 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1675 else { // fill multiple streams
\r
1676 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1677 if ( stream_.doConvertBuffer[0] ) {
\r
1678 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1679 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1682 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1683 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1684 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1685 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1686 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1689 else { // fill multiple multi-channel streams with interleaved data
\r
1690 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1691 Float32 *out, *in;
\r
1693 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1694 UInt32 inChannels = stream_.nUserChannels[0];
\r
1695 if ( stream_.doConvertBuffer[0] ) {
\r
1696 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1697 inChannels = stream_.nDeviceChannels[0];
\r
1700 if ( inInterleaved ) inOffset = 1;
\r
1701 else inOffset = stream_.bufferSize;
\r
1703 channelsLeft = inChannels;
\r
1704 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1706 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1707 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1710 // Account for possible channel offset in first stream
\r
1711 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1712 streamChannels -= stream_.channelOffset[0];
\r
1713 outJump = stream_.channelOffset[0];
\r
1717 // Account for possible unfilled channels at end of the last stream
\r
1718 if ( streamChannels > channelsLeft ) {
\r
1719 outJump = streamChannels - channelsLeft;
\r
1720 streamChannels = channelsLeft;
\r
1723 // Determine input buffer offsets and skips
\r
1724 if ( inInterleaved ) {
\r
1725 inJump = inChannels;
\r
1726 in += inChannels - channelsLeft;
\r
1730 in += (inChannels - channelsLeft) * inOffset;
\r
1733 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1734 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1735 *out++ = in[j*inOffset];
\r
1740 channelsLeft -= streamChannels;
\r
1746 // Don't bother draining input
\r
1747 if ( handle->drainCounter ) {
\r
1748 handle->drainCounter++;
\r
1752 AudioDeviceID inputDevice;
\r
1753 inputDevice = handle->id[1];
\r
1754 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1756 if ( handle->nStreams[1] == 1 ) {
\r
1757 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1758 convertBuffer( stream_.userBuffer[1],
\r
1759 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1760 stream_.convertInfo[1] );
\r
1762 else { // copy to user buffer
\r
1763 memcpy( stream_.userBuffer[1],
\r
1764 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1765 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1768 else { // read from multiple streams
\r
1769 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1770 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1772 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1773 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1774 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1775 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1776 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1779 else { // read from multiple multi-channel streams
\r
1780 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1781 Float32 *out, *in;
\r
1783 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1784 UInt32 outChannels = stream_.nUserChannels[1];
\r
1785 if ( stream_.doConvertBuffer[1] ) {
\r
1786 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1787 outChannels = stream_.nDeviceChannels[1];
\r
1790 if ( outInterleaved ) outOffset = 1;
\r
1791 else outOffset = stream_.bufferSize;
\r
1793 channelsLeft = outChannels;
\r
1794 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1796 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1797 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1800 // Account for possible channel offset in first stream
\r
1801 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1802 streamChannels -= stream_.channelOffset[1];
\r
1803 inJump = stream_.channelOffset[1];
\r
1807 // Account for possible unread channels at end of the last stream
\r
1808 if ( streamChannels > channelsLeft ) {
\r
1809 inJump = streamChannels - channelsLeft;
\r
1810 streamChannels = channelsLeft;
\r
1813 // Determine output buffer offsets and skips
\r
1814 if ( outInterleaved ) {
\r
1815 outJump = outChannels;
\r
1816 out += outChannels - channelsLeft;
\r
1820 out += (outChannels - channelsLeft) * outOffset;
\r
1823 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1824 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1825 out[j*outOffset] = *in++;
\r
1830 channelsLeft -= streamChannels;
\r
1834 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1835 convertBuffer( stream_.userBuffer[1],
\r
1836 stream_.deviceBuffer,
\r
1837 stream_.convertInfo[1] );
\r
1843 //MUTEX_UNLOCK( &stream_.mutex );
\r
1845 RtApi::tickStreamTime();
\r
1849 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1853 case kAudioHardwareNotRunningError:
\r
1854 return "kAudioHardwareNotRunningError";
\r
1856 case kAudioHardwareUnspecifiedError:
\r
1857 return "kAudioHardwareUnspecifiedError";
\r
1859 case kAudioHardwareUnknownPropertyError:
\r
1860 return "kAudioHardwareUnknownPropertyError";
\r
1862 case kAudioHardwareBadPropertySizeError:
\r
1863 return "kAudioHardwareBadPropertySizeError";
\r
1865 case kAudioHardwareIllegalOperationError:
\r
1866 return "kAudioHardwareIllegalOperationError";
\r
1868 case kAudioHardwareBadObjectError:
\r
1869 return "kAudioHardwareBadObjectError";
\r
1871 case kAudioHardwareBadDeviceError:
\r
1872 return "kAudioHardwareBadDeviceError";
\r
1874 case kAudioHardwareBadStreamError:
\r
1875 return "kAudioHardwareBadStreamError";
\r
1877 case kAudioHardwareUnsupportedOperationError:
\r
1878 return "kAudioHardwareUnsupportedOperationError";
\r
1880 case kAudioDeviceUnsupportedFormatError:
\r
1881 return "kAudioDeviceUnsupportedFormatError";
\r
1883 case kAudioDevicePermissionsError:
\r
1884 return "kAudioDevicePermissionsError";
\r
1887 return "CoreAudio unknown error";
\r
1891 //******************** End of __MACOSX_CORE__ *********************//
\r
1894 #if defined(__UNIX_JACK__)
\r
1896 // JACK is a low-latency audio server, originally written for the
\r
1897 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1898 // connect a number of different applications to an audio device, as
\r
1899 // well as allowing them to share audio between themselves.
\r
1901 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1902 // have ports connected to the server. The JACK server is typically
\r
1903 // started in a terminal as follows:
\r
1905 // .jackd -d alsa -d hw:0
\r
1907 // or through an interface program such as qjackctl. Many of the
\r
1908 // parameters normally set for a stream are fixed by the JACK server
\r
1909 // and can be specified when the JACK server is started. In
\r
1912 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1914 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1915 // frames, and number of buffers = 4. Once the server is running, it
\r
1916 // is not possible to override these values. If the values are not
\r
1917 // specified in the command-line, the JACK server uses default values.
\r
1919 // The JACK server does not have to be running when an instance of
\r
1920 // RtApiJack is created, though the function getDeviceCount() will
\r
1921 // report 0 devices found until JACK has been started. When no
\r
1922 // devices are available (i.e., the JACK server is not running), a
\r
1923 // stream cannot be opened.
\r
1925 #include <jack/jack.h>
\r
1926 #include <unistd.h>
\r
1929 // A structure to hold various information related to the Jack API
\r
1930 // implementation.
\r
1931 struct JackHandle {
\r
1932 jack_client_t *client;
\r
1933 jack_port_t **ports[2];
\r
1934 std::string deviceName[2];
\r
1936 pthread_cond_t condition;
\r
1937 int drainCounter; // Tracks callback counts when draining
\r
1938 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1941 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// Error-reporting hook handed to jack_set_error_function(): deliberately
// discards the message so Jack stays quiet in non-debug builds.
static void jackSilentError( const char * ) {}
\r
1946 RtApiJack :: RtApiJack()
\r
1948 // Nothing to do here.
\r
1949 #if !defined(__RTAUDIO_DEBUG__)
\r
1950 // Turn off Jack's internal error reporting.
\r
1951 jack_set_error_function( &jackSilentError );
\r
1955 RtApiJack :: ~RtApiJack()
\r
1957 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1960 unsigned int RtApiJack :: getDeviceCount( void )
\r
1962 // See if we can become a jack client.
\r
1963 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1964 jack_status_t *status = NULL;
\r
1965 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1966 if ( client == 0 ) return 0;
\r
1968 const char **ports;
\r
1969 std::string port, previousPort;
\r
1970 unsigned int nChannels = 0, nDevices = 0;
\r
1971 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1973 // Parse the port names up to the first colon (:).
\r
1974 size_t iColon = 0;
\r
1976 port = (char *) ports[ nChannels ];
\r
1977 iColon = port.find(":");
\r
1978 if ( iColon != std::string::npos ) {
\r
1979 port = port.substr( 0, iColon + 1 );
\r
1980 if ( port != previousPort ) {
\r
1982 previousPort = port;
\r
1985 } while ( ports[++nChannels] );
\r
1989 jack_client_close( client );
\r
1993 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1995 RtAudio::DeviceInfo info;
\r
1996 info.probed = false;
\r
1998 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1999 jack_status_t *status = NULL;
\r
2000 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
2001 if ( client == 0 ) {
\r
2002 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
2003 error( RtAudioError::WARNING );
\r
2007 const char **ports;
\r
2008 std::string port, previousPort;
\r
2009 unsigned int nPorts = 0, nDevices = 0;
\r
2010 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2012 // Parse the port names up to the first colon (:).
\r
2013 size_t iColon = 0;
\r
2015 port = (char *) ports[ nPorts ];
\r
2016 iColon = port.find(":");
\r
2017 if ( iColon != std::string::npos ) {
\r
2018 port = port.substr( 0, iColon );
\r
2019 if ( port != previousPort ) {
\r
2020 if ( nDevices == device ) info.name = port;
\r
2022 previousPort = port;
\r
2025 } while ( ports[++nPorts] );
\r
2029 if ( device >= nDevices ) {
\r
2030 jack_client_close( client );
\r
2031 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
2032 error( RtAudioError::INVALID_USE );
\r
2036 // Get the current jack server sample rate.
\r
2037 info.sampleRates.clear();
\r
2039 info.preferredSampleRate = jack_get_sample_rate( client );
\r
2040 info.sampleRates.push_back( info.preferredSampleRate );
\r
2042 // Count the available ports containing the client name as device
\r
2043 // channels. Jack "input ports" equal RtAudio output channels.
\r
2044 unsigned int nChannels = 0;
\r
2045 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
2047 while ( ports[ nChannels ] ) nChannels++;
\r
2049 info.outputChannels = nChannels;
\r
2052 // Jack "output ports" equal RtAudio input channels.
\r
2054 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
2056 while ( ports[ nChannels ] ) nChannels++;
\r
2058 info.inputChannels = nChannels;
\r
2061 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2062 jack_client_close(client);
\r
2063 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2064 error( RtAudioError::WARNING );
\r
2068 // If device opens for both playback and capture, we determine the channels.
\r
2069 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2070 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2072 // Jack always uses 32-bit floats.
\r
2073 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2075 // Jack doesn't provide default devices so we'll use the first available one.
\r
2076 if ( device == 0 && info.outputChannels > 0 )
\r
2077 info.isDefaultOutput = true;
\r
2078 if ( device == 0 && info.inputChannels > 0 )
\r
2079 info.isDefaultInput = true;
\r
2081 jack_client_close(client);
\r
2082 info.probed = true;
\r
2086 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2088 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2090 RtApiJack *object = (RtApiJack *) info->object;
\r
2091 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2096 // This function will be called by a spawned thread when the Jack
\r
2097 // server signals that it is shutting down. It is necessary to handle
\r
2098 // it this way because the jackShutdown() function must return before
\r
2099 // the jack_deactivate() function (in closeStream()) will return.
\r
2100 static void *jackCloseStream( void *ptr )
\r
2102 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2103 RtApiJack *object = (RtApiJack *) info->object;
\r
2105 object->closeStream();
\r
2107 pthread_exit( NULL );
\r
2109 static void jackShutdown( void *infoPointer )
\r
2111 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2112 RtApiJack *object = (RtApiJack *) info->object;
\r
2114 // Check current stream state. If stopped, then we'll assume this
\r
2115 // was called as a result of a call to RtApiJack::stopStream (the
\r
2116 // deactivation of a client handle causes this function to be called).
\r
2117 // If not, we'll assume the Jack server is shutting down or some
\r
2118 // other problem occurred and we should close the stream.
\r
2119 if ( object->isStreamRunning() == false ) return;
\r
2121 ThreadHandle threadId;
\r
2122 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2123 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2126 static int jackXrun( void *infoPointer )
\r
2128 JackHandle *handle = (JackHandle *) infoPointer;
\r
2130 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2131 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2136 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2137 unsigned int firstChannel, unsigned int sampleRate,
\r
2138 RtAudioFormat format, unsigned int *bufferSize,
\r
2139 RtAudio::StreamOptions *options )
\r
2141 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2143 // Look for jack server and try to become a client (only do once per stream).
\r
2144 jack_client_t *client = 0;
\r
2145 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2146 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2147 jack_status_t *status = NULL;
\r
2148 if ( options && !options->streamName.empty() )
\r
2149 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2151 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2152 if ( client == 0 ) {
\r
2153 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2154 error( RtAudioError::WARNING );
\r
2159 // The handle must have been created on an earlier pass.
\r
2160 client = handle->client;
\r
2163 const char **ports;
\r
2164 std::string port, previousPort, deviceName;
\r
2165 unsigned int nPorts = 0, nDevices = 0;
\r
2166 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2168 // Parse the port names up to the first colon (:).
\r
2169 size_t iColon = 0;
\r
2171 port = (char *) ports[ nPorts ];
\r
2172 iColon = port.find(":");
\r
2173 if ( iColon != std::string::npos ) {
\r
2174 port = port.substr( 0, iColon );
\r
2175 if ( port != previousPort ) {
\r
2176 if ( nDevices == device ) deviceName = port;
\r
2178 previousPort = port;
\r
2181 } while ( ports[++nPorts] );
\r
2185 if ( device >= nDevices ) {
\r
2186 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2190 // Count the available ports containing the client name as device
\r
2191 // channels. Jack "input ports" equal RtAudio output channels.
\r
2192 unsigned int nChannels = 0;
\r
2193 unsigned long flag = JackPortIsInput;
\r
2194 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2195 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2197 while ( ports[ nChannels ] ) nChannels++;
\r
2201 // Compare the jack ports for specified client to the requested number of channels.
\r
2202 if ( nChannels < (channels + firstChannel) ) {
\r
2203 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2204 errorText_ = errorStream_.str();
\r
2208 // Check the jack server sample rate.
\r
2209 unsigned int jackRate = jack_get_sample_rate( client );
\r
2210 if ( sampleRate != jackRate ) {
\r
2211 jack_client_close( client );
\r
2212 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2213 errorText_ = errorStream_.str();
\r
2216 stream_.sampleRate = jackRate;
\r
2218 // Get the latency of the JACK port.
\r
2219 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2220 if ( ports[ firstChannel ] ) {
\r
2221 // Added by Ge Wang
\r
2222 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2223 // the range (usually the min and max are equal)
\r
2224 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2225 // get the latency range
\r
2226 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2227 // be optimistic, use the min!
\r
2228 stream_.latency[mode] = latrange.min;
\r
2229 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2233 // The jack server always uses 32-bit floating-point data.
\r
2234 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2235 stream_.userFormat = format;
\r
2237 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2238 else stream_.userInterleaved = true;
\r
2240 // Jack always uses non-interleaved buffers.
\r
2241 stream_.deviceInterleaved[mode] = false;
\r
2243 // Jack always provides host byte-ordered data.
\r
2244 stream_.doByteSwap[mode] = false;
\r
2246 // Get the buffer size. The buffer size and number of buffers
\r
2247 // (periods) is set when the jack server is started.
\r
2248 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2249 *bufferSize = stream_.bufferSize;
\r
2251 stream_.nDeviceChannels[mode] = channels;
\r
2252 stream_.nUserChannels[mode] = channels;
\r
2254 // Set flags for buffer conversion.
\r
2255 stream_.doConvertBuffer[mode] = false;
\r
2256 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2257 stream_.doConvertBuffer[mode] = true;
\r
2258 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2259 stream_.nUserChannels[mode] > 1 )
\r
2260 stream_.doConvertBuffer[mode] = true;
\r
2262 // Allocate our JackHandle structure for the stream.
\r
2263 if ( handle == 0 ) {
\r
2265 handle = new JackHandle;
\r
2267 catch ( std::bad_alloc& ) {
\r
2268 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2272 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2273 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2276 stream_.apiHandle = (void *) handle;
\r
2277 handle->client = client;
\r
2279 handle->deviceName[mode] = deviceName;
\r
2281 // Allocate necessary internal buffers.
\r
2282 unsigned long bufferBytes;
\r
2283 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2284 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2285 if ( stream_.userBuffer[mode] == NULL ) {
\r
2286 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2290 if ( stream_.doConvertBuffer[mode] ) {
\r
2292 bool makeBuffer = true;
\r
2293 if ( mode == OUTPUT )
\r
2294 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2295 else { // mode == INPUT
\r
2296 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2297 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2298 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2299 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2303 if ( makeBuffer ) {
\r
2304 bufferBytes *= *bufferSize;
\r
2305 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2306 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2307 if ( stream_.deviceBuffer == NULL ) {
\r
2308 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2314 // Allocate memory for the Jack ports (channels) identifiers.
\r
2315 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2316 if ( handle->ports[mode] == NULL ) {
\r
2317 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2321 stream_.device[mode] = device;
\r
2322 stream_.channelOffset[mode] = firstChannel;
\r
2323 stream_.state = STREAM_STOPPED;
\r
2324 stream_.callbackInfo.object = (void *) this;
\r
2326 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2327 // We had already set up the stream for output.
\r
2328 stream_.mode = DUPLEX;
\r
2330 stream_.mode = mode;
\r
2331 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2332 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2333 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2336 // Register our ports.
\r
2338 if ( mode == OUTPUT ) {
\r
2339 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2340 snprintf( label, 64, "outport %d", i );
\r
2341 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2342 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2346 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2347 snprintf( label, 64, "inport %d", i );
\r
2348 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2349 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2353 // Setup the buffer conversion information structure. We don't use
\r
2354 // buffers to do channel offsets, so we override that parameter
\r
2356 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2362 pthread_cond_destroy( &handle->condition );
\r
2363 jack_client_close( handle->client );
\r
2365 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2366 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2369 stream_.apiHandle = 0;
\r
2372 for ( int i=0; i<2; i++ ) {
\r
2373 if ( stream_.userBuffer[i] ) {
\r
2374 free( stream_.userBuffer[i] );
\r
2375 stream_.userBuffer[i] = 0;
\r
2379 if ( stream_.deviceBuffer ) {
\r
2380 free( stream_.deviceBuffer );
\r
2381 stream_.deviceBuffer = 0;
\r
2387 void RtApiJack :: closeStream( void )
\r
2389 if ( stream_.state == STREAM_CLOSED ) {
\r
2390 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2391 error( RtAudioError::WARNING );
\r
2395 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2398 if ( stream_.state == STREAM_RUNNING )
\r
2399 jack_deactivate( handle->client );
\r
2401 jack_client_close( handle->client );
\r
2405 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2406 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2407 pthread_cond_destroy( &handle->condition );
\r
2409 stream_.apiHandle = 0;
\r
2412 for ( int i=0; i<2; i++ ) {
\r
2413 if ( stream_.userBuffer[i] ) {
\r
2414 free( stream_.userBuffer[i] );
\r
2415 stream_.userBuffer[i] = 0;
\r
2419 if ( stream_.deviceBuffer ) {
\r
2420 free( stream_.deviceBuffer );
\r
2421 stream_.deviceBuffer = 0;
\r
2424 stream_.mode = UNINITIALIZED;
\r
2425 stream_.state = STREAM_CLOSED;
\r
2428 void RtApiJack :: startStream( void )
\r
2431 if ( stream_.state == STREAM_RUNNING ) {
\r
2432 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2433 error( RtAudioError::WARNING );
\r
2437 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2438 int result = jack_activate( handle->client );
\r
2440 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2444 const char **ports;
\r
2446 // Get the list of available ports.
\r
2447 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2449 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2450 if ( ports == NULL) {
\r
2451 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2455 // Now make the port connections. Since RtAudio wasn't designed to
\r
2456 // allow the user to select particular channels of a device, we'll
\r
2457 // just open the first "nChannels" ports with offset.
\r
2458 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2460 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2461 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2464 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2471 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2473 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2474 if ( ports == NULL) {
\r
2475 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2479 // Now make the port connections. See note above.
\r
2480 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2482 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2483 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2486 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2493 handle->drainCounter = 0;
\r
2494 handle->internalDrain = false;
\r
2495 stream_.state = STREAM_RUNNING;
\r
2498 if ( result == 0 ) return;
\r
2499 error( RtAudioError::SYSTEM_ERROR );
\r
2502 void RtApiJack :: stopStream( void )
\r
2505 if ( stream_.state == STREAM_STOPPED ) {
\r
2506 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2507 error( RtAudioError::WARNING );
\r
2511 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2512 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2514 if ( handle->drainCounter == 0 ) {
\r
2515 handle->drainCounter = 2;
\r
2516 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2520 jack_deactivate( handle->client );
\r
2521 stream_.state = STREAM_STOPPED;
\r
2524 void RtApiJack :: abortStream( void )
\r
2527 if ( stream_.state == STREAM_STOPPED ) {
\r
2528 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2529 error( RtAudioError::WARNING );
\r
2533 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2534 handle->drainCounter = 2;
\r
2539 // This function will be called by a spawned thread when the user
\r
2540 // callback function signals that the stream should be stopped or
\r
2541 // aborted. It is necessary to handle it this way because the
\r
2542 // callbackEvent() function must return before the jack_deactivate()
\r
2543 // function will return.
\r
2544 static void *jackStopStream( void *ptr )
\r
2546 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2547 RtApiJack *object = (RtApiJack *) info->object;
\r
2549 object->stopStream();
\r
2550 pthread_exit( NULL );
\r
2553 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2555 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2556 if ( stream_.state == STREAM_CLOSED ) {
\r
2557 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2558 error( RtAudioError::WARNING );
\r
2561 if ( stream_.bufferSize != nframes ) {
\r
2562 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2563 error( RtAudioError::WARNING );
\r
2567 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2568 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2570 // Check if we were draining the stream and signal is finished.
\r
2571 if ( handle->drainCounter > 3 ) {
\r
2572 ThreadHandle threadId;
\r
2574 stream_.state = STREAM_STOPPING;
\r
2575 if ( handle->internalDrain == true )
\r
2576 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2578 pthread_cond_signal( &handle->condition );
\r
2582 // Invoke user callback first, to get fresh output data.
\r
2583 if ( handle->drainCounter == 0 ) {
\r
2584 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2585 double streamTime = getStreamTime();
\r
2586 RtAudioStreamStatus status = 0;
\r
2587 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2588 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2589 handle->xrun[0] = false;
\r
2591 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2592 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2593 handle->xrun[1] = false;
\r
2595 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2596 stream_.bufferSize, streamTime, status, info->userData );
\r
2597 if ( cbReturnValue == 2 ) {
\r
2598 stream_.state = STREAM_STOPPING;
\r
2599 handle->drainCounter = 2;
\r
2601 pthread_create( &id, NULL, jackStopStream, info );
\r
2604 else if ( cbReturnValue == 1 ) {
\r
2605 handle->drainCounter = 1;
\r
2606 handle->internalDrain = true;
\r
2610 jack_default_audio_sample_t *jackbuffer;
\r
2611 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2612 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2614 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2616 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2617 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2618 memset( jackbuffer, 0, bufferBytes );
\r
2622 else if ( stream_.doConvertBuffer[0] ) {
\r
2624 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2626 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2627 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2628 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2631 else { // no buffer conversion
\r
2632 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2633 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2634 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2639 // Don't bother draining input
\r
2640 if ( handle->drainCounter ) {
\r
2641 handle->drainCounter++;
\r
2645 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2647 if ( stream_.doConvertBuffer[1] ) {
\r
2648 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2649 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2650 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2652 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2654 else { // no buffer conversion
\r
2655 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2656 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2657 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2663 RtApi::tickStreamTime();
\r
2666 //******************** End of __UNIX_JACK__ *********************//
\r
#if defined(__WINDOWS_ASIO__) // ASIO API on Windows

// The ASIO API is designed around a callback scheme, so this
// implementation is similar to that used for OS-X CoreAudio and Linux
// Jack.  The primary constraint with ASIO is that it only allows
// access to a single driver at a time.  Thus, it is not possible to
// have more than one simultaneous RtAudio stream.
//
// This implementation also requires a number of external ASIO files
// and a few global variables.  The ASIO callback scheme does not
// allow for the passing of user data, so we must create a global
// pointer to our callbackInfo structure.
//
// On unix systems, we make use of a pthread condition variable.
// Since there is no equivalent in Windows, I hacked something based
// on information found in
// http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
#include "asiosys.h"
#include "asio.h"
#include "iasiothiscallresolver.h"
#include "asiodrivers.h"
#include <cmath>
#include <cstdlib>
\r
2693 static AsioDrivers drivers;
\r
2694 static ASIOCallbacks asioCallbacks;
\r
2695 static ASIODriverInfo driverInfo;
\r
2696 static CallbackInfo *asioCallbackInfo;
\r
2697 static bool asioXRun;
\r
2699 struct AsioHandle {
\r
2700 int drainCounter; // Tracks callback counts when draining
\r
2701 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2702 ASIOBufferInfo *bufferInfos;
\r
2706 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2709 // Function declarations (definitions at end of section)
\r
2710 static const char* getAsioErrorString( ASIOError result );
\r
2711 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2712 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2714 RtApiAsio :: RtApiAsio()
\r
2716 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2717 // CoInitialize beforehand, but it must be for appartment threading
\r
2718 // (in which case, CoInitilialize will return S_FALSE here).
\r
2719 coInitialized_ = false;
\r
2720 HRESULT hr = CoInitialize( NULL );
\r
2721 if ( FAILED(hr) ) {
\r
2722 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2723 error( RtAudioError::WARNING );
\r
2725 coInitialized_ = true;
\r
2727 drivers.removeCurrentDriver();
\r
2728 driverInfo.asioVersion = 2;
\r
2730 // See note in DirectSound implementation about GetDesktopWindow().
\r
2731 driverInfo.sysRef = GetForegroundWindow();
\r
2734 RtApiAsio :: ~RtApiAsio()
\r
2736 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2737 if ( coInitialized_ ) CoUninitialize();
\r
2740 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2742 return (unsigned int) drivers.asioGetNumDev();
\r
2745 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2747 RtAudio::DeviceInfo info;
\r
2748 info.probed = false;
\r
2751 unsigned int nDevices = getDeviceCount();
\r
2752 if ( nDevices == 0 ) {
\r
2753 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2754 error( RtAudioError::INVALID_USE );
\r
2758 if ( device >= nDevices ) {
\r
2759 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2760 error( RtAudioError::INVALID_USE );
\r
2764 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2765 if ( stream_.state != STREAM_CLOSED ) {
\r
2766 if ( device >= devices_.size() ) {
\r
2767 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2768 error( RtAudioError::WARNING );
\r
2771 return devices_[ device ];
\r
2774 char driverName[32];
\r
2775 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2776 if ( result != ASE_OK ) {
\r
2777 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2778 errorText_ = errorStream_.str();
\r
2779 error( RtAudioError::WARNING );
\r
2783 info.name = driverName;
\r
2785 if ( !drivers.loadDriver( driverName ) ) {
\r
2786 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2787 errorText_ = errorStream_.str();
\r
2788 error( RtAudioError::WARNING );
\r
2792 result = ASIOInit( &driverInfo );
\r
2793 if ( result != ASE_OK ) {
\r
2794 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2795 errorText_ = errorStream_.str();
\r
2796 error( RtAudioError::WARNING );
\r
2800 // Determine the device channel information.
\r
2801 long inputChannels, outputChannels;
\r
2802 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2803 if ( result != ASE_OK ) {
\r
2804 drivers.removeCurrentDriver();
\r
2805 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2806 errorText_ = errorStream_.str();
\r
2807 error( RtAudioError::WARNING );
\r
2811 info.outputChannels = outputChannels;
\r
2812 info.inputChannels = inputChannels;
\r
2813 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2814 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2816 // Determine the supported sample rates.
\r
2817 info.sampleRates.clear();
\r
2818 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2819 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2820 if ( result == ASE_OK ) {
\r
2821 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2823 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
2824 info.preferredSampleRate = SAMPLE_RATES[i];
\r
2828 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2829 ASIOChannelInfo channelInfo;
\r
2830 channelInfo.channel = 0;
\r
2831 channelInfo.isInput = true;
\r
2832 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2833 result = ASIOGetChannelInfo( &channelInfo );
\r
2834 if ( result != ASE_OK ) {
\r
2835 drivers.removeCurrentDriver();
\r
2836 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2837 errorText_ = errorStream_.str();
\r
2838 error( RtAudioError::WARNING );
\r
2842 info.nativeFormats = 0;
\r
2843 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2844 info.nativeFormats |= RTAUDIO_SINT16;
\r
2845 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2846 info.nativeFormats |= RTAUDIO_SINT32;
\r
2847 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2848 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2849 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2850 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2851 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2852 info.nativeFormats |= RTAUDIO_SINT24;
\r
2854 if ( info.outputChannels > 0 )
\r
2855 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2856 if ( info.inputChannels > 0 )
\r
2857 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2859 info.probed = true;
\r
2860 drivers.removeCurrentDriver();
\r
2864 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2866 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2867 object->callbackEvent( index );
\r
2870 void RtApiAsio :: saveDeviceInfo( void )
\r
2874 unsigned int nDevices = getDeviceCount();
\r
2875 devices_.resize( nDevices );
\r
2876 for ( unsigned int i=0; i<nDevices; i++ )
\r
2877 devices_[i] = getDeviceInfo( i );
\r
2880 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2881 unsigned int firstChannel, unsigned int sampleRate,
\r
2882 RtAudioFormat format, unsigned int *bufferSize,
\r
2883 RtAudio::StreamOptions *options )
\r
2884 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
2886 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
\r
2888 // For ASIO, a duplex stream MUST use the same driver.
\r
2889 if ( isDuplexInput && stream_.device[0] != device ) {
\r
2890 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2894 char driverName[32];
\r
2895 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2896 if ( result != ASE_OK ) {
\r
2897 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2898 errorText_ = errorStream_.str();
\r
2902 // Only load the driver once for duplex stream.
\r
2903 if ( !isDuplexInput ) {
\r
2904 // The getDeviceInfo() function will not work when a stream is open
\r
2905 // because ASIO does not allow multiple devices to run at the same
\r
2906 // time. Thus, we'll probe the system before opening a stream and
\r
2907 // save the results for use by getDeviceInfo().
\r
2908 this->saveDeviceInfo();
\r
2910 if ( !drivers.loadDriver( driverName ) ) {
\r
2911 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2912 errorText_ = errorStream_.str();
\r
2916 result = ASIOInit( &driverInfo );
\r
2917 if ( result != ASE_OK ) {
\r
2918 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2919 errorText_ = errorStream_.str();
\r
2924 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
\r
2925 bool buffersAllocated = false;
\r
2926 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2927 unsigned int nChannels;
\r
2930 // Check the device channel count.
\r
2931 long inputChannels, outputChannels;
\r
2932 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2933 if ( result != ASE_OK ) {
\r
2934 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2935 errorText_ = errorStream_.str();
\r
2939 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2940 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2941 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2942 errorText_ = errorStream_.str();
\r
2945 stream_.nDeviceChannels[mode] = channels;
\r
2946 stream_.nUserChannels[mode] = channels;
\r
2947 stream_.channelOffset[mode] = firstChannel;
\r
2949 // Verify the sample rate is supported.
\r
2950 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2951 if ( result != ASE_OK ) {
\r
2952 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2953 errorText_ = errorStream_.str();
\r
2957 // Get the current sample rate
\r
2958 ASIOSampleRate currentRate;
\r
2959 result = ASIOGetSampleRate( ¤tRate );
\r
2960 if ( result != ASE_OK ) {
\r
2961 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2962 errorText_ = errorStream_.str();
\r
2966 // Set the sample rate only if necessary
\r
2967 if ( currentRate != sampleRate ) {
\r
2968 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2969 if ( result != ASE_OK ) {
\r
2970 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2971 errorText_ = errorStream_.str();
\r
2976 // Determine the driver data type.
\r
2977 ASIOChannelInfo channelInfo;
\r
2978 channelInfo.channel = 0;
\r
2979 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2980 else channelInfo.isInput = true;
\r
2981 result = ASIOGetChannelInfo( &channelInfo );
\r
2982 if ( result != ASE_OK ) {
\r
2983 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2984 errorText_ = errorStream_.str();
\r
2988 // Assuming WINDOWS host is always little-endian.
\r
2989 stream_.doByteSwap[mode] = false;
\r
2990 stream_.userFormat = format;
\r
2991 stream_.deviceFormat[mode] = 0;
\r
2992 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2993 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2994 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2996 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2997 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2998 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
3000 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
3001 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
3002 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
3004 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
3005 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
3006 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
3008 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
3009 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
3010 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
3013 if ( stream_.deviceFormat[mode] == 0 ) {
\r
3014 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
3015 errorText_ = errorStream_.str();
\r
3019 // Set the buffer size. For a duplex stream, this will end up
\r
3020 // setting the buffer size based on the input constraints, which
\r
3022 long minSize, maxSize, preferSize, granularity;
\r
3023 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
3024 if ( result != ASE_OK ) {
\r
3025 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
3026 errorText_ = errorStream_.str();
\r
3030 if ( isDuplexInput ) {
\r
3031 // When this is the duplex input (output was opened before), then we have to use the same
\r
3032 // buffersize as the output, because it might use the preferred buffer size, which most
\r
3033 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
\r
3034 // So instead of throwing an error, make them equal. The caller uses the reference
\r
3035 // to the "bufferSize" param as usual to set up processing buffers.
\r
3037 *bufferSize = stream_.bufferSize;
\r
3040 if ( *bufferSize == 0 ) *bufferSize = preferSize;
\r
3041 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3042 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3043 else if ( granularity == -1 ) {
\r
3044 // Make sure bufferSize is a power of two.
\r
3045 int log2_of_min_size = 0;
\r
3046 int log2_of_max_size = 0;
\r
3048 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
3049 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
3050 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
3053 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
3054 int min_delta_num = log2_of_min_size;
\r
3056 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
3057 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
3058 if (current_delta < min_delta) {
\r
3059 min_delta = current_delta;
\r
3060 min_delta_num = i;
\r
3064 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
3065 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3066 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3068 else if ( granularity != 0 ) {
\r
3069 // Set to an even multiple of granularity, rounding up.
\r
3070 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
3075 // we don't use it anymore, see above!
\r
3076 // Just left it here for the case...
\r
3077 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
\r
3078 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3083 stream_.bufferSize = *bufferSize;
\r
3084 stream_.nBuffers = 2;
\r
3086 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3087 else stream_.userInterleaved = true;
\r
3089 // ASIO always uses non-interleaved buffers.
\r
3090 stream_.deviceInterleaved[mode] = false;
\r
3092 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3093 if ( handle == 0 ) {
\r
3095 handle = new AsioHandle;
\r
3097 catch ( std::bad_alloc& ) {
\r
3098 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3101 handle->bufferInfos = 0;
\r
3103 // Create a manual-reset event.
\r
3104 handle->condition = CreateEvent( NULL, // no security
\r
3105 TRUE, // manual-reset
\r
3106 FALSE, // non-signaled initially
\r
3107 NULL ); // unnamed
\r
3108 stream_.apiHandle = (void *) handle;
\r
3111 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3112 // and output separately, we'll have to dispose of previously
\r
3113 // created output buffers for a duplex stream.
\r
3114 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3115 ASIODisposeBuffers();
\r
3116 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3119 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3121 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3122 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3123 if ( handle->bufferInfos == NULL ) {
\r
3124 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3125 errorText_ = errorStream_.str();
\r
3129 ASIOBufferInfo *infos;
\r
3130 infos = handle->bufferInfos;
\r
3131 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3132 infos->isInput = ASIOFalse;
\r
3133 infos->channelNum = i + stream_.channelOffset[0];
\r
3134 infos->buffers[0] = infos->buffers[1] = 0;
\r
3136 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3137 infos->isInput = ASIOTrue;
\r
3138 infos->channelNum = i + stream_.channelOffset[1];
\r
3139 infos->buffers[0] = infos->buffers[1] = 0;
\r
3142 // prepare for callbacks
\r
3143 stream_.sampleRate = sampleRate;
\r
3144 stream_.device[mode] = device;
\r
3145 stream_.mode = isDuplexInput ? DUPLEX : mode;
\r
3147 // store this class instance before registering callbacks, that are going to use it
\r
3148 asioCallbackInfo = &stream_.callbackInfo;
\r
3149 stream_.callbackInfo.object = (void *) this;
\r
3151 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3152 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3153 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3154 asioCallbacks.asioMessage = &asioMessages;
\r
3155 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3156 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3157 if ( result != ASE_OK ) {
\r
3158 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
\r
3159 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
\r
3160 // in that case, let's be naïve and try that instead
\r
3161 *bufferSize = preferSize;
\r
3162 stream_.bufferSize = *bufferSize;
\r
3163 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3166 if ( result != ASE_OK ) {
\r
3167 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3168 errorText_ = errorStream_.str();
\r
3171 buffersAllocated = true;
\r
3172 stream_.state = STREAM_STOPPED;
\r
3174 // Set flags for buffer conversion.
\r
3175 stream_.doConvertBuffer[mode] = false;
\r
3176 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3177 stream_.doConvertBuffer[mode] = true;
\r
3178 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3179 stream_.nUserChannels[mode] > 1 )
\r
3180 stream_.doConvertBuffer[mode] = true;
\r
3182 // Allocate necessary internal buffers
\r
3183 unsigned long bufferBytes;
\r
3184 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3185 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3186 if ( stream_.userBuffer[mode] == NULL ) {
\r
3187 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3191 if ( stream_.doConvertBuffer[mode] ) {
\r
3193 bool makeBuffer = true;
\r
3194 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3195 if ( isDuplexInput && stream_.deviceBuffer ) {
\r
3196 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3197 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3200 if ( makeBuffer ) {
\r
3201 bufferBytes *= *bufferSize;
\r
3202 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3203 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3204 if ( stream_.deviceBuffer == NULL ) {
\r
3205 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3211 // Determine device latencies
\r
3212 long inputLatency, outputLatency;
\r
3213 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3214 if ( result != ASE_OK ) {
\r
3215 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3216 errorText_ = errorStream_.str();
\r
3217 error( RtAudioError::WARNING); // warn but don't fail
\r
3220 stream_.latency[0] = outputLatency;
\r
3221 stream_.latency[1] = inputLatency;
\r
3224 // Setup the buffer conversion information structure. We don't use
\r
3225 // buffers to do channel offsets, so we override that parameter
\r
3227 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3232 if ( !isDuplexInput ) {
\r
3233 // the cleanup for error in the duplex input, is done by RtApi::openStream
\r
3234 // So we clean up for single channel only
\r
3236 if ( buffersAllocated )
\r
3237 ASIODisposeBuffers();
\r
3239 drivers.removeCurrentDriver();
\r
3242 CloseHandle( handle->condition );
\r
3243 if ( handle->bufferInfos )
\r
3244 free( handle->bufferInfos );
\r
3247 stream_.apiHandle = 0;
\r
3251 if ( stream_.userBuffer[mode] ) {
\r
3252 free( stream_.userBuffer[mode] );
\r
3253 stream_.userBuffer[mode] = 0;
\r
3256 if ( stream_.deviceBuffer ) {
\r
3257 free( stream_.deviceBuffer );
\r
3258 stream_.deviceBuffer = 0;
\r
3263 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
3265 void RtApiAsio :: closeStream()
\r
3267 if ( stream_.state == STREAM_CLOSED ) {
\r
3268 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3269 error( RtAudioError::WARNING );
\r
3273 if ( stream_.state == STREAM_RUNNING ) {
\r
3274 stream_.state = STREAM_STOPPED;
\r
3277 ASIODisposeBuffers();
\r
3278 drivers.removeCurrentDriver();
\r
3280 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3282 CloseHandle( handle->condition );
\r
3283 if ( handle->bufferInfos )
\r
3284 free( handle->bufferInfos );
\r
3286 stream_.apiHandle = 0;
\r
3289 for ( int i=0; i<2; i++ ) {
\r
3290 if ( stream_.userBuffer[i] ) {
\r
3291 free( stream_.userBuffer[i] );
\r
3292 stream_.userBuffer[i] = 0;
\r
3296 if ( stream_.deviceBuffer ) {
\r
3297 free( stream_.deviceBuffer );
\r
3298 stream_.deviceBuffer = 0;
\r
3301 stream_.mode = UNINITIALIZED;
\r
3302 stream_.state = STREAM_CLOSED;
\r
3305 bool stopThreadCalled = false;
\r
3307 void RtApiAsio :: startStream()
\r
3310 if ( stream_.state == STREAM_RUNNING ) {
\r
3311 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3312 error( RtAudioError::WARNING );
\r
3316 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3317 ASIOError result = ASIOStart();
\r
3318 if ( result != ASE_OK ) {
\r
3319 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3320 errorText_ = errorStream_.str();
\r
3324 handle->drainCounter = 0;
\r
3325 handle->internalDrain = false;
\r
3326 ResetEvent( handle->condition );
\r
3327 stream_.state = STREAM_RUNNING;
\r
3331 stopThreadCalled = false;
\r
3333 if ( result == ASE_OK ) return;
\r
3334 error( RtAudioError::SYSTEM_ERROR );
\r
3337 void RtApiAsio :: stopStream()
\r
3340 if ( stream_.state == STREAM_STOPPED ) {
\r
3341 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3342 error( RtAudioError::WARNING );
\r
3346 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3347 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3348 if ( handle->drainCounter == 0 ) {
\r
3349 handle->drainCounter = 2;
\r
3350 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3354 stream_.state = STREAM_STOPPED;
\r
3356 ASIOError result = ASIOStop();
\r
3357 if ( result != ASE_OK ) {
\r
3358 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3359 errorText_ = errorStream_.str();
\r
3362 if ( result == ASE_OK ) return;
\r
3363 error( RtAudioError::SYSTEM_ERROR );
\r
3366 void RtApiAsio :: abortStream()
\r
3369 if ( stream_.state == STREAM_STOPPED ) {
\r
3370 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3371 error( RtAudioError::WARNING );
\r
3375 // The following lines were commented-out because some behavior was
\r
3376 // noted where the device buffers need to be zeroed to avoid
\r
3377 // continuing sound, even when the device buffers are completely
\r
3378 // disposed. So now, calling abort is the same as calling stop.
\r
3379 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3380 // handle->drainCounter = 2;
\r
3384 // This function will be called by a spawned thread when the user
\r
3385 // callback function signals that the stream should be stopped or
\r
3386 // aborted. It is necessary to handle it this way because the
\r
3387 // callbackEvent() function must return before the ASIOStop()
\r
3388 // function will return.
\r
3389 static unsigned __stdcall asioStopStream( void *ptr )
\r
3391 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3392 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3394 object->stopStream();
\r
3395 _endthreadex( 0 );
\r
3399 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3401 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3402 if ( stream_.state == STREAM_CLOSED ) {
\r
3403 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3404 error( RtAudioError::WARNING );
\r
3408 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3409 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3411 // Check if we were draining the stream and signal if finished.
\r
3412 if ( handle->drainCounter > 3 ) {
\r
3414 stream_.state = STREAM_STOPPING;
\r
3415 if ( handle->internalDrain == false )
\r
3416 SetEvent( handle->condition );
\r
3417 else { // spawn a thread to stop the stream
\r
3418 unsigned threadId;
\r
3419 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3420 &stream_.callbackInfo, 0, &threadId );
\r
3425 // Invoke user callback to get fresh output data UNLESS we are
\r
3426 // draining stream.
\r
3427 if ( handle->drainCounter == 0 ) {
\r
3428 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3429 double streamTime = getStreamTime();
\r
3430 RtAudioStreamStatus status = 0;
\r
3431 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3432 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3435 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3436 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3439 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3440 stream_.bufferSize, streamTime, status, info->userData );
\r
3441 if ( cbReturnValue == 2 ) {
\r
3442 stream_.state = STREAM_STOPPING;
\r
3443 handle->drainCounter = 2;
\r
3444 unsigned threadId;
\r
3445 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3446 &stream_.callbackInfo, 0, &threadId );
\r
3449 else if ( cbReturnValue == 1 ) {
\r
3450 handle->drainCounter = 1;
\r
3451 handle->internalDrain = true;
\r
3455 unsigned int nChannels, bufferBytes, i, j;
\r
3456 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3457 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3459 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3461 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3463 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3464 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3465 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3469 else if ( stream_.doConvertBuffer[0] ) {
\r
3471 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3472 if ( stream_.doByteSwap[0] )
\r
3473 byteSwapBuffer( stream_.deviceBuffer,
\r
3474 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3475 stream_.deviceFormat[0] );
\r
3477 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3478 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3479 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3480 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3486 if ( stream_.doByteSwap[0] )
\r
3487 byteSwapBuffer( stream_.userBuffer[0],
\r
3488 stream_.bufferSize * stream_.nUserChannels[0],
\r
3489 stream_.userFormat );
\r
3491 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3492 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3493 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3494 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3500 // Don't bother draining input
\r
3501 if ( handle->drainCounter ) {
\r
3502 handle->drainCounter++;
\r
3506 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3508 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3510 if (stream_.doConvertBuffer[1]) {
\r
3512 // Always interleave ASIO input data.
\r
3513 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3514 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3515 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3516 handle->bufferInfos[i].buffers[bufferIndex],
\r
3520 if ( stream_.doByteSwap[1] )
\r
3521 byteSwapBuffer( stream_.deviceBuffer,
\r
3522 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3523 stream_.deviceFormat[1] );
\r
3524 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3528 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3529 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3530 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3531 handle->bufferInfos[i].buffers[bufferIndex],
\r
3536 if ( stream_.doByteSwap[1] )
\r
3537 byteSwapBuffer( stream_.userBuffer[1],
\r
3538 stream_.bufferSize * stream_.nUserChannels[1],
\r
3539 stream_.userFormat );
\r
3544 // The following call was suggested by Malte Clasen. While the API
\r
3545 // documentation indicates it should not be required, some device
\r
3546 // drivers apparently do not function correctly without it.
\r
3547 ASIOOutputReady();
\r
3549 RtApi::tickStreamTime();
\r
3553 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3555 // The ASIO documentation says that this usually only happens during
\r
3556 // external sync. Audio processing is not stopped by the driver,
\r
3557 // actual sample rate might not have even changed, maybe only the
\r
3558 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3561 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3563 object->stopStream();
\r
3565 catch ( RtAudioError &exception ) {
\r
3566 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3570 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3573 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3577 switch( selector ) {
\r
3578 case kAsioSelectorSupported:
\r
3579 if ( value == kAsioResetRequest
\r
3580 || value == kAsioEngineVersion
\r
3581 || value == kAsioResyncRequest
\r
3582 || value == kAsioLatenciesChanged
\r
3583 // The following three were added for ASIO 2.0, you don't
\r
3584 // necessarily have to support them.
\r
3585 || value == kAsioSupportsTimeInfo
\r
3586 || value == kAsioSupportsTimeCode
\r
3587 || value == kAsioSupportsInputMonitor)
\r
3590 case kAsioResetRequest:
\r
3591 // Defer the task and perform the reset of the driver during the
\r
3592 // next "safe" situation. You cannot reset the driver right now,
\r
3593 // as this code is called from the driver. Reset the driver is
\r
3594 // done by completely destruct is. I.e. ASIOStop(),
\r
3595 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3597 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3600 case kAsioResyncRequest:
\r
3601 // This informs the application that the driver encountered some
\r
3602 // non-fatal data loss. It is used for synchronization purposes
\r
3603 // of different media. Added mainly to work around the Win16Mutex
\r
3604 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3605 // which could lose data because the Mutex was held too long by
\r
3606 // another thread. However a driver can issue it in other
\r
3607 // situations, too.
\r
3608 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3612 case kAsioLatenciesChanged:
\r
3613 // This will inform the host application that the drivers were
\r
3614 // latencies changed. Beware, it this does not mean that the
\r
3615 // buffer sizes have changed! You might need to update internal
\r
3617 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3620 case kAsioEngineVersion:
\r
3621 // Return the supported ASIO version of the host application. If
\r
3622 // a host application does not implement this selector, ASIO 1.0
\r
3623 // is assumed by the driver.
\r
3626 case kAsioSupportsTimeInfo:
\r
3627 // Informs the driver whether the
\r
3628 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3629 // For compatibility with ASIO 1.0 drivers the host application
\r
3630 // should always support the "old" bufferSwitch method, too.
\r
3633 case kAsioSupportsTimeCode:
\r
3634 // Informs the driver whether application is interested in time
\r
3635 // code info. If an application does not need to know about time
\r
3636 // code, the driver has less work to do.
\r
3643 static const char* getAsioErrorString( ASIOError result )
\r
3648 const char*message;
\r
3651 static const Messages m[] =
\r
3653 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3654 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3655 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3656 { ASE_InvalidMode, "Invalid mode." },
\r
3657 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3658 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3659 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3662 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3663 if ( m[i].value == result ) return m[i].message;
\r
3665 return "Unknown error.";
\r
3668 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3672 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3674 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3675 // - Introduces support for the Windows WASAPI API
\r
3676 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3677 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3678 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3683 #include <audioclient.h>
\r
3685 #include <mmdeviceapi.h>
\r
3686 #include <functiondiscoverykeys_devpkey.h>
\r
3688 //=============================================================================
\r
3690 #define SAFE_RELEASE( objectPtr )\
\r
3693 objectPtr->Release();\
\r
3694 objectPtr = NULL;\
\r
3697 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3699 //-----------------------------------------------------------------------------
\r
3701 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3702 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3703 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3704 // provide intermediate storage for read / write synchronization.
\r
3705 class WasapiBuffer
\r
3709 : buffer_( NULL ),
\r
3718 // sets the length of the internal ring buffer
\r
3719 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3722 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3724 bufferSize_ = bufferSize;
\r
3729 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3730 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3732 if ( !buffer || // incoming buffer is NULL
\r
3733 bufferSize == 0 || // incoming buffer has no data
\r
3734 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3739 unsigned int relOutIndex = outIndex_;
\r
3740 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3741 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3742 relOutIndex += bufferSize_;
\r
3745 // "in" index can end on the "out" index but cannot begin at it
\r
3746 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3747 return false; // not enough space between "in" index and "out" index
\r
3750 // copy buffer from external to internal
\r
3751 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3752 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3753 int fromInSize = bufferSize - fromZeroSize;
\r
3757 case RTAUDIO_SINT8:
\r
3758 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3759 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3761 case RTAUDIO_SINT16:
\r
3762 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3763 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3765 case RTAUDIO_SINT24:
\r
3766 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3767 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3769 case RTAUDIO_SINT32:
\r
3770 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3771 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3773 case RTAUDIO_FLOAT32:
\r
3774 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3775 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3777 case RTAUDIO_FLOAT64:
\r
3778 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3779 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3783 // update "in" index
\r
3784 inIndex_ += bufferSize;
\r
3785 inIndex_ %= bufferSize_;
\r
3790 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3791 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3793 if ( !buffer || // incoming buffer is NULL
\r
3794 bufferSize == 0 || // incoming buffer has no data
\r
3795 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3800 unsigned int relInIndex = inIndex_;
\r
3801 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3802 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3803 relInIndex += bufferSize_;
\r
3806 // "out" index can begin at and end on the "in" index
\r
3807 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3808 return false; // not enough space between "out" index and "in" index
\r
3811 // copy buffer from internal to external
\r
3812 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3813 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3814 int fromOutSize = bufferSize - fromZeroSize;
\r
3818 case RTAUDIO_SINT8:
\r
3819 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3820 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3822 case RTAUDIO_SINT16:
\r
3823 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3824 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3826 case RTAUDIO_SINT24:
\r
3827 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3828 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3830 case RTAUDIO_SINT32:
\r
3831 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3832 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3834 case RTAUDIO_FLOAT32:
\r
3835 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3836 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3838 case RTAUDIO_FLOAT64:
\r
3839 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3840 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3844 // update "out" index
\r
3845 outIndex_ += bufferSize;
\r
3846 outIndex_ %= bufferSize_;
\r
3853 unsigned int bufferSize_;
\r
3854 unsigned int inIndex_;
\r
3855 unsigned int outIndex_;
\r
3858 //-----------------------------------------------------------------------------
\r
3860 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3861 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3862 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3863 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3864 // one rate and its multiple.
\r
3865 void convertBufferWasapi( char* outBuffer,
\r
3866 const char* inBuffer,
\r
3867 const unsigned int& channelCount,
\r
3868 const unsigned int& inSampleRate,
\r
3869 const unsigned int& outSampleRate,
\r
3870 const unsigned int& inSampleCount,
\r
3871 unsigned int& outSampleCount,
\r
3872 const RtAudioFormat& format )
\r
3874 // calculate the new outSampleCount and relative sampleStep
\r
3875 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3876 float sampleStep = 1.0f / sampleRatio;
\r
3877 float inSampleFraction = 0.0f;
\r
3879 outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );
\r
3881 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3882 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3884 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3888 case RTAUDIO_SINT8:
\r
3889 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3891 case RTAUDIO_SINT16:
\r
3892 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3894 case RTAUDIO_SINT24:
\r
3895 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3897 case RTAUDIO_SINT32:
\r
3898 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3900 case RTAUDIO_FLOAT32:
\r
3901 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3903 case RTAUDIO_FLOAT64:
\r
3904 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3908 // jump to next in sample
\r
3909 inSampleFraction += sampleStep;
\r
3913 //-----------------------------------------------------------------------------
\r
3915 // A structure to hold various information related to the WASAPI implementation.
\r
3916 struct WasapiHandle
\r
3918 IAudioClient* captureAudioClient;
\r
3919 IAudioClient* renderAudioClient;
\r
3920 IAudioCaptureClient* captureClient;
\r
3921 IAudioRenderClient* renderClient;
\r
3922 HANDLE captureEvent;
\r
3923 HANDLE renderEvent;
\r
3926 : captureAudioClient( NULL ),
\r
3927 renderAudioClient( NULL ),
\r
3928 captureClient( NULL ),
\r
3929 renderClient( NULL ),
\r
3930 captureEvent( NULL ),
\r
3931 renderEvent( NULL ) {}
\r
3934 //=============================================================================
\r
3936 RtApiWasapi::RtApiWasapi()
\r
3937 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3939 // WASAPI can run either apartment or multi-threaded
\r
3940 HRESULT hr = CoInitialize( NULL );
\r
3941 if ( !FAILED( hr ) )
\r
3942 coInitialized_ = true;
\r
3944 // Instantiate device enumerator
\r
3945 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3946 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3947 ( void** ) &deviceEnumerator_ );
\r
3949 if ( FAILED( hr ) ) {
\r
3950 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3951 error( RtAudioError::DRIVER_ERROR );
\r
3955 //-----------------------------------------------------------------------------
\r
3957 RtApiWasapi::~RtApiWasapi()
\r
3959 if ( stream_.state != STREAM_CLOSED )
\r
3962 SAFE_RELEASE( deviceEnumerator_ );
\r
3964 // If this object previously called CoInitialize()
\r
3965 if ( coInitialized_ )
\r
3969 //=============================================================================
\r
3971 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3973 unsigned int captureDeviceCount = 0;
\r
3974 unsigned int renderDeviceCount = 0;
\r
3976 IMMDeviceCollection* captureDevices = NULL;
\r
3977 IMMDeviceCollection* renderDevices = NULL;
\r
3979 // Count capture devices
\r
3980 errorText_.clear();
\r
3981 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3982 if ( FAILED( hr ) ) {
\r
3983 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3987 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3988 if ( FAILED( hr ) ) {
\r
3989 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3993 // Count render devices
\r
3994 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3995 if ( FAILED( hr ) ) {
\r
3996 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
4000 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4001 if ( FAILED( hr ) ) {
\r
4002 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
4007 // release all references
\r
4008 SAFE_RELEASE( captureDevices );
\r
4009 SAFE_RELEASE( renderDevices );
\r
4011 if ( errorText_.empty() )
\r
4012 return captureDeviceCount + renderDeviceCount;
\r
4014 error( RtAudioError::DRIVER_ERROR );
\r
4018 //-----------------------------------------------------------------------------
\r
4020 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
4022 RtAudio::DeviceInfo info;
\r
4023 unsigned int captureDeviceCount = 0;
\r
4024 unsigned int renderDeviceCount = 0;
\r
4025 std::string defaultDeviceName;
\r
4026 bool isCaptureDevice = false;
\r
4028 PROPVARIANT deviceNameProp;
\r
4029 PROPVARIANT defaultDeviceNameProp;
\r
4031 IMMDeviceCollection* captureDevices = NULL;
\r
4032 IMMDeviceCollection* renderDevices = NULL;
\r
4033 IMMDevice* devicePtr = NULL;
\r
4034 IMMDevice* defaultDevicePtr = NULL;
\r
4035 IAudioClient* audioClient = NULL;
\r
4036 IPropertyStore* devicePropStore = NULL;
\r
4037 IPropertyStore* defaultDevicePropStore = NULL;
\r
4039 WAVEFORMATEX* deviceFormat = NULL;
\r
4040 WAVEFORMATEX* closestMatchFormat = NULL;
\r
4043 info.probed = false;
\r
4045 // Count capture devices
\r
4046 errorText_.clear();
\r
4047 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4048 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4049 if ( FAILED( hr ) ) {
\r
4050 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
4054 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4055 if ( FAILED( hr ) ) {
\r
4056 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
4060 // Count render devices
\r
4061 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4062 if ( FAILED( hr ) ) {
\r
4063 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
4067 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4068 if ( FAILED( hr ) ) {
\r
4069 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
4073 // validate device index
\r
4074 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4075 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
4076 errorType = RtAudioError::INVALID_USE;
\r
4080 // determine whether index falls within capture or render devices
\r
4081 if ( device >= renderDeviceCount ) {
\r
4082 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4083 if ( FAILED( hr ) ) {
\r
4084 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4087 isCaptureDevice = true;
\r
4090 hr = renderDevices->Item( device, &devicePtr );
\r
4091 if ( FAILED( hr ) ) {
\r
4092 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4095 isCaptureDevice = false;
\r
4098 // get default device name
\r
4099 if ( isCaptureDevice ) {
\r
4100 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4101 if ( FAILED( hr ) ) {
\r
4102 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4107 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4108 if ( FAILED( hr ) ) {
\r
4109 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4114 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4115 if ( FAILED( hr ) ) {
\r
4116 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4119 PropVariantInit( &defaultDeviceNameProp );
\r
4121 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4122 if ( FAILED( hr ) ) {
\r
4123 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4127 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
\r
4130 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4131 if ( FAILED( hr ) ) {
\r
4132 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4136 PropVariantInit( &deviceNameProp );
\r
4138 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4139 if ( FAILED( hr ) ) {
\r
4140 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4144 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
\r
4147 if ( isCaptureDevice ) {
\r
4148 info.isDefaultInput = info.name == defaultDeviceName;
\r
4149 info.isDefaultOutput = false;
\r
4152 info.isDefaultInput = false;
\r
4153 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4157 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4158 if ( FAILED( hr ) ) {
\r
4159 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4163 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4164 if ( FAILED( hr ) ) {
\r
4165 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4169 if ( isCaptureDevice ) {
\r
4170 info.inputChannels = deviceFormat->nChannels;
\r
4171 info.outputChannels = 0;
\r
4172 info.duplexChannels = 0;
\r
4175 info.inputChannels = 0;
\r
4176 info.outputChannels = deviceFormat->nChannels;
\r
4177 info.duplexChannels = 0;
\r
4181 info.sampleRates.clear();
\r
4183 // allow support for all sample rates as we have a built-in sample rate converter
\r
4184 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4185 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4187 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
\r
4190 info.nativeFormats = 0;
\r
4192 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4193 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4194 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4196 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4197 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4199 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4200 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4203 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4204 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4205 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4207 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4208 info.nativeFormats |= RTAUDIO_SINT8;
\r
4210 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4211 info.nativeFormats |= RTAUDIO_SINT16;
\r
4213 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4214 info.nativeFormats |= RTAUDIO_SINT24;
\r
4216 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4217 info.nativeFormats |= RTAUDIO_SINT32;
\r
4222 info.probed = true;
\r
4225 // release all references
\r
4226 PropVariantClear( &deviceNameProp );
\r
4227 PropVariantClear( &defaultDeviceNameProp );
\r
4229 SAFE_RELEASE( captureDevices );
\r
4230 SAFE_RELEASE( renderDevices );
\r
4231 SAFE_RELEASE( devicePtr );
\r
4232 SAFE_RELEASE( defaultDevicePtr );
\r
4233 SAFE_RELEASE( audioClient );
\r
4234 SAFE_RELEASE( devicePropStore );
\r
4235 SAFE_RELEASE( defaultDevicePropStore );
\r
4237 CoTaskMemFree( deviceFormat );
\r
4238 CoTaskMemFree( closestMatchFormat );
\r
4240 if ( !errorText_.empty() )
\r
4241 error( errorType );
\r
4245 //-----------------------------------------------------------------------------
\r
4247 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4249 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4250 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4258 //-----------------------------------------------------------------------------
\r
4260 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4262 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4263 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4271 //-----------------------------------------------------------------------------
\r
4273 void RtApiWasapi::closeStream( void )
\r
4275 if ( stream_.state == STREAM_CLOSED ) {
\r
4276 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4277 error( RtAudioError::WARNING );
\r
4281 if ( stream_.state != STREAM_STOPPED )
\r
4284 // clean up stream memory
\r
4285 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4286 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4288 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4289 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4291 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4292 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4294 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4295 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4297 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4298 stream_.apiHandle = NULL;
\r
4300 for ( int i = 0; i < 2; i++ ) {
\r
4301 if ( stream_.userBuffer[i] ) {
\r
4302 free( stream_.userBuffer[i] );
\r
4303 stream_.userBuffer[i] = 0;
\r
4307 if ( stream_.deviceBuffer ) {
\r
4308 free( stream_.deviceBuffer );
\r
4309 stream_.deviceBuffer = 0;
\r
4312 // update stream state
\r
4313 stream_.state = STREAM_CLOSED;
\r
4316 //-----------------------------------------------------------------------------
\r
4318 void RtApiWasapi::startStream( void )
\r
4322 if ( stream_.state == STREAM_RUNNING ) {
\r
4323 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4324 error( RtAudioError::WARNING );
\r
4328 // update stream state
\r
4329 stream_.state = STREAM_RUNNING;
\r
4331 // create WASAPI stream thread
\r
4332 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4334 if ( !stream_.callbackInfo.thread ) {
\r
4335 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4336 error( RtAudioError::THREAD_ERROR );
\r
4339 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4340 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4344 //-----------------------------------------------------------------------------
\r
4346 void RtApiWasapi::stopStream( void )
\r
4350 if ( stream_.state == STREAM_STOPPED ) {
\r
4351 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4352 error( RtAudioError::WARNING );
\r
4356 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4357 stream_.state = STREAM_STOPPING;
\r
4359 // wait until stream thread is stopped
\r
4360 while( stream_.state != STREAM_STOPPED ) {
\r
4364 // Wait for the last buffer to play before stopping.
\r
4365 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4367 // stop capture client if applicable
\r
4368 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4369 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4370 if ( FAILED( hr ) ) {
\r
4371 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4372 error( RtAudioError::DRIVER_ERROR );
\r
4377 // stop render client if applicable
\r
4378 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4379 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4380 if ( FAILED( hr ) ) {
\r
4381 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4382 error( RtAudioError::DRIVER_ERROR );
\r
4387 // close thread handle
\r
4388 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4389 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4390 error( RtAudioError::THREAD_ERROR );
\r
4394 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4397 //-----------------------------------------------------------------------------
\r
4399 void RtApiWasapi::abortStream( void )
\r
4403 if ( stream_.state == STREAM_STOPPED ) {
\r
4404 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4405 error( RtAudioError::WARNING );
\r
4409 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4410 stream_.state = STREAM_STOPPING;
\r
4412 // wait until stream thread is stopped
\r
4413 while ( stream_.state != STREAM_STOPPED ) {
\r
4417 // stop capture client if applicable
\r
4418 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4419 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4420 if ( FAILED( hr ) ) {
\r
4421 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4422 error( RtAudioError::DRIVER_ERROR );
\r
4427 // stop render client if applicable
\r
4428 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4429 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4430 if ( FAILED( hr ) ) {
\r
4431 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4432 error( RtAudioError::DRIVER_ERROR );
\r
4437 // close thread handle
\r
4438 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4439 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4440 error( RtAudioError::THREAD_ERROR );
\r
4444 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4447 //-----------------------------------------------------------------------------
\r
4449 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4450 unsigned int firstChannel, unsigned int sampleRate,
\r
4451 RtAudioFormat format, unsigned int* bufferSize,
\r
4452 RtAudio::StreamOptions* options )
\r
4454 bool methodResult = FAILURE;
\r
4455 unsigned int captureDeviceCount = 0;
\r
4456 unsigned int renderDeviceCount = 0;
\r
4458 IMMDeviceCollection* captureDevices = NULL;
\r
4459 IMMDeviceCollection* renderDevices = NULL;
\r
4460 IMMDevice* devicePtr = NULL;
\r
4461 WAVEFORMATEX* deviceFormat = NULL;
\r
4462 unsigned int bufferBytes;
\r
4463 stream_.state = STREAM_STOPPED;
\r
4465 // create API Handle if not already created
\r
4466 if ( !stream_.apiHandle )
\r
4467 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4469 // Count capture devices
\r
4470 errorText_.clear();
\r
4471 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4472 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4473 if ( FAILED( hr ) ) {
\r
4474 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4478 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4479 if ( FAILED( hr ) ) {
\r
4480 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4484 // Count render devices
\r
4485 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4486 if ( FAILED( hr ) ) {
\r
4487 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4491 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4492 if ( FAILED( hr ) ) {
\r
4493 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4497 // validate device index
\r
4498 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4499 errorType = RtAudioError::INVALID_USE;
\r
4500 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4504 // determine whether index falls within capture or render devices
\r
4505 if ( device >= renderDeviceCount ) {
\r
4506 if ( mode != INPUT ) {
\r
4507 errorType = RtAudioError::INVALID_USE;
\r
4508 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4512 // retrieve captureAudioClient from devicePtr
\r
4513 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4515 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4516 if ( FAILED( hr ) ) {
\r
4517 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4521 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4522 NULL, ( void** ) &captureAudioClient );
\r
4523 if ( FAILED( hr ) ) {
\r
4524 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4528 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4529 if ( FAILED( hr ) ) {
\r
4530 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4534 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4535 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4538 if ( mode != OUTPUT ) {
\r
4539 errorType = RtAudioError::INVALID_USE;
\r
4540 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4544 // retrieve renderAudioClient from devicePtr
\r
4545 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4547 hr = renderDevices->Item( device, &devicePtr );
\r
4548 if ( FAILED( hr ) ) {
\r
4549 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4553 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4554 NULL, ( void** ) &renderAudioClient );
\r
4555 if ( FAILED( hr ) ) {
\r
4556 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4560 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4561 if ( FAILED( hr ) ) {
\r
4562 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4566 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4567 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4570 // fill stream data
\r
4571 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4572 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4573 stream_.mode = DUPLEX;
\r
4576 stream_.mode = mode;
\r
4579 stream_.device[mode] = device;
\r
4580 stream_.doByteSwap[mode] = false;
\r
4581 stream_.sampleRate = sampleRate;
\r
4582 stream_.bufferSize = *bufferSize;
\r
4583 stream_.nBuffers = 1;
\r
4584 stream_.nUserChannels[mode] = channels;
\r
4585 stream_.channelOffset[mode] = firstChannel;
\r
4586 stream_.userFormat = format;
\r
4587 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4589 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4590 stream_.userInterleaved = false;
\r
4592 stream_.userInterleaved = true;
\r
4593 stream_.deviceInterleaved[mode] = true;
\r
4595 // Set flags for buffer conversion.
\r
4596 stream_.doConvertBuffer[mode] = false;
\r
4597 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
\r
4598 stream_.nUserChannels != stream_.nDeviceChannels )
\r
4599 stream_.doConvertBuffer[mode] = true;
\r
4600 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4601 stream_.nUserChannels[mode] > 1 )
\r
4602 stream_.doConvertBuffer[mode] = true;
\r
4604 if ( stream_.doConvertBuffer[mode] )
\r
4605 setConvertInfo( mode, 0 );
\r
4607 // Allocate necessary internal buffers
\r
4608 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4610 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4611 if ( !stream_.userBuffer[mode] ) {
\r
4612 errorType = RtAudioError::MEMORY_ERROR;
\r
4613 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4617 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4618 stream_.callbackInfo.priority = 15;
\r
4620 stream_.callbackInfo.priority = 0;
\r
4622 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4623 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4625 methodResult = SUCCESS;
\r
4629 SAFE_RELEASE( captureDevices );
\r
4630 SAFE_RELEASE( renderDevices );
\r
4631 SAFE_RELEASE( devicePtr );
\r
4632 CoTaskMemFree( deviceFormat );
\r
4634 // if method failed, close the stream
\r
4635 if ( methodResult == FAILURE )
\r
4638 if ( !errorText_.empty() )
\r
4639 error( errorType );
\r
4640 return methodResult;
\r
4643 //=============================================================================
\r
4645 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4648 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4653 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4656 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4661 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4664 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4669 //-----------------------------------------------------------------------------
\r
// Core WASAPI stream thread. On entry it lazily initializes the capture
// and/or render audio clients (shared mode, event-driven), allocates the
// intermediate conversion/device buffers, then loops until stream_.state
// becomes STREAM_STOPPING: pull captured frames -> resample/convert to the
// user format -> invoke the user callback -> convert/resample the callback
// output -> push to the render endpoint, ticking stream time each cycle.
//
// NOTE(review): this listing is a corrupted numbered extract. Every line
// below carries a stray original-line-number token, and many structural
// lines are missing: closing braces, "else" branches, the "goto Exit;"
// statements that follow each errorText_ assignment, the "Exit:" cleanup
// label, and the declaration of the local HRESULT "hr". The code is kept
// byte-identical here; recover the canonical RtAudio.cpp before compiling.
4671 void RtApiWasapi::wasapiThread()

4673 // as this is a new thread, we must CoInitialize it

4674 CoInitialize( NULL );

4678 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;

4679 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;

4680 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;

4681 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;

4682 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;

4683 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;

4685 WAVEFORMATEX* captureFormat = NULL;

4686 WAVEFORMATEX* renderFormat = NULL;

// Ratios of device mix sample rate to the user-requested stream rate; used
// to size buffers and drive convertBufferWasapi resampling below.
4687 float captureSrRatio = 0.0f;

4688 float renderSrRatio = 0.0f;

4689 WasapiBuffer captureBuffer;

4690 WasapiBuffer renderBuffer;

4692 // declare local stream variables

4693 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;

4694 BYTE* streamBuffer = NULL;

4695 unsigned long captureFlags = 0;

4696 unsigned int bufferFrameCount = 0;

4697 unsigned int numFramesPadding = 0;

4698 unsigned int convBufferSize = 0;

4699 bool callbackPushed = false;

4700 bool callbackPulled = false;

4701 bool callbackStopped = false;

4702 int callbackResult = 0;

4704 // convBuffer is used to store converted buffers between WASAPI and the user

4705 char* convBuffer = NULL;

4706 unsigned int convBuffSize = 0;

4707 unsigned int deviceBuffSize = 0;

4709 errorText_.clear();

4710 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;

4712 // Attempt to assign "Pro Audio" characteristic to thread

// NOTE(review): the canonical source guards this block with "if ( AvrtDll )";
// the null check after LoadLibrary is missing from this listing.
4713 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );

4715 DWORD taskIndex = 0;

4716 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );

4717 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );

4718 FreeLibrary( AvrtDll );

4721 // start capture stream if applicable

4722 if ( captureAudioClient ) {

4723 hr = captureAudioClient->GetMixFormat( &captureFormat );

4724 if ( FAILED( hr ) ) {

4725 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";

4729 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );

4731 // initialize capture stream according to desire buffer size

4732 float desiredBufferSize = stream_.bufferSize * captureSrRatio;

// REFERENCE_TIME is in 100-ns units, hence the 10000000 (= 1 s) scale factor.
4733 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );

// First entry only: the capture client/event are created once and cached in
// the WasapiHandle so a restarted stream reuses them.
4735 if ( !captureClient ) {

4736 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,

4737 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,

4738 desiredBufferPeriod,

4739 desiredBufferPeriod,

4742 if ( FAILED( hr ) ) {

4743 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";

4747 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),

4748 ( void** ) &captureClient );

4749 if ( FAILED( hr ) ) {

4750 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";

4754 // configure captureEvent to trigger on every available capture buffer

4755 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );

4756 if ( !captureEvent ) {

4757 errorType = RtAudioError::SYSTEM_ERROR;

4758 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";

4762 hr = captureAudioClient->SetEventHandle( captureEvent );

4763 if ( FAILED( hr ) ) {

4764 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";

4768 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;

4769 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;

4772 unsigned int inBufferSize = 0;

4773 hr = captureAudioClient->GetBufferSize( &inBufferSize );

4774 if ( FAILED( hr ) ) {

4775 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";

4779 // scale outBufferSize according to stream->user sample rate ratio

4780 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];

4781 inBufferSize *= stream_.nDeviceChannels[INPUT];

4783 // set captureBuffer size

4784 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );

4786 // reset the capture stream

4787 hr = captureAudioClient->Reset();

4788 if ( FAILED( hr ) ) {

4789 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";

4793 // start the capture stream

4794 hr = captureAudioClient->Start();

4795 if ( FAILED( hr ) ) {

4796 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";

4801 // start render stream if applicable

// Mirror of the capture setup above, for the render endpoint.
4802 if ( renderAudioClient ) {

4803 hr = renderAudioClient->GetMixFormat( &renderFormat );

4804 if ( FAILED( hr ) ) {

4805 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";

4809 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );

4811 // initialize render stream according to desire buffer size

4812 float desiredBufferSize = stream_.bufferSize * renderSrRatio;

4813 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );

4815 if ( !renderClient ) {

4816 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,

4817 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,

4818 desiredBufferPeriod,

4819 desiredBufferPeriod,

4822 if ( FAILED( hr ) ) {

4823 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";

4827 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),

4828 ( void** ) &renderClient );

4829 if ( FAILED( hr ) ) {

4830 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";

4834 // configure renderEvent to trigger on every available render buffer

4835 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );

4836 if ( !renderEvent ) {

4837 errorType = RtAudioError::SYSTEM_ERROR;

4838 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";

4842 hr = renderAudioClient->SetEventHandle( renderEvent );

4843 if ( FAILED( hr ) ) {

4844 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";

4848 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;

4849 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;

4852 unsigned int outBufferSize = 0;

4853 hr = renderAudioClient->GetBufferSize( &outBufferSize );

4854 if ( FAILED( hr ) ) {

4855 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";

4859 // scale inBufferSize according to user->stream sample rate ratio

4860 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];

4861 outBufferSize *= stream_.nDeviceChannels[OUTPUT];

4863 // set renderBuffer size

4864 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );

4866 // reset the render stream

4867 hr = renderAudioClient->Reset();

4868 if ( FAILED( hr ) ) {

4869 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";

4873 // start the render stream

4874 hr = renderAudioClient->Start();

4875 if ( FAILED( hr ) ) {

4876 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";

// Size the intermediate buffers: convBuffSize in device-rate samples (for
// resampling), deviceBuffSize in user-rate samples; DUPLEX takes the max of
// the two directions so one buffer serves both.
4881 if ( stream_.mode == INPUT ) {

4882 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );

4883 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );

4885 else if ( stream_.mode == OUTPUT ) {

4886 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );

4887 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );

4889 else if ( stream_.mode == DUPLEX ) {

4890 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),

4891 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );

4892 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),

4893 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );

4896 convBuffer = ( char* ) malloc( convBuffSize );

4897 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );

4898 if ( !convBuffer || !stream_.deviceBuffer ) {

4899 errorType = RtAudioError::MEMORY_ERROR;

4900 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";

4904 // stream process loop

4905 while ( stream_.state != STREAM_STOPPING ) {

4906 if ( !callbackPulled ) {

4909 // 1. Pull callback buffer from inputBuffer

4910 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count

4911 // Convert callback buffer to user format

4913 if ( captureAudioClient ) {

4914 // Pull callback buffer from inputBuffer

4915 callbackPulled = captureBuffer.pullBuffer( convBuffer,

4916 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],

4917 stream_.deviceFormat[INPUT] );

4919 if ( callbackPulled ) {

4920 // Convert callback buffer to user sample rate

4921 convertBufferWasapi( stream_.deviceBuffer,

4923 stream_.nDeviceChannels[INPUT],

4924 captureFormat->nSamplesPerSec,

4925 stream_.sampleRate,

4926 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),

4928 stream_.deviceFormat[INPUT] );

4930 if ( stream_.doConvertBuffer[INPUT] ) {

4931 // Convert callback buffer to user format

4932 convertBuffer( stream_.userBuffer[INPUT],

4933 stream_.deviceBuffer,

4934 stream_.convertInfo[INPUT] );

4937 // no further conversion, simple copy deviceBuffer to userBuffer

4938 memcpy( stream_.userBuffer[INPUT],

4939 stream_.deviceBuffer,

4940 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );

4945 // if there is no capture stream, set callbackPulled flag

4946 callbackPulled = true;

4949 // Execute Callback

4950 // ================

4951 // 1. Execute user callback method

4952 // 2. Handle return value from callback

4954 // if callback has not requested the stream to stop

4955 if ( callbackPulled && !callbackStopped ) {

4956 // Execute user callback method

4957 callbackResult = callback( stream_.userBuffer[OUTPUT],

4958 stream_.userBuffer[INPUT],

4959 stream_.bufferSize,

4961 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,

4962 stream_.callbackInfo.userData );

4964 // Handle return value from callback

// Return value 1 = stop (drain), 2 = abort; either way a helper thread is
// spawned because this thread cannot stop/join itself.
4965 if ( callbackResult == 1 ) {

4966 // instantiate a thread to stop this thread

4967 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );

4968 if ( !threadHandle ) {

4969 errorType = RtAudioError::THREAD_ERROR;

4970 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";

4973 else if ( !CloseHandle( threadHandle ) ) {

4974 errorType = RtAudioError::THREAD_ERROR;

4975 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";

4979 callbackStopped = true;

4981 else if ( callbackResult == 2 ) {

4982 // instantiate a thread to stop this thread

4983 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );

4984 if ( !threadHandle ) {

4985 errorType = RtAudioError::THREAD_ERROR;

4986 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";

4989 else if ( !CloseHandle( threadHandle ) ) {

4990 errorType = RtAudioError::THREAD_ERROR;

4991 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";

4995 callbackStopped = true;

5000 // Callback Output

5001 // ===============

5002 // 1. Convert callback buffer to stream format

5003 // 2. Convert callback buffer to stream sample rate and channel count

5004 // 3. Push callback buffer into outputBuffer

5006 if ( renderAudioClient && callbackPulled ) {

5007 if ( stream_.doConvertBuffer[OUTPUT] ) {

5008 // Convert callback buffer to stream format

5009 convertBuffer( stream_.deviceBuffer,

5010 stream_.userBuffer[OUTPUT],

5011 stream_.convertInfo[OUTPUT] );

5015 // Convert callback buffer to stream sample rate

5016 convertBufferWasapi( convBuffer,

5017 stream_.deviceBuffer,

5018 stream_.nDeviceChannels[OUTPUT],

5019 stream_.sampleRate,

5020 renderFormat->nSamplesPerSec,

5021 stream_.bufferSize,

5023 stream_.deviceFormat[OUTPUT] );

5025 // Push callback buffer into outputBuffer

5026 callbackPushed = renderBuffer.pushBuffer( convBuffer,

5027 convBufferSize * stream_.nDeviceChannels[OUTPUT],

5028 stream_.deviceFormat[OUTPUT] );

5031 // if there is no render stream, set callbackPushed flag

5032 callbackPushed = true;

5037 // 1. Get capture buffer from stream

5038 // 2. Push capture buffer into inputBuffer

5039 // 3. If 2. was successful: Release capture buffer

5041 if ( captureAudioClient ) {

5042 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event

5043 if ( !callbackPulled ) {

5044 WaitForSingleObject( captureEvent, INFINITE );

5047 // Get capture buffer from stream

5048 hr = captureClient->GetBuffer( &streamBuffer,

5049 &bufferFrameCount,

5050 &captureFlags, NULL, NULL );

5051 if ( FAILED( hr ) ) {

5052 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";

5056 if ( bufferFrameCount != 0 ) {

5057 // Push capture buffer into inputBuffer

5058 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,

5059 bufferFrameCount * stream_.nDeviceChannels[INPUT],

5060 stream_.deviceFormat[INPUT] ) )

5062 // Release capture buffer

5063 hr = captureClient->ReleaseBuffer( bufferFrameCount );

5064 if ( FAILED( hr ) ) {

5065 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

5071 // Inform WASAPI that capture was unsuccessful

5072 hr = captureClient->ReleaseBuffer( 0 );

5073 if ( FAILED( hr ) ) {

5074 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

5081 // Inform WASAPI that capture was unsuccessful

5082 hr = captureClient->ReleaseBuffer( 0 );

5083 if ( FAILED( hr ) ) {

5084 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

5092 // 1. Get render buffer from stream

5093 // 2. Pull next buffer from outputBuffer

5094 // 3. If 2. was successful: Fill render buffer with next buffer

5095 // Release render buffer

5097 if ( renderAudioClient ) {

5098 // if the callback output buffer was not pushed to renderBuffer, wait for next render event

5099 if ( callbackPulled && !callbackPushed ) {

5100 WaitForSingleObject( renderEvent, INFINITE );

5103 // Get render buffer from stream

5104 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );

5105 if ( FAILED( hr ) ) {

5106 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";

5110 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );

5111 if ( FAILED( hr ) ) {

5112 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";

// Writable space = total endpoint buffer minus frames still queued.
5116 bufferFrameCount -= numFramesPadding;

5118 if ( bufferFrameCount != 0 ) {

5119 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );

5120 if ( FAILED( hr ) ) {

5121 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";

5125 // Pull next buffer from outputBuffer

5126 // Fill render buffer with next buffer

5127 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,

5128 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],

5129 stream_.deviceFormat[OUTPUT] ) )

5131 // Release render buffer

5132 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );

5133 if ( FAILED( hr ) ) {

5134 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5140 // Inform WASAPI that render was unsuccessful

5141 hr = renderClient->ReleaseBuffer( 0, 0 );

5142 if ( FAILED( hr ) ) {

5143 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5150 // Inform WASAPI that render was unsuccessful

5151 hr = renderClient->ReleaseBuffer( 0, 0 );

5152 if ( FAILED( hr ) ) {

5153 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5159 // if the callback buffer was pushed renderBuffer reset callbackPulled flag

5160 if ( callbackPushed ) {

5161 callbackPulled = false;

5164 // tick stream time

5165 RtApi::tickStreamTime();

// Cleanup tail (the original "Exit:" label is missing from this listing):
// free the COM-allocated mix formats and the conversion buffer, mark the
// stream stopped, and report any error recorded in errorText_.
5170 CoTaskMemFree( captureFormat );

5171 CoTaskMemFree( renderFormat );

5173 free ( convBuffer );

5177 // update stream state

5178 stream_.state = STREAM_STOPPED;

5180 if ( errorText_.empty() )

5183 error( errorType );
\r
5186 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5190 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5192 // Modified by Robin Davies, October 2005
\r
5193 // - Improvements to DirectX pointer chasing.
\r
5194 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5195 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5196 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5197 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5199 #include <dsound.h>
\r
5200 #include <assert.h>
\r
5201 #include <algorithm>
\r
5203 #if defined(__MINGW32__)
\r
5204 // missing from latest mingw winapi
\r
5205 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5206 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5207 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5208 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5211 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5213 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5214 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5217 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5219 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5220 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5221 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5222 return pointer >= earlierPointer && pointer < laterPointer;
\r
5225 // A structure to hold various information related to the DirectSound
5226 // API implementation.
// NOTE(review): the listing is truncated here — the "struct DsHandle {"
// opener, several members (the device id / buffer pointers and xrun flags
// referenced by the constructor's initializer below), and the closing "};"
// are missing. Recover them from the canonical RtAudio.cpp.

5228 unsigned int drainCounter; // Tracks callback counts when draining

5229 bool internalDrain; // Indicates if stop is initiated from callback or not.

// Per-direction ([0]=output, [1]=input — TODO confirm against canonical
// source) DirectSound buffer bookkeeping.
5233 UINT bufferPointer[2];

5234 DWORD dsBufferSize[2];

5235 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.

// Default constructor: zero all counters/pointers and clear the xrun flags.
5239 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5242 // Declarations for utility functions, callbacks, and structures
5243 // specific to the DirectSound implementation.
// Enumeration callback passed to DirectSound(Capture)Enumerate; records each
// discovered device. NOTE(review): one parameter line (between "description"
// and "lpContext") is missing from this truncated listing.
5244 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,

5245 LPCTSTR description,

5247 LPVOID lpContext );

// Maps a DirectSound error/HRESULT code to a human-readable string.
5249 static const char* getErrorString( int code );

// _beginthreadex-compatible entry point for the DirectSound callback thread.
5251 static unsigned __stdcall callbackHandler( void *ptr );

// DsDevice default constructor: device not yet found, no valid output/input
// ids. NOTE(review): the struct's member declarations are missing from this
// truncated listing.
5260 : found(false) { validId[0] = false; validId[1] = false; }

// Bundle passed through the device-enumeration callback via lpContext.
// NOTE(review): the "bool isInput;" member (set by getDeviceCount below) and
// the closing "};" are not visible in this listing.
5263 struct DsProbeData {

5265 std::vector<struct DsDevice>* dsDevices;
\r
5268 RtApiDs :: RtApiDs()
\r
5270 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5271 // accept whatever the mainline chose for a threading model.
\r
5272 coInitialized_ = false;
\r
5273 HRESULT hr = CoInitialize( NULL );
\r
5274 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5277 RtApiDs :: ~RtApiDs()
\r
5279 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5280 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5283 // The DirectSound default output is always the first device.
\r
5284 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5289 // The DirectSound default input is always the first input device,
\r
5290 // which is the first capture device enumerated.
\r
5291 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
5296 unsigned int RtApiDs :: getDeviceCount( void )
\r
5298 // Set query flag for previously found devices to false, so that we
\r
5299 // can check for any devices that have disappeared.
\r
5300 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5301 dsDevices[i].found = false;
\r
5303 // Query DirectSound devices.
\r
5304 struct DsProbeData probeInfo;
\r
5305 probeInfo.isInput = false;
\r
5306 probeInfo.dsDevices = &dsDevices;
\r
5307 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5308 if ( FAILED( result ) ) {
\r
5309 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5310 errorText_ = errorStream_.str();
\r
5311 error( RtAudioError::WARNING );
\r
5314 // Query DirectSoundCapture devices.
\r
5315 probeInfo.isInput = true;
\r
5316 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5317 if ( FAILED( result ) ) {
\r
5318 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5319 errorText_ = errorStream_.str();
\r
5320 error( RtAudioError::WARNING );
\r
5323 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
\r
5324 for ( unsigned int i=0; i<dsDevices.size(); ) {
\r
5325 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
\r
5329 return static_cast<unsigned int>(dsDevices.size());
\r
5332 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5334 RtAudio::DeviceInfo info;
\r
5335 info.probed = false;
\r
5337 if ( dsDevices.size() == 0 ) {
\r
5338 // Force a query of all devices
\r
5340 if ( dsDevices.size() == 0 ) {
\r
5341 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5342 error( RtAudioError::INVALID_USE );
\r
5347 if ( device >= dsDevices.size() ) {
\r
5348 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5349 error( RtAudioError::INVALID_USE );
\r
5354 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5356 LPDIRECTSOUND output;
\r
5358 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5359 if ( FAILED( result ) ) {
\r
5360 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5361 errorText_ = errorStream_.str();
\r
5362 error( RtAudioError::WARNING );
\r
5366 outCaps.dwSize = sizeof( outCaps );
\r
5367 result = output->GetCaps( &outCaps );
\r
5368 if ( FAILED( result ) ) {
\r
5369 output->Release();
\r
5370 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5371 errorText_ = errorStream_.str();
\r
5372 error( RtAudioError::WARNING );
\r
5376 // Get output channel information.
\r
5377 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5379 // Get sample rate information.
\r
5380 info.sampleRates.clear();
\r
5381 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5382 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5383 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
\r
5384 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5386 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
5387 info.preferredSampleRate = SAMPLE_RATES[k];
\r
5391 // Get format information.
\r
5392 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5393 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5395 output->Release();
\r
5397 if ( getDefaultOutputDevice() == device )
\r
5398 info.isDefaultOutput = true;
\r
5400 if ( dsDevices[ device ].validId[1] == false ) {
\r
5401 info.name = dsDevices[ device ].name;
\r
5402 info.probed = true;
\r
5408 LPDIRECTSOUNDCAPTURE input;
\r
5409 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5410 if ( FAILED( result ) ) {
\r
5411 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5412 errorText_ = errorStream_.str();
\r
5413 error( RtAudioError::WARNING );
\r
5418 inCaps.dwSize = sizeof( inCaps );
\r
5419 result = input->GetCaps( &inCaps );
\r
5420 if ( FAILED( result ) ) {
\r
5422 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5423 errorText_ = errorStream_.str();
\r
5424 error( RtAudioError::WARNING );
\r
5428 // Get input channel information.
\r
5429 info.inputChannels = inCaps.dwChannels;
\r
5431 // Get sample rate and format information.
\r
5432 std::vector<unsigned int> rates;
\r
5433 if ( inCaps.dwChannels >= 2 ) {
\r
5434 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5435 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5436 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5437 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5438 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5439 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5440 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5441 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5443 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5444 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5445 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5446 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5447 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5449 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5450 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5451 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5452 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5453 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5456 else if ( inCaps.dwChannels == 1 ) {
\r
5457 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5458 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5459 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5460 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5461 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5462 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5463 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5464 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5466 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5467 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5468 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5469 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5470 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5472 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5473 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5474 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5475 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5476 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5479 else info.inputChannels = 0; // technically, this would be an error
\r
5483 if ( info.inputChannels == 0 ) return info;
\r
5485 // Copy the supported rates to the info structure but avoid duplication.
\r
5487 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5489 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5490 if ( rates[i] == info.sampleRates[j] ) {
\r
5495 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5497 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5499 // If device opens for both playback and capture, we determine the channels.
\r
5500 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5501 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5503 if ( device == 0 ) info.isDefaultInput = true;
\r
5505 // Copy name and return.
\r
5506 info.name = dsDevices[ device ].name;
\r
5507 info.probed = true;
\r
// NOTE(review): this chunk is an elided extraction — the original RtAudio
// line numbers are fused into each code line and many lines (closing braces,
// "return FAILURE;" statements, some declarations) are missing from view.
// Comments below describe only what the visible code demonstrates.
//
// Opens and configures a DirectSound playback (OUTPUT) or capture (INPUT)
// endpoint for the requested device/channels/rate/format, allocates the
// user and (if needed) conversion buffers, installs the DsHandle bookkeeping
// structure, and spins up the callback thread on first open.
// Returns a success/failure flag to the caller (failure paths set errorText_).
5511 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

5512 unsigned int firstChannel, unsigned int sampleRate,

5513 RtAudioFormat format, unsigned int *bufferSize,

5514 RtAudio::StreamOptions *options )

// DirectSound endpoints here are limited to at most 2 channels total
// (channels + firstChannel).
5516 if ( channels + firstChannel > 2 ) {

5517 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";

// Sanity checks on the enumerated device list; callers are expected to have
// validated these already.
5521 size_t nDevices = dsDevices.size();

5522 if ( nDevices == 0 ) {

5523 // This should not happen because a check is made before this function is called.

5524 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";

5528 if ( device >= nDevices ) {

5529 // This should not happen because a check is made before this function is called.

5530 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";

// validId[0] marks playback capability, validId[1] marks capture capability.
5534 if ( mode == OUTPUT ) {

5535 if ( dsDevices[ device ].validId[0] == false ) {

5536 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";

5537 errorText_ = errorStream_.str();

5541 else { // mode == INPUT

5542 if ( dsDevices[ device ].validId[1] == false ) {

5543 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";

5544 errorText_ = errorStream_.str();

5549 // According to a note in PortAudio, using GetDesktopWindow()

5550 // instead of GetForegroundWindow() is supposed to avoid problems

5551 // that occur when the application's window is not the foreground

5552 // window. Also, if the application window closes before the

5553 // DirectSound buffer, DirectSound can crash. In the past, I had

5554 // problems when using GetDesktopWindow() but it seems fine now

5555 // (January 2010). I'll leave it commented here.

5556 // HWND hWnd = GetForegroundWindow();

5557 HWND hWnd = GetDesktopWindow();

5559 // Check the numberOfBuffers parameter and limit the lowest value to

5560 // two. This is a judgement call and a value of two is probably too

5561 // low for capture, but it should work for playback.

// NOTE(review): the declaration of nBuffers is not visible in this elided
// view; presumably declared in a missing line above — verify against the
// canonical RtAudio source.
5563 if ( options ) nBuffers = options->numberOfBuffers;

5564 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;

5565 if ( nBuffers < 2 ) nBuffers = 3;

5567 // Check the lower range of the user-specified buffer size and set

5568 // (arbitrarily) to a lower bound of 32.

5569 if ( *bufferSize < 32 ) *bufferSize = 32;

5571 // Create the wave format structure. The data format setting will

5572 // be determined later.

5573 WAVEFORMATEX waveFormat;

5574 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );

5575 waveFormat.wFormatTag = WAVE_FORMAT_PCM;

5576 waveFormat.nChannels = channels + firstChannel;

5577 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;

5579 // Determine the device buffer size. By default, we'll use the value

5580 // defined above (32K), but we will grow it to make allowances for

5581 // very large software buffer sizes.

5582 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;

5583 DWORD dsPointerLeadTime = 0;

// ohandle/bhandle carry the created DirectSound object and buffer pointers
// out of the mode-specific branches into the shared DsHandle setup below.
5585 void *ohandle = 0, *bhandle = 0;

// ---- Playback path: create IDirectSound, set the primary-buffer format,
// ---- then create and zero the secondary (streaming) buffer.
5587 if ( mode == OUTPUT ) {

5589 LPDIRECTSOUND output;

5590 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

5591 if ( FAILED( result ) ) {

5592 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

5593 errorText_ = errorStream_.str();

5598 outCaps.dwSize = sizeof( outCaps );

5599 result = output->GetCaps( &outCaps );

5600 if ( FAILED( result ) ) {

5601 output->Release();

5602 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";

5603 errorText_ = errorStream_.str();

5607 // Check channel information.

5608 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {

5609 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";

5610 errorText_ = errorStream_.str();

5614 // Check format information. Use 16-bit format unless not

5615 // supported or user requests 8-bit.

5616 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&

5617 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {

5618 waveFormat.wBitsPerSample = 16;

5619 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5622 waveFormat.wBitsPerSample = 8;

5623 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5625 stream_.userFormat = format;

5627 // Update wave format structure and buffer information.

5628 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

5629 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

// dsPointerLeadTime is the total byte span of the user-side buffering; the
// device buffer must be at least twice this to keep read/write pointers apart.
5630 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

5632 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

5633 while ( dsPointerLeadTime * 2U > dsBufferSize )

5634 dsBufferSize *= 2;

5636 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.

5637 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );

5638 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.

5639 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );

5640 if ( FAILED( result ) ) {

5641 output->Release();

5642 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";

5643 errorText_ = errorStream_.str();

5647 // Even though we will write to the secondary buffer, we need to

5648 // access the primary buffer to set the correct output format

5649 // (since the default is 8-bit, 22 kHz!). Setup the DS primary

5650 // buffer description.

5651 DSBUFFERDESC bufferDescription;

5652 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

5653 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

5654 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;

5656 // Obtain the primary buffer

5657 LPDIRECTSOUNDBUFFER buffer;

5658 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5659 if ( FAILED( result ) ) {

5660 output->Release();

5661 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";

5662 errorText_ = errorStream_.str();

5666 // Set the primary DS buffer sound format.

5667 result = buffer->SetFormat( &waveFormat );

5668 if ( FAILED( result ) ) {

5669 output->Release();

5670 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";

5671 errorText_ = errorStream_.str();

5675 // Setup the secondary DS buffer description.

5676 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

5677 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

5678 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

5679 DSBCAPS_GLOBALFOCUS |

5680 DSBCAPS_GETCURRENTPOSITION2 |

5681 DSBCAPS_LOCHARDWARE ); // Force hardware mixing

5682 bufferDescription.dwBufferBytes = dsBufferSize;

5683 bufferDescription.lpwfxFormat = &waveFormat;

5685 // Try to create the secondary DS buffer. If that doesn't work,

5686 // try to use software mixing. Otherwise, there's a problem.

5687 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5688 if ( FAILED( result ) ) {

// Retry with software mixing before giving up on this device.
5689 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

5690 DSBCAPS_GLOBALFOCUS |

5691 DSBCAPS_GETCURRENTPOSITION2 |

5692 DSBCAPS_LOCSOFTWARE ); // Force software mixing

5693 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5694 if ( FAILED( result ) ) {

5695 output->Release();

5696 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";

5697 errorText_ = errorStream_.str();

5702 // Get the buffer size ... might be different from what we specified.

5704 dsbcaps.dwSize = sizeof( DSBCAPS );

5705 result = buffer->GetCaps( &dsbcaps );

5706 if ( FAILED( result ) ) {

5707 output->Release();

5708 buffer->Release();

5709 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

5710 errorText_ = errorStream_.str();

5714 dsBufferSize = dsbcaps.dwBufferBytes;

5716 // Lock the DS buffer

5719 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

5720 if ( FAILED( result ) ) {

5721 output->Release();

5722 buffer->Release();

5723 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";

5724 errorText_ = errorStream_.str();

5728 // Zero the DS buffer

5729 ZeroMemory( audioPtr, dataLen );

5731 // Unlock the DS buffer

5732 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5733 if ( FAILED( result ) ) {

5734 output->Release();

5735 buffer->Release();

5736 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";

5737 errorText_ = errorStream_.str();

// Hand the created objects to the shared DsHandle setup below.
5741 ohandle = (void *) output;

5742 bhandle = (void *) buffer;

// ---- Capture path: create IDirectSoundCapture, validate channel/format
// ---- support, then create and zero the capture buffer.
5745 if ( mode == INPUT ) {

5747 LPDIRECTSOUNDCAPTURE input;

5748 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

5749 if ( FAILED( result ) ) {

5750 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

5751 errorText_ = errorStream_.str();

5756 inCaps.dwSize = sizeof( inCaps );

5757 result = input->GetCaps( &inCaps );

5758 if ( FAILED( result ) ) {

5760 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";

5761 errorText_ = errorStream_.str();

5765 // Check channel information.

5766 if ( inCaps.dwChannels < channels + firstChannel ) {

5767 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";

5771 // Check format information. Use 16-bit format unless user

5772 // requests 8-bit.

5773 DWORD deviceFormats;

5774 if ( channels + firstChannel == 2 ) {

// Stereo: only accept 8-bit if explicitly requested AND the device
// advertises a stereo 8-bit WAVE format; otherwise assume 16-bit works.
5775 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;

5776 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

5777 waveFormat.wBitsPerSample = 8;

5778 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5780 else { // assume 16-bit is supported

5781 waveFormat.wBitsPerSample = 16;

5782 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5785 else { // channel == 1

5786 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;

5787 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

5788 waveFormat.wBitsPerSample = 8;

5789 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5791 else { // assume 16-bit is supported

5792 waveFormat.wBitsPerSample = 16;

5793 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5796 stream_.userFormat = format;

5798 // Update wave format structure and buffer information.

5799 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

5800 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

5801 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

5803 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

5804 while ( dsPointerLeadTime * 2U > dsBufferSize )

5805 dsBufferSize *= 2;

5807 // Setup the secondary DS buffer description.

5808 DSCBUFFERDESC bufferDescription;

5809 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );

5810 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );

5811 bufferDescription.dwFlags = 0;

5812 bufferDescription.dwReserved = 0;

5813 bufferDescription.dwBufferBytes = dsBufferSize;

5814 bufferDescription.lpwfxFormat = &waveFormat;

5816 // Create the capture buffer.

5817 LPDIRECTSOUNDCAPTUREBUFFER buffer;

5818 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );

5819 if ( FAILED( result ) ) {

5821 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";

5822 errorText_ = errorStream_.str();

5826 // Get the buffer size ... might be different from what we specified.

5827 DSCBCAPS dscbcaps;

5828 dscbcaps.dwSize = sizeof( DSCBCAPS );

5829 result = buffer->GetCaps( &dscbcaps );

5830 if ( FAILED( result ) ) {

5832 buffer->Release();

5833 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

5834 errorText_ = errorStream_.str();

5838 dsBufferSize = dscbcaps.dwBufferBytes;

5840 // NOTE: We could have a problem here if this is a duplex stream

5841 // and the play and capture hardware buffer sizes are different

5842 // (I'm actually not sure if that is a problem or not).

5843 // Currently, we are not verifying that.

5845 // Lock the capture buffer

5848 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

5849 if ( FAILED( result ) ) {

5851 buffer->Release();

5852 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";

5853 errorText_ = errorStream_.str();

5857 // Zero the buffer

5858 ZeroMemory( audioPtr, dataLen );

5860 // Unlock the buffer

5861 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5862 if ( FAILED( result ) ) {

5864 buffer->Release();

5865 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";

5866 errorText_ = errorStream_.str();

5870 ohandle = (void *) input;

5871 bhandle = (void *) buffer;

// ---- Shared setup: record stream parameters, decide whether a conversion
// ---- buffer is needed, allocate buffers, and install the DsHandle.
5874 // Set various stream parameters

5875 DsHandle *handle = 0;

5876 stream_.nDeviceChannels[mode] = channels + firstChannel;

5877 stream_.nUserChannels[mode] = channels;

5878 stream_.bufferSize = *bufferSize;

5879 stream_.channelOffset[mode] = firstChannel;

5880 stream_.deviceInterleaved[mode] = true;

5881 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

5882 else stream_.userInterleaved = true;

5884 // Set flag for buffer conversion

// Conversion is required whenever user and device disagree on channel
// count, sample format, or (for multichannel) interleaving.
5885 stream_.doConvertBuffer[mode] = false;

5886 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])

5887 stream_.doConvertBuffer[mode] = true;

5888 if (stream_.userFormat != stream_.deviceFormat[mode])

5889 stream_.doConvertBuffer[mode] = true;

5890 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

5891 stream_.nUserChannels[mode] > 1 )

5892 stream_.doConvertBuffer[mode] = true;

5894 // Allocate necessary internal buffers

5895 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

5896 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

5897 if ( stream_.userBuffer[mode] == NULL ) {

5898 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";

5902 if ( stream_.doConvertBuffer[mode] ) {

5904 bool makeBuffer = true;

5905 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

5906 if ( mode == INPUT ) {

// Reuse the output-side device buffer for a duplex stream if it is
// already large enough for the input side.
5907 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

5908 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

5909 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;

5913 if ( makeBuffer ) {

5914 bufferBytes *= *bufferSize;

5915 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

5916 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

5917 if ( stream_.deviceBuffer == NULL ) {

5918 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";

5924 // Allocate our DsHandle structures for the stream.

5925 if ( stream_.apiHandle == 0 ) {

5927 handle = new DsHandle;

5929 catch ( std::bad_alloc& ) {

// NOTE(review): message says "AsioHandle" but this allocates a DsHandle —
// looks like a copy/paste slip from the ASIO backend (message text only).
5930 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";

5934 // Create a manual-reset event.

// The event is signaled by the callback thread when draining completes;
// stopStream() blocks on it.
5935 handle->condition = CreateEvent( NULL, // no security

5936 TRUE, // manual-reset

5937 FALSE, // non-signaled initially

5938 NULL ); // unnamed

5939 stream_.apiHandle = (void *) handle;

5942 handle = (DsHandle *) stream_.apiHandle;

5943 handle->id[mode] = ohandle;

5944 handle->buffer[mode] = bhandle;

5945 handle->dsBufferSize[mode] = dsBufferSize;

5946 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;

5948 stream_.device[mode] = device;

5949 stream_.state = STREAM_STOPPED;

5950 if ( stream_.mode == OUTPUT && mode == INPUT )

5951 // We had already set up an output stream.

5952 stream_.mode = DUPLEX;

5954 stream_.mode = mode;

5955 stream_.nBuffers = nBuffers;

5956 stream_.sampleRate = sampleRate;

5958 // Setup the buffer conversion information structure.

5959 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

5961 // Setup the callback thread.

// Only spawned once: a duplex open reuses the thread created for the
// first (output) open.
5962 if ( stream_.callbackInfo.isRunning == false ) {

5963 unsigned threadId;

5964 stream_.callbackInfo.isRunning = true;

5965 stream_.callbackInfo.object = (void *) this;

5966 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,

5967 &stream_.callbackInfo, 0, &threadId );

5968 if ( stream_.callbackInfo.thread == 0 ) {

5969 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";

5973 // Boost DS thread priority

5974 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );

// ---- Error-unwind path (reached via a goto label not visible in this
// ---- elided view): release DirectSound objects, the condition event,
// ---- and all allocated buffers, then mark the stream closed.
5980 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

5981 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

5982 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

5983 if ( buffer ) buffer->Release();

5984 object->Release();

5986 if ( handle->buffer[1] ) {

5987 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

5988 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

5989 if ( buffer ) buffer->Release();

5990 object->Release();

5992 CloseHandle( handle->condition );

5994 stream_.apiHandle = 0;

5997 for ( int i=0; i<2; i++ ) {

5998 if ( stream_.userBuffer[i] ) {

5999 free( stream_.userBuffer[i] );

6000 stream_.userBuffer[i] = 0;

6004 if ( stream_.deviceBuffer ) {

6005 free( stream_.deviceBuffer );

6006 stream_.deviceBuffer = 0;

6009 stream_.state = STREAM_CLOSED;
// Closes an open stream: stops the callback thread, releases the
// DirectSound playback/capture objects and buffers, frees the internal
// user/device buffers, and resets the stream bookkeeping.
// Calling with no open stream only issues a WARNING.
// NOTE(review): this chunk is an elided extraction (original line numbers
// fused into each line; some lines missing) — documented from what is visible.
6013 void RtApiDs :: closeStream()

6015 if ( stream_.state == STREAM_CLOSED ) {

6016 errorText_ = "RtApiDs::closeStream(): no open stream to close!";

6017 error( RtAudioError::WARNING );

6021 // Stop the callback thread.

// Clearing isRunning tells the callback loop to exit; we then join and
// close the thread handle.
6022 stream_.callbackInfo.isRunning = false;

6023 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );

6024 CloseHandle( (HANDLE) stream_.callbackInfo.thread );

6026 DsHandle *handle = (DsHandle *) stream_.apiHandle;

// Release playback-side objects; index 0 = output, index 1 = input.
6028 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

6029 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

6030 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6033 buffer->Release();

6035 object->Release();

// Release capture-side objects.
6037 if ( handle->buffer[1] ) {

6038 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

6039 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6042 buffer->Release();

6044 object->Release();

// Destroy the drain-signal event created in probeDeviceOpen.
6046 CloseHandle( handle->condition );

6048 stream_.apiHandle = 0;

// Free the per-mode user buffers and the shared conversion buffer.
6051 for ( int i=0; i<2; i++ ) {

6052 if ( stream_.userBuffer[i] ) {

6053 free( stream_.userBuffer[i] );

6054 stream_.userBuffer[i] = 0;

6058 if ( stream_.deviceBuffer ) {

6059 free( stream_.deviceBuffer );

6060 stream_.deviceBuffer = 0;

6063 stream_.mode = UNINITIALIZED;

6064 stream_.state = STREAM_CLOSED;
// Starts a stopped stream: raises timer resolution, primes duplex preroll,
// starts the DirectSound playback and/or capture buffers in looping mode,
// and marks the stream RUNNING. Already-running streams get a WARNING only.
// NOTE(review): elided extraction — documented from what is visible.
6067 void RtApiDs :: startStream()

6070 if ( stream_.state == STREAM_RUNNING ) {

6071 errorText_ = "RtApiDs::startStream(): the stream is already running!";

6072 error( RtAudioError::WARNING );

6076 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6078 // Increase scheduler frequency on lesser windows (a side-effect of

6079 // increasing timer accuracy). On greater windows (Win2K or later),

6080 // this is already in effect.

6081 timeBeginPeriod( 1 );

6083 buffersRolling = false;

6084 duplexPrerollBytes = 0;

6086 if ( stream_.mode == DUPLEX ) {

6087 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.

6088 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );

6091 HRESULT result = 0;

// Start the playback buffer looping (buffer[0] = output side).
6092 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

6094 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6095 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );

6096 if ( FAILED( result ) ) {

6097 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";

6098 errorText_ = errorStream_.str();

// Start the capture buffer looping (buffer[1] = input side).
6103 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6105 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6106 result = buffer->Start( DSCBSTART_LOOPING );

6107 if ( FAILED( result ) ) {

6108 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";

6109 errorText_ = errorStream_.str();

// Reset drain bookkeeping and the stop-signal event before going live.
6114 handle->drainCounter = 0;

6115 handle->internalDrain = false;

6116 ResetEvent( handle->condition );

6117 stream_.state = STREAM_RUNNING;

// Escalate any DirectSound failure recorded above to a SYSTEM_ERROR.
6120 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stops a running stream gracefully: for output, waits for the callback
// thread to finish draining, then stops the DS buffer(s), zeroes their
// contents so a restart does not replay stale audio, and rewinds the
// internal buffer pointers. Already-stopped streams get a WARNING only.
// NOTE(review): elided extraction — the declarations of audioPtr/dataLen
// and several closing braces are not visible here; documented from what is
// visible.
6123 void RtApiDs :: stopStream()

6126 if ( stream_.state == STREAM_STOPPED ) {

6127 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";

6128 error( RtAudioError::WARNING );

6132 HRESULT result = 0;

6135 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6136 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// Request a drain (drainCounter = 2) and block on the manual-reset event
// until the callback thread signals that playback output has drained.
6137 if ( handle->drainCounter == 0 ) {

6138 handle->drainCounter = 2;

6139 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled

6142 stream_.state = STREAM_STOPPED;

6144 MUTEX_LOCK( &stream_.mutex );

6146 // Stop the buffer and clear memory

6147 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6148 result = buffer->Stop();

6149 if ( FAILED( result ) ) {

6150 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";

6151 errorText_ = errorStream_.str();

6155 // Lock the buffer and clear it so that if we start to play again,

6156 // we won't have old data playing.

6157 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );

6158 if ( FAILED( result ) ) {

6159 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";

6160 errorText_ = errorStream_.str();

6164 // Zero the DS buffer

6165 ZeroMemory( audioPtr, dataLen );

6167 // Unlock the DS buffer

6168 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6169 if ( FAILED( result ) ) {

6170 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";

6171 errorText_ = errorStream_.str();

6175 // If we start playing again, we must begin at beginning of buffer.

6176 handle->bufferPointer[0] = 0;

// Capture side: same stop / zero / rewind sequence on the capture buffer.
6179 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6180 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6184 stream_.state = STREAM_STOPPED;

// In DUPLEX mode the mutex was already taken in the output branch above.
6186 if ( stream_.mode != DUPLEX )

6187 MUTEX_LOCK( &stream_.mutex );

6189 result = buffer->Stop();

6190 if ( FAILED( result ) ) {

6191 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";

6192 errorText_ = errorStream_.str();

6196 // Lock the buffer and clear it so that if we start to play again,

6197 // we won't have old data playing.

6198 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );

6199 if ( FAILED( result ) ) {

6200 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";

6201 errorText_ = errorStream_.str();

6205 // Zero the DS buffer

6206 ZeroMemory( audioPtr, dataLen );

6208 // Unlock the DS buffer

6209 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6210 if ( FAILED( result ) ) {

6211 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";

6212 errorText_ = errorStream_.str();

6216 // If we start recording again, we must begin at beginning of buffer.

6217 handle->bufferPointer[1] = 0;

6221 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.

6222 MUTEX_UNLOCK( &stream_.mutex );

// Escalate any DirectSound failure recorded above to a SYSTEM_ERROR.
6224 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Aborts a running stream immediately: sets drainCounter = 2 so the
// callback loop stops without the graceful output drain performed by
// stopStream(). Already-stopped streams get a WARNING only.
// NOTE(review): elided extraction — the tail of this function (presumably a
// stopStream() call and/or return) is not visible here; verify against the
// canonical RtAudio source.
6227 void RtApiDs :: abortStream()

6230 if ( stream_.state == STREAM_STOPPED ) {

6231 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";

6232 error( RtAudioError::WARNING );

6236 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6237 handle->drainCounter = 2;
6242 void RtApiDs :: callbackEvent()
\r
6244 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6245 Sleep( 50 ); // sleep 50 milliseconds
\r
6249 if ( stream_.state == STREAM_CLOSED ) {
\r
6250 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6251 error( RtAudioError::WARNING );
\r
6255 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6256 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6258 // Check if we were draining the stream and signal is finished.
\r
6259 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6261 stream_.state = STREAM_STOPPING;
\r
6262 if ( handle->internalDrain == false )
\r
6263 SetEvent( handle->condition );
\r
6269 // Invoke user callback to get fresh output data UNLESS we are
\r
6270 // draining stream.
\r
6271 if ( handle->drainCounter == 0 ) {
\r
6272 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6273 double streamTime = getStreamTime();
\r
6274 RtAudioStreamStatus status = 0;
\r
6275 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6276 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6277 handle->xrun[0] = false;
\r
6279 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6280 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6281 handle->xrun[1] = false;
\r
6283 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6284 stream_.bufferSize, streamTime, status, info->userData );
\r
6285 if ( cbReturnValue == 2 ) {
\r
6286 stream_.state = STREAM_STOPPING;
\r
6287 handle->drainCounter = 2;
\r
6291 else if ( cbReturnValue == 1 ) {
\r
6292 handle->drainCounter = 1;
\r
6293 handle->internalDrain = true;
\r
6298 DWORD currentWritePointer, safeWritePointer;
\r
6299 DWORD currentReadPointer, safeReadPointer;
\r
6300 UINT nextWritePointer;
\r
6302 LPVOID buffer1 = NULL;
\r
6303 LPVOID buffer2 = NULL;
\r
6304 DWORD bufferSize1 = 0;
\r
6305 DWORD bufferSize2 = 0;
\r
6310 MUTEX_LOCK( &stream_.mutex );
\r
6311 if ( stream_.state == STREAM_STOPPED ) {
\r
6312 MUTEX_UNLOCK( &stream_.mutex );
\r
6316 if ( buffersRolling == false ) {
\r
6317 if ( stream_.mode == DUPLEX ) {
\r
6318 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6320 // It takes a while for the devices to get rolling. As a result,
\r
6321 // there's no guarantee that the capture and write device pointers
\r
6322 // will move in lockstep. Wait here for both devices to start
\r
6323 // rolling, and then set our buffer pointers accordingly.
\r
6324 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6325 // bytes later than the write buffer.
\r
6327 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6328 // take place between the two GetCurrentPosition calls... but I'm
\r
6329 // really not sure how to solve the problem. Temporarily boost to
\r
6330 // Realtime priority, maybe; but I'm not sure what priority the
\r
6331 // DirectSound service threads run at. We *should* be roughly
\r
6332 // within a ms or so of correct.
\r
6334 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6335 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6337 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6339 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6340 if ( FAILED( result ) ) {
\r
6341 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6342 errorText_ = errorStream_.str();
\r
6343 MUTEX_UNLOCK( &stream_.mutex );
\r
6344 error( RtAudioError::SYSTEM_ERROR );
\r
6347 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6348 if ( FAILED( result ) ) {
\r
6349 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6350 errorText_ = errorStream_.str();
\r
6351 MUTEX_UNLOCK( &stream_.mutex );
\r
6352 error( RtAudioError::SYSTEM_ERROR );
\r
6356 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6357 if ( FAILED( result ) ) {
\r
6358 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6359 errorText_ = errorStream_.str();
\r
6360 MUTEX_UNLOCK( &stream_.mutex );
\r
6361 error( RtAudioError::SYSTEM_ERROR );
\r
6364 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6365 if ( FAILED( result ) ) {
\r
6366 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6367 errorText_ = errorStream_.str();
\r
6368 MUTEX_UNLOCK( &stream_.mutex );
\r
6369 error( RtAudioError::SYSTEM_ERROR );
\r
6372 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6376 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6378 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6379 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6380 handle->bufferPointer[1] = safeReadPointer;
\r
6382 else if ( stream_.mode == OUTPUT ) {
\r
6384 // Set the proper nextWritePosition after initial startup.
\r
6385 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6386 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6387 if ( FAILED( result ) ) {
\r
6388 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6389 errorText_ = errorStream_.str();
\r
6390 MUTEX_UNLOCK( &stream_.mutex );
\r
6391 error( RtAudioError::SYSTEM_ERROR );
\r
6394 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6395 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6398 buffersRolling = true;
\r
6401 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6403 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6405 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6406 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6407 bufferBytes *= formatBytes( stream_.userFormat );
\r
6408 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6411 // Setup parameters and do buffer conversion if necessary.
\r
6412 if ( stream_.doConvertBuffer[0] ) {
\r
6413 buffer = stream_.deviceBuffer;
\r
6414 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6415 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6416 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6419 buffer = stream_.userBuffer[0];
\r
6420 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6421 bufferBytes *= formatBytes( stream_.userFormat );
\r
6424 // No byte swapping necessary in DirectSound implementation.
\r
6426 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6427 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6429 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6430 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6432 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6433 nextWritePointer = handle->bufferPointer[0];
\r
6435 DWORD endWrite, leadPointer;
\r
6437 // Find out where the read and "safe write" pointers are.
\r
6438 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6439 if ( FAILED( result ) ) {
\r
6440 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6441 errorText_ = errorStream_.str();
\r
6442 error( RtAudioError::SYSTEM_ERROR );
\r
6446 // We will copy our output buffer into the region between
\r
6447 // safeWritePointer and leadPointer. If leadPointer is not
\r
6448 // beyond the next endWrite position, wait until it is.
\r
6449 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6450 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6451 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6452 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6453 endWrite = nextWritePointer + bufferBytes;
\r
6455 // Check whether the entire write region is behind the play pointer.
\r
6456 if ( leadPointer >= endWrite ) break;
\r
6458 // If we are here, then we must wait until the leadPointer advances
\r
6459 // beyond the end of our next write region. We use the
\r
6460 // Sleep() function to suspend operation until that happens.
\r
6461 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6462 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6463 if ( millis < 1.0 ) millis = 1.0;
\r
6464 Sleep( (DWORD) millis );
\r
6467 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6468 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6469 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6470 handle->xrun[0] = true;
\r
6471 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6472 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6473 handle->bufferPointer[0] = nextWritePointer;
\r
6474 endWrite = nextWritePointer + bufferBytes;
\r
6477 // Lock free space in the buffer
\r
6478 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6479 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6480 if ( FAILED( result ) ) {
\r
6481 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6482 errorText_ = errorStream_.str();
\r
6483 MUTEX_UNLOCK( &stream_.mutex );
\r
6484 error( RtAudioError::SYSTEM_ERROR );
\r
6488 // Copy our buffer into the DS buffer
\r
6489 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6490 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6492 // Update our buffer offset and unlock sound buffer
\r
6493 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6494 if ( FAILED( result ) ) {
\r
6495 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6496 errorText_ = errorStream_.str();
\r
6497 MUTEX_UNLOCK( &stream_.mutex );
\r
6498 error( RtAudioError::SYSTEM_ERROR );
\r
6501 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6502 handle->bufferPointer[0] = nextWritePointer;
\r
6505 // Don't bother draining input
\r
6506 if ( handle->drainCounter ) {
\r
6507 handle->drainCounter++;
\r
6511 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6513 // Setup parameters.
\r
6514 if ( stream_.doConvertBuffer[1] ) {
\r
6515 buffer = stream_.deviceBuffer;
\r
6516 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6517 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6520 buffer = stream_.userBuffer[1];
\r
6521 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6522 bufferBytes *= formatBytes( stream_.userFormat );
\r
6525 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6526 long nextReadPointer = handle->bufferPointer[1];
\r
6527 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6529 // Find out where the write and "safe read" pointers are.
\r
6530 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6531 if ( FAILED( result ) ) {
\r
6532 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6533 errorText_ = errorStream_.str();
\r
6534 MUTEX_UNLOCK( &stream_.mutex );
\r
6535 error( RtAudioError::SYSTEM_ERROR );
\r
6539 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6540 DWORD endRead = nextReadPointer + bufferBytes;
\r
6542 // Handling depends on whether we are INPUT or DUPLEX.
\r
6543 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6544 // then a wait here will drag the write pointers into the forbidden zone.
\r
6546 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6547 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6548 // practical way to sync up the read and write pointers reliably, given the
\r
6549 // the very complex relationship between phase and increment of the read and write
\r
6552 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6553 // provide a pre-roll period of 0.5 seconds in which we return
\r
6554 // zeros from the read buffer while the pointers sync up.
\r
6556 if ( stream_.mode == DUPLEX ) {
\r
6557 if ( safeReadPointer < endRead ) {
\r
6558 if ( duplexPrerollBytes <= 0 ) {
\r
6559 // Pre-roll time over. Be more agressive.
\r
6560 int adjustment = endRead-safeReadPointer;
\r
6562 handle->xrun[1] = true;
\r
6564 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6565 // and perform fine adjustments later.
\r
6566 // - small adjustments: back off by twice as much.
\r
6567 if ( adjustment >= 2*bufferBytes )
\r
6568 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6570 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6572 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6576 // In pre=roll time. Just do it.
\r
6577 nextReadPointer = safeReadPointer - bufferBytes;
\r
6578 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6580 endRead = nextReadPointer + bufferBytes;
\r
6583 else { // mode == INPUT
\r
6584 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6585 // See comments for playback.
\r
6586 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6587 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6588 if ( millis < 1.0 ) millis = 1.0;
\r
6589 Sleep( (DWORD) millis );
\r
6591 // Wake up and find out where we are now.
\r
6592 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6593 if ( FAILED( result ) ) {
\r
6594 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6595 errorText_ = errorStream_.str();
\r
6596 MUTEX_UNLOCK( &stream_.mutex );
\r
6597 error( RtAudioError::SYSTEM_ERROR );
\r
6601 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6605 // Lock free space in the buffer
\r
6606 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6607 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6608 if ( FAILED( result ) ) {
\r
6609 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6610 errorText_ = errorStream_.str();
\r
6611 MUTEX_UNLOCK( &stream_.mutex );
\r
6612 error( RtAudioError::SYSTEM_ERROR );
\r
6616 if ( duplexPrerollBytes <= 0 ) {
\r
6617 // Copy our buffer into the DS buffer
\r
6618 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6619 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6622 memset( buffer, 0, bufferSize1 );
\r
6623 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6624 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6627 // Update our buffer offset and unlock sound buffer
\r
6628 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6629 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6630 if ( FAILED( result ) ) {
\r
6631 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6632 errorText_ = errorStream_.str();
\r
6633 MUTEX_UNLOCK( &stream_.mutex );
\r
6634 error( RtAudioError::SYSTEM_ERROR );
\r
6637 handle->bufferPointer[1] = nextReadPointer;
\r
6639 // No byte swapping necessary in DirectSound implementation.
\r
6641 // If necessary, convert 8-bit data from unsigned to signed.
\r
6642 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6643 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6645 // Do buffer conversion if necessary.
\r
6646 if ( stream_.doConvertBuffer[1] )
\r
6647 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6651 MUTEX_UNLOCK( &stream_.mutex );
\r
6652 RtApi::tickStreamTime();
\r
6655 // Definitions for utility functions and callbacks
\r
6656 // specific to the DirectSound implementation.
\r
6658 static unsigned __stdcall callbackHandler( void *ptr )
\r
6660 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6661 RtApiDs *object = (RtApiDs *) info->object;
\r
6662 bool* isRunning = &info->isRunning;
\r
6664 while ( *isRunning == true ) {
\r
6665 object->callbackEvent();
\r
6668 _endthreadex( 0 );
\r
6672 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6673 LPCTSTR description,
\r
6674 LPCTSTR /*module*/,
\r
6675 LPVOID lpContext )
\r
6677 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6678 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6681 bool validDevice = false;
\r
6682 if ( probeInfo.isInput == true ) {
\r
6684 LPDIRECTSOUNDCAPTURE object;
\r
6686 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6687 if ( hr != DS_OK ) return TRUE;
\r
6689 caps.dwSize = sizeof(caps);
\r
6690 hr = object->GetCaps( &caps );
\r
6691 if ( hr == DS_OK ) {
\r
6692 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6693 validDevice = true;
\r
6695 object->Release();
\r
6699 LPDIRECTSOUND object;
\r
6700 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6701 if ( hr != DS_OK ) return TRUE;
\r
6703 caps.dwSize = sizeof(caps);
\r
6704 hr = object->GetCaps( &caps );
\r
6705 if ( hr == DS_OK ) {
\r
6706 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6707 validDevice = true;
\r
6709 object->Release();
\r
6712 // If good device, then save its name and guid.
\r
6713 std::string name = convertCharPointerToStdString( description );
\r
6714 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6715 if ( lpguid == NULL )
\r
6716 name = "Default Device";
\r
6717 if ( validDevice ) {
\r
6718 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6719 if ( dsDevices[i].name == name ) {
\r
6720 dsDevices[i].found = true;
\r
6721 if ( probeInfo.isInput ) {
\r
6722 dsDevices[i].id[1] = lpguid;
\r
6723 dsDevices[i].validId[1] = true;
\r
6726 dsDevices[i].id[0] = lpguid;
\r
6727 dsDevices[i].validId[0] = true;
\r
6734 device.name = name;
\r
6735 device.found = true;
\r
6736 if ( probeInfo.isInput ) {
\r
6737 device.id[1] = lpguid;
\r
6738 device.validId[1] = true;
\r
6741 device.id[0] = lpguid;
\r
6742 device.validId[0] = true;
\r
6744 dsDevices.push_back( device );
\r
6750 static const char* getErrorString( int code )
\r
6754 case DSERR_ALLOCATED:
\r
6755 return "Already allocated";
\r
6757 case DSERR_CONTROLUNAVAIL:
\r
6758 return "Control unavailable";
\r
6760 case DSERR_INVALIDPARAM:
\r
6761 return "Invalid parameter";
\r
6763 case DSERR_INVALIDCALL:
\r
6764 return "Invalid call";
\r
6766 case DSERR_GENERIC:
\r
6767 return "Generic error";
\r
6769 case DSERR_PRIOLEVELNEEDED:
\r
6770 return "Priority level needed";
\r
6772 case DSERR_OUTOFMEMORY:
\r
6773 return "Out of memory";
\r
6775 case DSERR_BADFORMAT:
\r
6776 return "The sample rate or the channel format is not supported";
\r
6778 case DSERR_UNSUPPORTED:
\r
6779 return "Not supported";
\r
6781 case DSERR_NODRIVER:
\r
6782 return "No driver";
\r
6784 case DSERR_ALREADYINITIALIZED:
\r
6785 return "Already initialized";
\r
6787 case DSERR_NOAGGREGATION:
\r
6788 return "No aggregation";
\r
6790 case DSERR_BUFFERLOST:
\r
6791 return "Buffer lost";
\r
6793 case DSERR_OTHERAPPHASPRIO:
\r
6794 return "Another application already has priority";
\r
6796 case DSERR_UNINITIALIZED:
\r
6797 return "Uninitialized";
\r
6800 return "DirectSound unknown error";
\r
6803 //******************** End of __WINDOWS_DS__ *********************//
\r
6807 #if defined(__LINUX_ALSA__)
\r
6809 #include <alsa/asoundlib.h>
\r
6810 #include <unistd.h>
\r
6812 // A structure to hold various information related to the ALSA API
\r
6813 // implementation.
\r
6814 struct AlsaHandle {
\r
6815 snd_pcm_t *handles[2];
\r
6816 bool synchronized;
\r
6818 pthread_cond_t runnable_cv;
\r
6822 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6825 static void *alsaCallbackHandler( void * ptr );
\r
6827 RtApiAlsa :: RtApiAlsa()
\r
6829 // Nothing to do here.
\r
6832 RtApiAlsa :: ~RtApiAlsa()
\r
6834 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6837 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6839 unsigned nDevices = 0;
\r
6840 int result, subdevice, card;
\r
6842 snd_ctl_t *handle;
\r
6844 // Count cards and devices
\r
6846 snd_card_next( &card );
\r
6847 while ( card >= 0 ) {
\r
6848 sprintf( name, "hw:%d", card );
\r
6849 result = snd_ctl_open( &handle, name, 0 );
\r
6850 if ( result < 0 ) {
\r
6851 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6852 errorText_ = errorStream_.str();
\r
6853 error( RtAudioError::WARNING );
\r
6858 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6859 if ( result < 0 ) {
\r
6860 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6861 errorText_ = errorStream_.str();
\r
6862 error( RtAudioError::WARNING );
\r
6865 if ( subdevice < 0 )
\r
6870 snd_ctl_close( handle );
\r
6871 snd_card_next( &card );
\r
6874 result = snd_ctl_open( &handle, "default", 0 );
\r
6875 if (result == 0) {
\r
6877 snd_ctl_close( handle );
\r
6883 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6885 RtAudio::DeviceInfo info;
\r
6886 info.probed = false;
\r
6888 unsigned nDevices = 0;
\r
6889 int result, subdevice, card;
\r
6891 snd_ctl_t *chandle;
\r
6893 // Count cards and devices
\r
6896 snd_card_next( &card );
\r
6897 while ( card >= 0 ) {
\r
6898 sprintf( name, "hw:%d", card );
\r
6899 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6900 if ( result < 0 ) {
\r
6901 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6902 errorText_ = errorStream_.str();
\r
6903 error( RtAudioError::WARNING );
\r
6908 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6909 if ( result < 0 ) {
\r
6910 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6911 errorText_ = errorStream_.str();
\r
6912 error( RtAudioError::WARNING );
\r
6915 if ( subdevice < 0 ) break;
\r
6916 if ( nDevices == device ) {
\r
6917 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6923 snd_ctl_close( chandle );
\r
6924 snd_card_next( &card );
\r
6927 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6928 if ( result == 0 ) {
\r
6929 if ( nDevices == device ) {
\r
6930 strcpy( name, "default" );
\r
6936 if ( nDevices == 0 ) {
\r
6937 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6938 error( RtAudioError::INVALID_USE );
\r
6942 if ( device >= nDevices ) {
\r
6943 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6944 error( RtAudioError::INVALID_USE );
\r
6950 // If a stream is already open, we cannot probe the stream devices.
\r
6951 // Thus, use the saved results.
\r
6952 if ( stream_.state != STREAM_CLOSED &&
\r
6953 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6954 snd_ctl_close( chandle );
\r
6955 if ( device >= devices_.size() ) {
\r
6956 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6957 error( RtAudioError::WARNING );
\r
6960 return devices_[ device ];
\r
6963 int openMode = SND_PCM_ASYNC;
\r
6964 snd_pcm_stream_t stream;
\r
6965 snd_pcm_info_t *pcminfo;
\r
6966 snd_pcm_info_alloca( &pcminfo );
\r
6967 snd_pcm_t *phandle;
\r
6968 snd_pcm_hw_params_t *params;
\r
6969 snd_pcm_hw_params_alloca( ¶ms );
\r
6971 // First try for playback unless default device (which has subdev -1)
\r
6972 stream = SND_PCM_STREAM_PLAYBACK;
\r
6973 snd_pcm_info_set_stream( pcminfo, stream );
\r
6974 if ( subdevice != -1 ) {
\r
6975 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6976 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6978 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6979 if ( result < 0 ) {
\r
6980 // Device probably doesn't support playback.
\r
6981 goto captureProbe;
\r
6985 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6986 if ( result < 0 ) {
\r
6987 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6988 errorText_ = errorStream_.str();
\r
6989 error( RtAudioError::WARNING );
\r
6990 goto captureProbe;
\r
6993 // The device is open ... fill the parameter structure.
\r
6994 result = snd_pcm_hw_params_any( phandle, params );
\r
6995 if ( result < 0 ) {
\r
6996 snd_pcm_close( phandle );
\r
6997 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6998 errorText_ = errorStream_.str();
\r
6999 error( RtAudioError::WARNING );
\r
7000 goto captureProbe;
\r
7003 // Get output channel information.
\r
7004 unsigned int value;
\r
7005 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7006 if ( result < 0 ) {
\r
7007 snd_pcm_close( phandle );
\r
7008 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
7009 errorText_ = errorStream_.str();
\r
7010 error( RtAudioError::WARNING );
\r
7011 goto captureProbe;
\r
7013 info.outputChannels = value;
\r
7014 snd_pcm_close( phandle );
\r
7017 stream = SND_PCM_STREAM_CAPTURE;
\r
7018 snd_pcm_info_set_stream( pcminfo, stream );
\r
7020 // Now try for capture unless default device (with subdev = -1)
\r
7021 if ( subdevice != -1 ) {
\r
7022 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
7023 snd_ctl_close( chandle );
\r
7024 if ( result < 0 ) {
\r
7025 // Device probably doesn't support capture.
\r
7026 if ( info.outputChannels == 0 ) return info;
\r
7027 goto probeParameters;
\r
7031 snd_ctl_close( chandle );
\r
7033 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7034 if ( result < 0 ) {
\r
7035 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7036 errorText_ = errorStream_.str();
\r
7037 error( RtAudioError::WARNING );
\r
7038 if ( info.outputChannels == 0 ) return info;
\r
7039 goto probeParameters;
\r
7042 // The device is open ... fill the parameter structure.
\r
7043 result = snd_pcm_hw_params_any( phandle, params );
\r
7044 if ( result < 0 ) {
\r
7045 snd_pcm_close( phandle );
\r
7046 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7047 errorText_ = errorStream_.str();
\r
7048 error( RtAudioError::WARNING );
\r
7049 if ( info.outputChannels == 0 ) return info;
\r
7050 goto probeParameters;
\r
7053 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7054 if ( result < 0 ) {
\r
7055 snd_pcm_close( phandle );
\r
7056 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
7057 errorText_ = errorStream_.str();
\r
7058 error( RtAudioError::WARNING );
\r
7059 if ( info.outputChannels == 0 ) return info;
\r
7060 goto probeParameters;
\r
7062 info.inputChannels = value;
\r
7063 snd_pcm_close( phandle );
\r
7065 // If device opens for both playback and capture, we determine the channels.
\r
7066 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
7067 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
7069 // ALSA doesn't provide default devices so we'll use the first available one.
\r
7070 if ( device == 0 && info.outputChannels > 0 )
\r
7071 info.isDefaultOutput = true;
\r
7072 if ( device == 0 && info.inputChannels > 0 )
\r
7073 info.isDefaultInput = true;
\r
7076 // At this point, we just need to figure out the supported data
\r
7077 // formats and sample rates. We'll proceed by opening the device in
\r
7078 // the direction with the maximum number of channels, or playback if
\r
7079 // they are equal. This might limit our sample rate options, but so
\r
7082 if ( info.outputChannels >= info.inputChannels )
\r
7083 stream = SND_PCM_STREAM_PLAYBACK;
\r
7085 stream = SND_PCM_STREAM_CAPTURE;
\r
7086 snd_pcm_info_set_stream( pcminfo, stream );
\r
7088 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7089 if ( result < 0 ) {
\r
7090 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7091 errorText_ = errorStream_.str();
\r
7092 error( RtAudioError::WARNING );
\r
7096 // The device is open ... fill the parameter structure.
\r
7097 result = snd_pcm_hw_params_any( phandle, params );
\r
7098 if ( result < 0 ) {
\r
7099 snd_pcm_close( phandle );
\r
7100 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7101 errorText_ = errorStream_.str();
\r
7102 error( RtAudioError::WARNING );
\r
7106 // Test our discrete set of sample rate values.
\r
7107 info.sampleRates.clear();
\r
7108 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7109 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
\r
7110 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7112 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
7113 info.preferredSampleRate = SAMPLE_RATES[i];
\r
7116 if ( info.sampleRates.size() == 0 ) {
\r
7117 snd_pcm_close( phandle );
\r
7118 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7119 errorText_ = errorStream_.str();
\r
7120 error( RtAudioError::WARNING );
\r
7124 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7125 snd_pcm_format_t format;
\r
7126 info.nativeFormats = 0;
\r
7127 format = SND_PCM_FORMAT_S8;
\r
7128 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7129 info.nativeFormats |= RTAUDIO_SINT8;
\r
7130 format = SND_PCM_FORMAT_S16;
\r
7131 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7132 info.nativeFormats |= RTAUDIO_SINT16;
\r
7133 format = SND_PCM_FORMAT_S24;
\r
7134 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7135 info.nativeFormats |= RTAUDIO_SINT24;
\r
7136 format = SND_PCM_FORMAT_S32;
\r
7137 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7138 info.nativeFormats |= RTAUDIO_SINT32;
\r
7139 format = SND_PCM_FORMAT_FLOAT;
\r
7140 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7141 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7142 format = SND_PCM_FORMAT_FLOAT64;
\r
7143 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7144 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7146 // Check that we have at least one supported format
\r
7147 if ( info.nativeFormats == 0 ) {
\r
7148 snd_pcm_close( phandle );
\r
7149 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7150 errorText_ = errorStream_.str();
\r
7151 error( RtAudioError::WARNING );
\r
7155 // Get the device name
\r
7157 result = snd_card_get_name( card, &cardname );
\r
7158 if ( result >= 0 ) {
\r
7159 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7164 // That's all ... close the device and return
\r
7165 snd_pcm_close( phandle );
\r
7166 info.probed = true;
\r
7170 void RtApiAlsa :: saveDeviceInfo( void )
\r
7174 unsigned int nDevices = getDeviceCount();
\r
7175 devices_.resize( nDevices );
\r
7176 for ( unsigned int i=0; i<nDevices; i++ )
\r
7177 devices_[i] = getDeviceInfo( i );
\r
7180 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7181 unsigned int firstChannel, unsigned int sampleRate,
\r
7182 RtAudioFormat format, unsigned int *bufferSize,
\r
7183 RtAudio::StreamOptions *options )
\r
7186 #if defined(__RTAUDIO_DEBUG__)
\r
7187 snd_output_t *out;
\r
7188 snd_output_stdio_attach(&out, stderr, 0);
\r
7191 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7193 unsigned nDevices = 0;
\r
7194 int result, subdevice, card;
\r
7196 snd_ctl_t *chandle;
\r
7198 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7199 snprintf(name, sizeof(name), "%s", "default");
\r
7201 // Count cards and devices
\r
7203 snd_card_next( &card );
\r
7204 while ( card >= 0 ) {
\r
7205 sprintf( name, "hw:%d", card );
\r
7206 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7207 if ( result < 0 ) {
\r
7208 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7209 errorText_ = errorStream_.str();
\r
7214 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7215 if ( result < 0 ) break;
\r
7216 if ( subdevice < 0 ) break;
\r
7217 if ( nDevices == device ) {
\r
7218 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7219 snd_ctl_close( chandle );
\r
7224 snd_ctl_close( chandle );
\r
7225 snd_card_next( &card );
\r
7228 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7229 if ( result == 0 ) {
\r
7230 if ( nDevices == device ) {
\r
7231 strcpy( name, "default" );
\r
7237 if ( nDevices == 0 ) {
\r
7238 // This should not happen because a check is made before this function is called.
\r
7239 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7243 if ( device >= nDevices ) {
\r
7244 // This should not happen because a check is made before this function is called.
\r
7245 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7252 // The getDeviceInfo() function will not work for a device that is
\r
7253 // already open. Thus, we'll probe the system before opening a
\r
7254 // stream and save the results for use by getDeviceInfo().
\r
7255 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7256 this->saveDeviceInfo();
\r
7258 snd_pcm_stream_t stream;
\r
7259 if ( mode == OUTPUT )
\r
7260 stream = SND_PCM_STREAM_PLAYBACK;
\r
7262 stream = SND_PCM_STREAM_CAPTURE;
\r
7264 snd_pcm_t *phandle;
\r
7265 int openMode = SND_PCM_ASYNC;
\r
7266 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7267 if ( result < 0 ) {
\r
7268 if ( mode == OUTPUT )
\r
7269 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7271 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7272 errorText_ = errorStream_.str();
\r
7276 // Fill the parameter structure.
\r
7277 snd_pcm_hw_params_t *hw_params;
\r
7278 snd_pcm_hw_params_alloca( &hw_params );
\r
7279 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7280 if ( result < 0 ) {
\r
7281 snd_pcm_close( phandle );
\r
7282 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7283 errorText_ = errorStream_.str();
\r
7287 #if defined(__RTAUDIO_DEBUG__)
\r
7288 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7289 snd_pcm_hw_params_dump( hw_params, out );
\r
7292 // Set access ... check user preference.
\r
7293 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7294 stream_.userInterleaved = false;
\r
7295 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7296 if ( result < 0 ) {
\r
7297 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7298 stream_.deviceInterleaved[mode] = true;
\r
7301 stream_.deviceInterleaved[mode] = false;
\r
7304 stream_.userInterleaved = true;
\r
7305 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7306 if ( result < 0 ) {
\r
7307 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7308 stream_.deviceInterleaved[mode] = false;
\r
7311 stream_.deviceInterleaved[mode] = true;
\r
7314 if ( result < 0 ) {
\r
7315 snd_pcm_close( phandle );
\r
7316 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7317 errorText_ = errorStream_.str();
\r
7321 // Determine how to set the device format.
\r
7322 stream_.userFormat = format;
\r
7323 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7325 if ( format == RTAUDIO_SINT8 )
\r
7326 deviceFormat = SND_PCM_FORMAT_S8;
\r
7327 else if ( format == RTAUDIO_SINT16 )
\r
7328 deviceFormat = SND_PCM_FORMAT_S16;
\r
7329 else if ( format == RTAUDIO_SINT24 )
\r
7330 deviceFormat = SND_PCM_FORMAT_S24;
\r
7331 else if ( format == RTAUDIO_SINT32 )
\r
7332 deviceFormat = SND_PCM_FORMAT_S32;
\r
7333 else if ( format == RTAUDIO_FLOAT32 )
\r
7334 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7335 else if ( format == RTAUDIO_FLOAT64 )
\r
7336 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7338 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7339 stream_.deviceFormat[mode] = format;
\r
7343 // The user requested format is not natively supported by the device.
\r
7344 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7345 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7346 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7350 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7351 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7352 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7356 deviceFormat = SND_PCM_FORMAT_S32;
\r
7357 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7358 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7362 deviceFormat = SND_PCM_FORMAT_S24;
\r
7363 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7364 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7368 deviceFormat = SND_PCM_FORMAT_S16;
\r
7369 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7370 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7374 deviceFormat = SND_PCM_FORMAT_S8;
\r
7375 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7376 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7380 // If we get here, no supported format was found.
\r
7381 snd_pcm_close( phandle );
\r
7382 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7383 errorText_ = errorStream_.str();
\r
7387 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7388 if ( result < 0 ) {
\r
7389 snd_pcm_close( phandle );
\r
7390 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7391 errorText_ = errorStream_.str();
\r
7395 // Determine whether byte-swaping is necessary.
\r
7396 stream_.doByteSwap[mode] = false;
\r
7397 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7398 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7399 if ( result == 0 )
\r
7400 stream_.doByteSwap[mode] = true;
\r
7401 else if (result < 0) {
\r
7402 snd_pcm_close( phandle );
\r
7403 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7404 errorText_ = errorStream_.str();
\r
7409 // Set the sample rate.
\r
7410 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7411 if ( result < 0 ) {
\r
7412 snd_pcm_close( phandle );
\r
7413 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7414 errorText_ = errorStream_.str();
\r
7418 // Determine the number of channels for this device. We support a possible
\r
7419 // minimum device channel number > than the value requested by the user.
\r
7420 stream_.nUserChannels[mode] = channels;
\r
7421 unsigned int value;
\r
7422 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7423 unsigned int deviceChannels = value;
\r
7424 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7425 snd_pcm_close( phandle );
\r
7426 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7427 errorText_ = errorStream_.str();
\r
7431 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7432 if ( result < 0 ) {
\r
7433 snd_pcm_close( phandle );
\r
7434 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7435 errorText_ = errorStream_.str();
\r
7438 deviceChannels = value;
\r
7439 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7440 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7442 // Set the device channels.
\r
7443 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7444 if ( result < 0 ) {
\r
7445 snd_pcm_close( phandle );
\r
7446 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7447 errorText_ = errorStream_.str();
\r
7451 // Set the buffer (or period) size.
\r
7453 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7454 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7455 if ( result < 0 ) {
\r
7456 snd_pcm_close( phandle );
\r
7457 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7458 errorText_ = errorStream_.str();
\r
7461 *bufferSize = periodSize;
\r
7463 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7464 unsigned int periods = 0;
\r
7465 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7466 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7467 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7468 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7469 if ( result < 0 ) {
\r
7470 snd_pcm_close( phandle );
\r
7471 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7472 errorText_ = errorStream_.str();
\r
7476 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7477 // MUST be the same in both directions!
\r
7478 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7479 snd_pcm_close( phandle );
\r
7480 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7481 errorText_ = errorStream_.str();
\r
7485 stream_.bufferSize = *bufferSize;
\r
7487 // Install the hardware configuration
\r
7488 result = snd_pcm_hw_params( phandle, hw_params );
\r
7489 if ( result < 0 ) {
\r
7490 snd_pcm_close( phandle );
\r
7491 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7492 errorText_ = errorStream_.str();
\r
7496 #if defined(__RTAUDIO_DEBUG__)
\r
7497 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7498 snd_pcm_hw_params_dump( hw_params, out );
\r
7501 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7502 snd_pcm_sw_params_t *sw_params = NULL;
\r
7503 snd_pcm_sw_params_alloca( &sw_params );
\r
7504 snd_pcm_sw_params_current( phandle, sw_params );
\r
7505 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7506 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7507 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7509 // The following two settings were suggested by Theo Veenker
\r
7510 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7511 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7513 // here are two options for a fix
\r
7514 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7515 snd_pcm_uframes_t val;
\r
7516 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7517 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7519 result = snd_pcm_sw_params( phandle, sw_params );
\r
7520 if ( result < 0 ) {
\r
7521 snd_pcm_close( phandle );
\r
7522 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7523 errorText_ = errorStream_.str();
\r
7527 #if defined(__RTAUDIO_DEBUG__)
\r
7528 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7529 snd_pcm_sw_params_dump( sw_params, out );
\r
7532 // Set flags for buffer conversion
\r
7533 stream_.doConvertBuffer[mode] = false;
\r
7534 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7535 stream_.doConvertBuffer[mode] = true;
\r
7536 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7537 stream_.doConvertBuffer[mode] = true;
\r
7538 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7539 stream_.nUserChannels[mode] > 1 )
\r
7540 stream_.doConvertBuffer[mode] = true;
\r
7542 // Allocate the ApiHandle if necessary and then save.
\r
7543 AlsaHandle *apiInfo = 0;
\r
7544 if ( stream_.apiHandle == 0 ) {
\r
7546 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7548 catch ( std::bad_alloc& ) {
\r
7549 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7553 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7554 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7558 stream_.apiHandle = (void *) apiInfo;
\r
7559 apiInfo->handles[0] = 0;
\r
7560 apiInfo->handles[1] = 0;
\r
7563 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7565 apiInfo->handles[mode] = phandle;
\r
7568 // Allocate necessary internal buffers.
\r
7569 unsigned long bufferBytes;
\r
7570 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7571 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7572 if ( stream_.userBuffer[mode] == NULL ) {
\r
7573 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7577 if ( stream_.doConvertBuffer[mode] ) {
\r
7579 bool makeBuffer = true;
\r
7580 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7581 if ( mode == INPUT ) {
\r
7582 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7583 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7584 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7588 if ( makeBuffer ) {
\r
7589 bufferBytes *= *bufferSize;
\r
7590 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7591 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7592 if ( stream_.deviceBuffer == NULL ) {
\r
7593 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7599 stream_.sampleRate = sampleRate;
\r
7600 stream_.nBuffers = periods;
\r
7601 stream_.device[mode] = device;
\r
7602 stream_.state = STREAM_STOPPED;
\r
7604 // Setup the buffer conversion information structure.
\r
7605 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7607 // Setup thread if necessary.
\r
7608 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7609 // We had already set up an output stream.
\r
7610 stream_.mode = DUPLEX;
\r
7611 // Link the streams if possible.
\r
7612 apiInfo->synchronized = false;
\r
7613 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7614 apiInfo->synchronized = true;
\r
7616 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7617 error( RtAudioError::WARNING );
\r
7621 stream_.mode = mode;
\r
7623 // Setup callback thread.
\r
7624 stream_.callbackInfo.object = (void *) this;
\r
7626 // Set the thread attributes for joinable and realtime scheduling
\r
7627 // priority (optional). The higher priority will only take affect
\r
7628 // if the program is run as root or suid. Note, under Linux
\r
7629 // processes with CAP_SYS_NICE privilege, a user can change
\r
7630 // scheduling policy and priority (thus need not be root). See
\r
7631 // POSIX "capabilities".
\r
7632 pthread_attr_t attr;
\r
7633 pthread_attr_init( &attr );
\r
7634 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7636 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7637 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7638 // We previously attempted to increase the audio callback priority
\r
7639 // to SCHED_RR here via the attributes. However, while no errors
\r
7640 // were reported in doing so, it did not work. So, now this is
\r
7641 // done in the alsaCallbackHandler function.
\r
7642 stream_.callbackInfo.doRealtime = true;
\r
7643 int priority = options->priority;
\r
7644 int min = sched_get_priority_min( SCHED_RR );
\r
7645 int max = sched_get_priority_max( SCHED_RR );
\r
7646 if ( priority < min ) priority = min;
\r
7647 else if ( priority > max ) priority = max;
\r
7648 stream_.callbackInfo.priority = priority;
\r
7652 stream_.callbackInfo.isRunning = true;
\r
7653 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7654 pthread_attr_destroy( &attr );
\r
7656 stream_.callbackInfo.isRunning = false;
\r
7657 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7666 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7667 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7668 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7670 stream_.apiHandle = 0;
\r
7673 if ( phandle) snd_pcm_close( phandle );
\r
7675 for ( int i=0; i<2; i++ ) {
\r
7676 if ( stream_.userBuffer[i] ) {
\r
7677 free( stream_.userBuffer[i] );
\r
7678 stream_.userBuffer[i] = 0;
\r
7682 if ( stream_.deviceBuffer ) {
\r
7683 free( stream_.deviceBuffer );
\r
7684 stream_.deviceBuffer = 0;
\r
7687 stream_.state = STREAM_CLOSED;
\r
7691 void RtApiAlsa :: closeStream()
\r
7693 if ( stream_.state == STREAM_CLOSED ) {
\r
7694 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7695 error( RtAudioError::WARNING );
\r
7699 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7700 stream_.callbackInfo.isRunning = false;
\r
7701 MUTEX_LOCK( &stream_.mutex );
\r
7702 if ( stream_.state == STREAM_STOPPED ) {
\r
7703 apiInfo->runnable = true;
\r
7704 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7706 MUTEX_UNLOCK( &stream_.mutex );
\r
7707 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7709 if ( stream_.state == STREAM_RUNNING ) {
\r
7710 stream_.state = STREAM_STOPPED;
\r
7711 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7712 snd_pcm_drop( apiInfo->handles[0] );
\r
7713 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7714 snd_pcm_drop( apiInfo->handles[1] );
\r
7718 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7719 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7720 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7722 stream_.apiHandle = 0;
\r
7725 for ( int i=0; i<2; i++ ) {
\r
7726 if ( stream_.userBuffer[i] ) {
\r
7727 free( stream_.userBuffer[i] );
\r
7728 stream_.userBuffer[i] = 0;
\r
7732 if ( stream_.deviceBuffer ) {
\r
7733 free( stream_.deviceBuffer );
\r
7734 stream_.deviceBuffer = 0;
\r
7737 stream_.mode = UNINITIALIZED;
\r
7738 stream_.state = STREAM_CLOSED;
\r
7741 void RtApiAlsa :: startStream()
\r
7743 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7746 if ( stream_.state == STREAM_RUNNING ) {
\r
7747 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7748 error( RtAudioError::WARNING );
\r
7752 MUTEX_LOCK( &stream_.mutex );
\r
7755 snd_pcm_state_t state;
\r
7756 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7757 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7758 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7759 state = snd_pcm_state( handle[0] );
\r
7760 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7761 result = snd_pcm_prepare( handle[0] );
\r
7762 if ( result < 0 ) {
\r
7763 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7764 errorText_ = errorStream_.str();
\r
7770 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7771 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7772 state = snd_pcm_state( handle[1] );
\r
7773 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7774 result = snd_pcm_prepare( handle[1] );
\r
7775 if ( result < 0 ) {
\r
7776 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7777 errorText_ = errorStream_.str();
\r
7783 stream_.state = STREAM_RUNNING;
\r
7786 apiInfo->runnable = true;
\r
7787 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7788 MUTEX_UNLOCK( &stream_.mutex );
\r
7790 if ( result >= 0 ) return;
\r
7791 error( RtAudioError::SYSTEM_ERROR );
\r
7794 void RtApiAlsa :: stopStream()
\r
7797 if ( stream_.state == STREAM_STOPPED ) {
\r
7798 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7799 error( RtAudioError::WARNING );
\r
7803 stream_.state = STREAM_STOPPED;
\r
7804 MUTEX_LOCK( &stream_.mutex );
\r
7807 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7808 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7809 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7810 if ( apiInfo->synchronized )
\r
7811 result = snd_pcm_drop( handle[0] );
\r
7813 result = snd_pcm_drain( handle[0] );
\r
7814 if ( result < 0 ) {
\r
7815 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7816 errorText_ = errorStream_.str();
\r
7821 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7822 result = snd_pcm_drop( handle[1] );
\r
7823 if ( result < 0 ) {
\r
7824 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7825 errorText_ = errorStream_.str();
\r
7831 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7832 MUTEX_UNLOCK( &stream_.mutex );
\r
7834 if ( result >= 0 ) return;
\r
7835 error( RtAudioError::SYSTEM_ERROR );
\r
7838 void RtApiAlsa :: abortStream()
\r
7841 if ( stream_.state == STREAM_STOPPED ) {
\r
7842 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7843 error( RtAudioError::WARNING );
\r
7847 stream_.state = STREAM_STOPPED;
\r
7848 MUTEX_LOCK( &stream_.mutex );
\r
7851 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7852 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7853 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7854 result = snd_pcm_drop( handle[0] );
\r
7855 if ( result < 0 ) {
\r
7856 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7857 errorText_ = errorStream_.str();
\r
7862 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7863 result = snd_pcm_drop( handle[1] );
\r
7864 if ( result < 0 ) {
\r
7865 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7866 errorText_ = errorStream_.str();
\r
7872 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7873 MUTEX_UNLOCK( &stream_.mutex );
\r
7875 if ( result >= 0 ) return;
\r
7876 error( RtAudioError::SYSTEM_ERROR );
\r
7879 void RtApiAlsa :: callbackEvent()
\r
7881 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7882 if ( stream_.state == STREAM_STOPPED ) {
\r
7883 MUTEX_LOCK( &stream_.mutex );
\r
7884 while ( !apiInfo->runnable )
\r
7885 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7887 if ( stream_.state != STREAM_RUNNING ) {
\r
7888 MUTEX_UNLOCK( &stream_.mutex );
\r
7891 MUTEX_UNLOCK( &stream_.mutex );
\r
7894 if ( stream_.state == STREAM_CLOSED ) {
\r
7895 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7896 error( RtAudioError::WARNING );
\r
7900 int doStopStream = 0;
\r
7901 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7902 double streamTime = getStreamTime();
\r
7903 RtAudioStreamStatus status = 0;
\r
7904 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7905 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7906 apiInfo->xrun[0] = false;
\r
7908 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7909 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7910 apiInfo->xrun[1] = false;
\r
7912 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7913 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7915 if ( doStopStream == 2 ) {
\r
7920 MUTEX_LOCK( &stream_.mutex );
\r
7922 // The state might change while waiting on a mutex.
\r
7923 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7928 snd_pcm_t **handle;
\r
7929 snd_pcm_sframes_t frames;
\r
7930 RtAudioFormat format;
\r
7931 handle = (snd_pcm_t **) apiInfo->handles;
\r
7933 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7935 // Setup parameters.
\r
7936 if ( stream_.doConvertBuffer[1] ) {
\r
7937 buffer = stream_.deviceBuffer;
\r
7938 channels = stream_.nDeviceChannels[1];
\r
7939 format = stream_.deviceFormat[1];
\r
7942 buffer = stream_.userBuffer[1];
\r
7943 channels = stream_.nUserChannels[1];
\r
7944 format = stream_.userFormat;
\r
7947 // Read samples from device in interleaved/non-interleaved format.
\r
7948 if ( stream_.deviceInterleaved[1] )
\r
7949 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
7951 void *bufs[channels];
\r
7952 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7953 for ( int i=0; i<channels; i++ )
\r
7954 bufs[i] = (void *) (buffer + (i * offset));
\r
7955 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
7958 if ( result < (int) stream_.bufferSize ) {
\r
7959 // Either an error or overrun occured.
\r
7960 if ( result == -EPIPE ) {
\r
7961 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7962 if ( state == SND_PCM_STATE_XRUN ) {
\r
7963 apiInfo->xrun[1] = true;
\r
7964 result = snd_pcm_prepare( handle[1] );
\r
7965 if ( result < 0 ) {
\r
7966 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7967 errorText_ = errorStream_.str();
\r
7971 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7972 errorText_ = errorStream_.str();
\r
7976 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7977 errorText_ = errorStream_.str();
\r
7979 error( RtAudioError::WARNING );
\r
7983 // Do byte swapping if necessary.
\r
7984 if ( stream_.doByteSwap[1] )
\r
7985 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
7987 // Do buffer conversion if necessary.
\r
7988 if ( stream_.doConvertBuffer[1] )
\r
7989 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7991 // Check stream latency
\r
7992 result = snd_pcm_delay( handle[1], &frames );
\r
7993 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
7998 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8000 // Setup parameters and do buffer conversion if necessary.
\r
8001 if ( stream_.doConvertBuffer[0] ) {
\r
8002 buffer = stream_.deviceBuffer;
\r
8003 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
8004 channels = stream_.nDeviceChannels[0];
\r
8005 format = stream_.deviceFormat[0];
\r
8008 buffer = stream_.userBuffer[0];
\r
8009 channels = stream_.nUserChannels[0];
\r
8010 format = stream_.userFormat;
\r
8013 // Do byte swapping if necessary.
\r
8014 if ( stream_.doByteSwap[0] )
\r
8015 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
8017 // Write samples to device in interleaved/non-interleaved format.
\r
8018 if ( stream_.deviceInterleaved[0] )
\r
8019 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
8021 void *bufs[channels];
\r
8022 size_t offset = stream_.bufferSize * formatBytes( format );
\r
8023 for ( int i=0; i<channels; i++ )
\r
8024 bufs[i] = (void *) (buffer + (i * offset));
\r
8025 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
8028 if ( result < (int) stream_.bufferSize ) {
\r
8029 // Either an error or underrun occured.
\r
8030 if ( result == -EPIPE ) {
\r
8031 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
8032 if ( state == SND_PCM_STATE_XRUN ) {
\r
8033 apiInfo->xrun[0] = true;
\r
8034 result = snd_pcm_prepare( handle[0] );
\r
8035 if ( result < 0 ) {
\r
8036 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
8037 errorText_ = errorStream_.str();
\r
8040 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
\r
8043 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8044 errorText_ = errorStream_.str();
\r
8048 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
8049 errorText_ = errorStream_.str();
\r
8051 error( RtAudioError::WARNING );
\r
8055 // Check stream latency
\r
8056 result = snd_pcm_delay( handle[0], &frames );
\r
8057 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
8061 MUTEX_UNLOCK( &stream_.mutex );
\r
8063 RtApi::tickStreamTime();
\r
8064 if ( doStopStream == 1 ) this->stopStream();
\r
8067 static void *alsaCallbackHandler( void *ptr )
\r
8069 CallbackInfo *info = (CallbackInfo *) ptr;
\r
8070 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
8071 bool *isRunning = &info->isRunning;
\r
8073 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
8074 if ( &info->doRealtime ) {
\r
8075 pthread_t tID = pthread_self(); // ID of this thread
\r
8076 sched_param prio = { info->priority }; // scheduling priority of thread
\r
8077 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
8081 while ( *isRunning == true ) {
\r
8082 pthread_testcancel();
\r
8083 object->callbackEvent();
\r
8086 pthread_exit( NULL );
\r
8089 //******************** End of __LINUX_ALSA__ *********************//
\r
8092 #if defined(__LINUX_PULSE__)
\r
8094 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8095 // and Tristan Matthews.
\r
8097 #include <pulse/error.h>
\r
8098 #include <pulse/simple.h>
\r
// Sample rates offered through the PulseAudio back-end; the list is
// zero-terminated so it can be walked with a simple pointer loop.
static const unsigned int SUPPORTED_SAMPLERATES[] = {
  8000, 16000, 22050, 32000, 44100, 48000, 96000,
  0  // terminator
};
\r
8104 struct rtaudio_pa_format_mapping_t {
\r
8105 RtAudioFormat rtaudio_format;
\r
8106 pa_sample_format_t pa_format;
\r
8109 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8110 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8111 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8112 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8113 {0, PA_SAMPLE_INVALID}};
\r
8115 struct PulseAudioHandle {
\r
8116 pa_simple *s_play;
\r
8119 pthread_cond_t runnable_cv;
\r
8121 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8124 RtApiPulse::~RtApiPulse()
\r
8126 if ( stream_.state != STREAM_CLOSED )
\r
8130 unsigned int RtApiPulse::getDeviceCount( void )
\r
8135 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8137 RtAudio::DeviceInfo info;
\r
8138 info.probed = true;
\r
8139 info.name = "PulseAudio";
\r
8140 info.outputChannels = 2;
\r
8141 info.inputChannels = 2;
\r
8142 info.duplexChannels = 2;
\r
8143 info.isDefaultOutput = true;
\r
8144 info.isDefaultInput = true;
\r
8146 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8147 info.sampleRates.push_back( *sr );
\r
8149 info.preferredSampleRate = 48000;
\r
8150 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8155 static void *pulseaudio_callback( void * user )
\r
8157 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8158 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8159 volatile bool *isRunning = &cbi->isRunning;
\r
8161 while ( *isRunning ) {
\r
8162 pthread_testcancel();
\r
8163 context->callbackEvent();
\r
8166 pthread_exit( NULL );
\r
// Tear down the stream: stop the callback thread (waking it first if it is
// parked on runnable_cv), join it, free the PulseAudio connections, destroy
// the condition variable, and release user buffers.
8169 void RtApiPulse::closeStream( void )

8171 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

// Signal the callback loop in pulseaudio_callback() to exit.
8173 stream_.callbackInfo.isRunning = false;

8175 MUTEX_LOCK( &stream_.mutex );

// If the thread is blocked waiting for runnable, wake it so it can see
// isRunning == false and exit.
8176 if ( stream_.state == STREAM_STOPPED ) {

8177 pah->runnable = true;

8178 pthread_cond_signal( &pah->runnable_cv );

8180 MUTEX_UNLOCK( &stream_.mutex );

8182 pthread_join( pah->thread, 0 );

// Flush pending playback data before freeing the connection.
8183 if ( pah->s_play ) {

8184 pa_simple_flush( pah->s_play, NULL );

8185 pa_simple_free( pah->s_play );

// NOTE(review): the guard around this free (if pah->s_rec) is elided in
// this fragment.
8188 pa_simple_free( pah->s_rec );

8190 pthread_cond_destroy( &pah->runnable_cv );

8192 stream_.apiHandle = 0;

// Release the interleave/convert user buffers for both directions.
8195 if ( stream_.userBuffer[0] ) {

8196 free( stream_.userBuffer[0] );

8197 stream_.userBuffer[0] = 0;

8199 if ( stream_.userBuffer[1] ) {

8200 free( stream_.userBuffer[1] );

8201 stream_.userBuffer[1] = 0;

8204 stream_.state = STREAM_CLOSED;

8205 stream_.mode = UNINITIALIZED;
\r
// One iteration of the audio pump, run on the callback thread: wait while
// stopped, invoke the user callback, then write converted output to and/or
// read input from the PulseAudio simple-API connections.
8208 void RtApiPulse::callbackEvent( void )

8210 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

// Park here while the stream is stopped; startStream()/closeStream() set
// pah->runnable and signal runnable_cv to wake us.
8212 if ( stream_.state == STREAM_STOPPED ) {

8213 MUTEX_LOCK( &stream_.mutex );

8214 while ( !pah->runnable )

8215 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );

// Woken but not running (e.g. being closed): bail out.
8217 if ( stream_.state != STREAM_RUNNING ) {

8218 MUTEX_UNLOCK( &stream_.mutex );

8221 MUTEX_UNLOCK( &stream_.mutex );

8224 if ( stream_.state == STREAM_CLOSED ) {

8225 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "

8226 "this shouldn't happen!";

8227 error( RtAudioError::WARNING );

// Invoke the user's callback; its return value requests stop (1) or
// abort (2) of the stream.
8231 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;

8232 double streamTime = getStreamTime();

8233 RtAudioStreamStatus status = 0;

8234 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],

8235 stream_.bufferSize, streamTime, status,

8236 stream_.callbackInfo.userData );

// NOTE(review): the abortStream() call and return for this branch are
// elided in this fragment.
8238 if ( doStopStream == 2 ) {

8243 MUTEX_LOCK( &stream_.mutex );

// When a format/channel conversion is needed, PulseAudio reads/writes the
// device buffer; otherwise it uses the user buffer directly.
8244 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];

8245 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];

8247 if ( stream_.state != STREAM_RUNNING )

// Playback path: convert (if needed), compute byte count, then blocking
// write to the playback connection.
8252 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

8253 if ( stream_.doConvertBuffer[OUTPUT] ) {

8254 convertBuffer( stream_.deviceBuffer,

8255 stream_.userBuffer[OUTPUT],

8256 stream_.convertInfo[OUTPUT] );

8257 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *

8258 formatBytes( stream_.deviceFormat[OUTPUT] );

8260 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *

8261 formatBytes( stream_.userFormat );

8263 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {

8264 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<

8265 pa_strerror( pa_error ) << ".";

8266 errorText_ = errorStream_.str();

8267 error( RtAudioError::WARNING );

// Capture path: blocking read from the record connection, then convert
// into the user buffer if needed.
8271 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {

8272 if ( stream_.doConvertBuffer[INPUT] )

8273 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *

8274 formatBytes( stream_.deviceFormat[INPUT] );

8276 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *

8277 formatBytes( stream_.userFormat );

8279 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {

8280 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<

8281 pa_strerror( pa_error ) << ".";

8282 errorText_ = errorStream_.str();

8283 error( RtAudioError::WARNING );

8285 if ( stream_.doConvertBuffer[INPUT] ) {

8286 convertBuffer( stream_.userBuffer[INPUT],

8287 stream_.deviceBuffer,

8288 stream_.convertInfo[INPUT] );

8293 MUTEX_UNLOCK( &stream_.mutex );

// Advance the stream time by one buffer's worth of frames.
8294 RtApi::tickStreamTime();

// NOTE(review): the stopStream() call for this branch is elided in this
// fragment.
8296 if ( doStopStream == 1 )
\r
// Transition the stream to STREAM_RUNNING and wake the parked callback
// thread via runnable/runnable_cv. Errors out if the stream is closed or
// already running.
8300 void RtApiPulse::startStream( void )

8302 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

8304 if ( stream_.state == STREAM_CLOSED ) {

8305 errorText_ = "RtApiPulse::startStream(): the stream is not open!";

8306 error( RtAudioError::INVALID_USE );

8309 if ( stream_.state == STREAM_RUNNING ) {

8310 errorText_ = "RtApiPulse::startStream(): the stream is already running!";

8311 error( RtAudioError::WARNING );

8315 MUTEX_LOCK( &stream_.mutex );

8317 stream_.state = STREAM_RUNNING;

// Wake callbackEvent()'s pthread_cond_wait loop.
8319 pah->runnable = true;

8320 pthread_cond_signal( &pah->runnable_cv );

8321 MUTEX_UNLOCK( &stream_.mutex );
\r
// Stop the stream, draining any queued playback data so it is heard in
// full (contrast abortStream(), which flushes/discards it).
8324 void RtApiPulse::stopStream( void )

8326 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

8328 if ( stream_.state == STREAM_CLOSED ) {

8329 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";

8330 error( RtAudioError::INVALID_USE );

8333 if ( stream_.state == STREAM_STOPPED ) {

8334 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";

8335 error( RtAudioError::WARNING );

// State is flipped before taking the mutex so the callback thread stops
// pumping as soon as possible.
8339 stream_.state = STREAM_STOPPED;

8340 MUTEX_LOCK( &stream_.mutex );

8342 if ( pah && pah->s_play ) {

// Block until all written samples have played out.
8344 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {

8345 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<

8346 pa_strerror( pa_error ) << ".";

8347 errorText_ = errorStream_.str();

8348 MUTEX_UNLOCK( &stream_.mutex );

8349 error( RtAudioError::SYSTEM_ERROR );

8354 stream_.state = STREAM_STOPPED;

8355 MUTEX_UNLOCK( &stream_.mutex );
\r
// Stop the stream immediately, discarding (flushing) any queued playback
// data (contrast stopStream(), which drains it).
8358 void RtApiPulse::abortStream( void )

8360 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );

8362 if ( stream_.state == STREAM_CLOSED ) {

8363 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";

8364 error( RtAudioError::INVALID_USE );

8367 if ( stream_.state == STREAM_STOPPED ) {

8368 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";

8369 error( RtAudioError::WARNING );

// State is flipped before taking the mutex so the callback thread stops
// pumping as soon as possible.
8373 stream_.state = STREAM_STOPPED;

8374 MUTEX_LOCK( &stream_.mutex );

8376 if ( pah && pah->s_play ) {

// Discard any audio still queued in the playback buffer.
8378 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {

8379 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<

8380 pa_strerror( pa_error ) << ".";

8381 errorText_ = errorStream_.str();

8382 MUTEX_UNLOCK( &stream_.mutex );

8383 error( RtAudioError::SYSTEM_ERROR );

8388 stream_.state = STREAM_STOPPED;

8389 MUTEX_UNLOCK( &stream_.mutex );
\r
// Open one direction (INPUT or OUTPUT) of the single virtual PulseAudio
// device: validate parameters, pick a pa_sample_spec (falling back to
// FLOAT32 with internal conversion for unsupported formats), allocate
// user/device buffers, create the PulseAudioHandle, connect via
// pa_simple_new, and spawn the callback thread on first open.
// Returns true on success, false on any failure.
// NOTE(review): extraction fragment — several statements (returns on error
// paths, loop-exit bookkeeping, goto-error unwinding labels) are elided.
8392 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,

8393 unsigned int channels, unsigned int firstChannel,

8394 unsigned int sampleRate, RtAudioFormat format,

8395 unsigned int *bufferSize, RtAudio::StreamOptions *options )

8397 PulseAudioHandle *pah = 0;

8398 unsigned long bufferBytes = 0;

8399 pa_sample_spec ss;

// Only device 0 exists; only mono/stereo, channel offset 0 supported.
8401 if ( device != 0 ) return false;

8402 if ( mode != INPUT && mode != OUTPUT ) return false;

8403 if ( channels != 1 && channels != 2 ) {

8404 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";

8407 ss.channels = channels;

8409 if ( firstChannel != 0 ) return false;

// The requested rate must be one of SUPPORTED_SAMPLERATES (zero-terminated).
8411 bool sr_found = false;

8412 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {

8413 if ( sampleRate == *sr ) {

8415 stream_.sampleRate = sampleRate;

8416 ss.rate = sampleRate;

8420 if ( !sr_found ) {

8421 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";

// Map the RtAudio format onto a native PulseAudio sample format if possible.
8425 bool sf_found = 0;

8426 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;

8427 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {

8428 if ( format == sf->rtaudio_format ) {

8430 stream_.userFormat = sf->rtaudio_format;

8431 stream_.deviceFormat[mode] = stream_.userFormat;

8432 ss.format = sf->pa_format;

8436 if ( !sf_found ) { // Use internal data format conversion.

8437 stream_.userFormat = format;

8438 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;

8439 ss.format = PA_SAMPLE_FLOAT32LE;

8442 // Set other stream parameters.

8443 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

8444 else stream_.userInterleaved = true;

8445 stream_.deviceInterleaved[mode] = true;

8446 stream_.nBuffers = 1;

8447 stream_.doByteSwap[mode] = false;

8448 stream_.nUserChannels[mode] = channels;

8449 stream_.nDeviceChannels[mode] = channels + firstChannel;

8450 stream_.channelOffset[mode] = 0;

8451 std::string streamName = "RtAudio";

8453 // Set flags for buffer conversion.

8454 stream_.doConvertBuffer[mode] = false;

8455 if ( stream_.userFormat != stream_.deviceFormat[mode] )

8456 stream_.doConvertBuffer[mode] = true;

8457 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )

8458 stream_.doConvertBuffer[mode] = true;

8460 // Allocate necessary internal buffers.

8461 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

8462 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

8463 if ( stream_.userBuffer[mode] == NULL ) {

8464 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";

8467 stream_.bufferSize = *bufferSize;

8469 if ( stream_.doConvertBuffer[mode] ) {

8471 bool makeBuffer = true;

8472 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

// For duplex, reuse the output-side device buffer when it is big enough.
8473 if ( mode == INPUT ) {

8474 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

8475 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

8476 if ( bufferBytes <= bytesOut ) makeBuffer = false;

8480 if ( makeBuffer ) {

8481 bufferBytes *= *bufferSize;

8482 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

8483 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

8484 if ( stream_.deviceBuffer == NULL ) {

8485 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";

8491 stream_.device[mode] = device;

8493 // Setup the buffer conversion information structure.

8494 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

// Create the shared handle on first open (output or input, whichever
// comes first).
8496 if ( !stream_.apiHandle ) {

8497 PulseAudioHandle *pah = new PulseAudioHandle;

8499 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";

8503 stream_.apiHandle = pah;

8504 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {

8505 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";

8509 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

8512 if ( options && !options->streamName.empty() ) streamName = options->streamName;

// Record side: request a fragment size matching our buffer to bound latency.
8515 pa_buffer_attr buffer_attr;

8516 buffer_attr.fragsize = bufferBytes;

8517 buffer_attr.maxlength = -1;

8519 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );

8520 if ( !pah->s_rec ) {

8521 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";

// Playback side: default buffer attributes.
// NOTE(review): uses the literal "RtAudio" rather than streamName here —
// possibly intentional in the original; confirm against full source.
8526 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );

8527 if ( !pah->s_play ) {

8528 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";

// Track mode: first direction sets it; opening the other direction later
// promotes the stream to DUPLEX.
8536 if ( stream_.mode == UNINITIALIZED )

8537 stream_.mode = mode;

8538 else if ( stream_.mode == mode )

8541 stream_.mode = DUPLEX;

// Spawn the callback thread once, on the first successful open.
8543 if ( !stream_.callbackInfo.isRunning ) {

8544 stream_.callbackInfo.object = this;

8545 stream_.callbackInfo.isRunning = true;

8546 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {

8547 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";

8552 stream_.state = STREAM_STOPPED;

// Error-unwinding path: destroy the condvar, free the handle, release all
// buffers, and return failure (label elided in this fragment).
8556 if ( pah && stream_.callbackInfo.isRunning ) {

8557 pthread_cond_destroy( &pah->runnable_cv );

8559 stream_.apiHandle = 0;

8562 for ( int i=0; i<2; i++ ) {

8563 if ( stream_.userBuffer[i] ) {

8564 free( stream_.userBuffer[i] );

8565 stream_.userBuffer[i] = 0;

8569 if ( stream_.deviceBuffer ) {

8570 free( stream_.deviceBuffer );

8571 stream_.deviceBuffer = 0;
\r
8577 //******************** End of __LINUX_PULSE__ *********************//
\r
8580 #if defined(__LINUX_OSS__)
\r
8582 #include <unistd.h>
\r
8583 #include <sys/ioctl.h>
\r
8584 #include <unistd.h>
\r
8585 #include <fcntl.h>
\r
8586 #include <sys/soundcard.h>
\r
8587 #include <errno.h>
\r
8590 static void *ossCallbackHandler(void * ptr);
\r
8592 // A structure to hold various information related to the OSS API
\r
8593 // implementation.
\r
// Per-stream state for the OSS backend: the playback/capture file
// descriptors and the condition variable used to park the callback thread.
// NOTE(review): extraction fragment — members referenced by the visible
// constructor (triggered, xrun) are missing from this span.
8594 struct OssHandle {

8595 int id[2]; // device ids

8598 pthread_cond_t runnable;

8601 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// Default constructor: all setup happens lazily in probeDeviceOpen().
8604 RtApiOss :: RtApiOss()

8606 // Nothing to do here.
\r
// Destructor: close the stream if the caller has not already done so.
8609 RtApiOss :: ~RtApiOss()

8611 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// Count OSS audio devices by querying SNDCTL_SYSINFO through /dev/mixer.
// Emits a WARNING (and, per the visible paths, falls through) if the mixer
// cannot be opened or the ioctl fails (requires OSS >= 4.0).
8614 unsigned int RtApiOss :: getDeviceCount( void )

8616 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );

8617 if ( mixerfd == -1 ) {

8618 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";

8619 error( RtAudioError::WARNING );

8623 oss_sysinfo sysinfo;

8624 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {

8626 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";

8627 error( RtAudioError::WARNING );

// NOTE(review): the close( mixerfd ) calls on these paths are elided in
// this fragment.
8632 return sysinfo.numaudios;
\r
// Probe one OSS device via SNDCTL_SYSINFO/SNDCTL_AUDIOINFO: channel
// capabilities, native sample formats (from the input-format mask), and
// supported sample rates (explicit rate list or min/max range). Sets
// info.probed = true only when everything succeeds.
// NOTE(review): extraction fragment — early returns and close( mixerfd )
// calls on the error paths are elided.
8635 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )

8637 RtAudio::DeviceInfo info;

8638 info.probed = false;

8640 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );

8641 if ( mixerfd == -1 ) {

8642 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";

8643 error( RtAudioError::WARNING );

8647 oss_sysinfo sysinfo;

8648 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );

8649 if ( result == -1 ) {

8651 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";

8652 error( RtAudioError::WARNING );

8656 unsigned nDevices = sysinfo.numaudios;

8657 if ( nDevices == 0 ) {

8659 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";

8660 error( RtAudioError::INVALID_USE );

8664 if ( device >= nDevices ) {

8666 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";

8667 error( RtAudioError::INVALID_USE );

// Query this specific device's capabilities.
8671 oss_audioinfo ainfo;

8672 ainfo.dev = device;

8673 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );

8675 if ( result == -1 ) {

8676 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";

8677 errorText_ = errorStream_.str();

8678 error( RtAudioError::WARNING );

// Channel capabilities; duplex channel count is the min of in/out.
8683 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;

8684 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;

8685 if ( ainfo.caps & PCM_CAP_DUPLEX ) {

8686 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )

8687 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

8690 // Probe data formats ... do for input

8691 unsigned long mask = ainfo.iformats;

8692 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )

8693 info.nativeFormats |= RTAUDIO_SINT16;

8694 if ( mask & AFMT_S8 )

8695 info.nativeFormats |= RTAUDIO_SINT8;

8696 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )

8697 info.nativeFormats |= RTAUDIO_SINT32;

8698 if ( mask & AFMT_FLOAT )

8699 info.nativeFormats |= RTAUDIO_FLOAT32;

8700 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )

8701 info.nativeFormats |= RTAUDIO_SINT24;

8703 // Check that we have at least one supported format

8704 if ( info.nativeFormats == 0 ) {

8705 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";

8706 errorText_ = errorStream_.str();

8707 error( RtAudioError::WARNING );

8711 // Probe the supported sample rates.

8712 info.sampleRates.clear();

// If the driver reports an explicit rate list, intersect it with our
// SAMPLE_RATES table; otherwise fall back to the min/max range below.
8713 if ( ainfo.nrates ) {

8714 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {

8715 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {

8716 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {

8717 info.sampleRates.push_back( SAMPLE_RATES[k] );

// Prefer the highest supported rate not exceeding 48 kHz.
8719 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )

8720 info.preferredSampleRate = SAMPLE_RATES[k];

8728 // Check min and max rate values;

8729 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {

8730 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {

8731 info.sampleRates.push_back( SAMPLE_RATES[k] );

8733 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )

8734 info.preferredSampleRate = SAMPLE_RATES[k];

8739 if ( info.sampleRates.size() == 0 ) {

8740 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";

8741 errorText_ = errorStream_.str();

8742 error( RtAudioError::WARNING );

8745 info.probed = true;

8746 info.name = ainfo.name;
\r
// Open one direction of an OSS device: validate the device and its
// capabilities, open the device node (handling same-device duplex by
// reopening read/write), negotiate channels, sample format, fragment size
// and sample rate via ioctls, allocate buffers, and start the callback
// thread. Returns true on success.
// NOTE(review): extraction fragment — many statements (returns/FAILURE
// values on error paths, flags/fd/buffers declarations, close( mixerfd )
// calls, try/goto scaffolding) are elided from this span.
8753 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

8754 unsigned int firstChannel, unsigned int sampleRate,

8755 RtAudioFormat format, unsigned int *bufferSize,

8756 RtAudio::StreamOptions *options )

8758 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );

8759 if ( mixerfd == -1 ) {

8760 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";

8764 oss_sysinfo sysinfo;

8765 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );

8766 if ( result == -1 ) {

8768 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";

8772 unsigned nDevices = sysinfo.numaudios;

8773 if ( nDevices == 0 ) {

8774 // This should not happen because a check is made before this function is called.

8776 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";

8780 if ( device >= nDevices ) {

8781 // This should not happen because a check is made before this function is called.

8783 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";

8787 oss_audioinfo ainfo;

8788 ainfo.dev = device;

8789 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );

8791 if ( result == -1 ) {

8792 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";

8793 errorText_ = errorStream_.str();

8797 // Check if device supports input or output

8798 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||

8799 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {

8800 if ( mode == OUTPUT )

8801 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";

8803 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";

8804 errorText_ = errorStream_.str();

// Choose the open flags; OSS needs a single O_RDWR descriptor for duplex
// on the same device, so a previously opened output fd is closed here.
8809 OssHandle *handle = (OssHandle *) stream_.apiHandle;

8810 if ( mode == OUTPUT )

8811 flags |= O_WRONLY;

8812 else { // mode == INPUT

8813 if (stream_.mode == OUTPUT && stream_.device[0] == device) {

8814 // We just set the same device for playback ... close and reopen for duplex (OSS only).

8815 close( handle->id[0] );

8816 handle->id[0] = 0;

8817 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {

8818 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";

8819 errorText_ = errorStream_.str();

8822 // Check that the number previously set channels is the same.

8823 if ( stream_.nUserChannels[0] != channels ) {

8824 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";

8825 errorText_ = errorStream_.str();

8831 flags |= O_RDONLY;

8834 // Set exclusive access if specified.

8835 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;

8837 // Try to open the device.

8839 fd = open( ainfo.devnode, flags, 0 );

8841 if ( errno == EBUSY )

8842 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";

8844 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";

8845 errorText_ = errorStream_.str();

8849 // For duplex operation, specifically set this mode (this doesn't seem to work).

// NOTE(review): `flags | O_RDWR` is a bitwise-OR, not a comparison, so it
// is always non-zero — likely intended as a flags test; confirm against
// upstream before changing, since the original comment already calls the
// ioctl ineffective.
8851 if ( flags | O_RDWR ) {

8852 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );

8853 if ( result == -1) {

8854 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";

8855 errorText_ = errorStream_.str();

8861 // Check the device channel support.

8862 stream_.nUserChannels[mode] = channels;

8863 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {

8865 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";

8866 errorText_ = errorStream_.str();

8870 // Set the number of channels.

8871 int deviceChannels = channels + firstChannel;

8872 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );

8873 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {

8875 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";

8876 errorText_ = errorStream_.str();

8879 stream_.nDeviceChannels[mode] = deviceChannels;

8881 // Get the data format mask

8883 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );

8884 if ( result == -1 ) {

8886 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";

8887 errorText_ = errorStream_.str();

8891 // Determine how to set the device format.

// Prefer the user's exact format in native-endian (NE) form; fall back to
// opposite-endian (OE) with byte swapping enabled.
8892 stream_.userFormat = format;

8893 int deviceFormat = -1;

8894 stream_.doByteSwap[mode] = false;

8895 if ( format == RTAUDIO_SINT8 ) {

8896 if ( mask & AFMT_S8 ) {

8897 deviceFormat = AFMT_S8;

8898 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

8901 else if ( format == RTAUDIO_SINT16 ) {

8902 if ( mask & AFMT_S16_NE ) {

8903 deviceFormat = AFMT_S16_NE;

8904 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

8906 else if ( mask & AFMT_S16_OE ) {

8907 deviceFormat = AFMT_S16_OE;

8908 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

8909 stream_.doByteSwap[mode] = true;

8912 else if ( format == RTAUDIO_SINT24 ) {

8913 if ( mask & AFMT_S24_NE ) {

8914 deviceFormat = AFMT_S24_NE;

8915 stream_.deviceFormat[mode] = RTAUDIO_SINT24;

8917 else if ( mask & AFMT_S24_OE ) {

8918 deviceFormat = AFMT_S24_OE;

8919 stream_.deviceFormat[mode] = RTAUDIO_SINT24;

8920 stream_.doByteSwap[mode] = true;

8923 else if ( format == RTAUDIO_SINT32 ) {

8924 if ( mask & AFMT_S32_NE ) {

8925 deviceFormat = AFMT_S32_NE;

8926 stream_.deviceFormat[mode] = RTAUDIO_SINT32;

8928 else if ( mask & AFMT_S32_OE ) {

8929 deviceFormat = AFMT_S32_OE;

8930 stream_.deviceFormat[mode] = RTAUDIO_SINT32;

8931 stream_.doByteSwap[mode] = true;

// Requested format unavailable: pick the best native alternative (widest
// NE first, then OE with swap, then 8-bit) and convert internally.
8935 if ( deviceFormat == -1 ) {

8936 // The user requested format is not natively supported by the device.

8937 if ( mask & AFMT_S16_NE ) {

8938 deviceFormat = AFMT_S16_NE;

8939 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

8941 else if ( mask & AFMT_S32_NE ) {

8942 deviceFormat = AFMT_S32_NE;

8943 stream_.deviceFormat[mode] = RTAUDIO_SINT32;

8945 else if ( mask & AFMT_S24_NE ) {

8946 deviceFormat = AFMT_S24_NE;

8947 stream_.deviceFormat[mode] = RTAUDIO_SINT24;

8949 else if ( mask & AFMT_S16_OE ) {

8950 deviceFormat = AFMT_S16_OE;

8951 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

8952 stream_.doByteSwap[mode] = true;

8954 else if ( mask & AFMT_S32_OE ) {

8955 deviceFormat = AFMT_S32_OE;

8956 stream_.deviceFormat[mode] = RTAUDIO_SINT32;

8957 stream_.doByteSwap[mode] = true;

8959 else if ( mask & AFMT_S24_OE ) {

8960 deviceFormat = AFMT_S24_OE;

8961 stream_.deviceFormat[mode] = RTAUDIO_SINT24;

8962 stream_.doByteSwap[mode] = true;

8964 else if ( mask & AFMT_S8) {

8965 deviceFormat = AFMT_S8;

8966 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

8970 if ( stream_.deviceFormat[mode] == 0 ) {

8971 // This really shouldn't happen ...

8973 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";

8974 errorText_ = errorStream_.str();

8978 // Set the data format.

8979 int temp = deviceFormat;

8980 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );

8981 if ( result == -1 || deviceFormat != temp ) {

8983 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";

8984 errorText_ = errorStream_.str();

8988 // Attempt to set the buffer size.  According to OSS, the minimum

8989 // number of buffers is two.  The supposed minimum buffer size is 16

8990 // bytes, so that will be our lower bound.  The argument to this

8991 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in

8992 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.

8993 // We'll check the actual value used near the end of the setup

8995 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;

8996 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;

8998 if ( options ) buffers = options->numberOfBuffers;

8999 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;

9000 if ( buffers < 2 ) buffers = 3;

// Encode buffers and log2(bytes) into the 0xMMMMSSSS fragment spec.
9001 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );

9002 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );

9003 if ( result == -1 ) {

9005 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";

9006 errorText_ = errorStream_.str();

9009 stream_.nBuffers = buffers;

9011 // Save buffer size (in sample frames).

9012 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );

9013 stream_.bufferSize = *bufferSize;

9015 // Set the sample rate.

9016 int srate = sampleRate;

9017 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );

9018 if ( result == -1 ) {

9020 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";

9021 errorText_ = errorStream_.str();

9025 // Verify the sample rate setup worked.

// Allow a small tolerance (100 Hz) between requested and actual rate.
9026 if ( abs( srate - sampleRate ) > 100 ) {

9028 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";

9029 errorText_ = errorStream_.str();

9032 stream_.sampleRate = sampleRate;

9034 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {

9035 // We're doing duplex setup here.

9036 stream_.deviceFormat[0] = stream_.deviceFormat[1];

9037 stream_.nDeviceChannels[0] = deviceChannels;

9040 // Set interleaving parameters.

9041 stream_.userInterleaved = true;

9042 stream_.deviceInterleaved[mode] =  true;

9043 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )

9044 stream_.userInterleaved = false;

9046 // Set flags for buffer conversion

9047 stream_.doConvertBuffer[mode] = false;

9048 if ( stream_.userFormat != stream_.deviceFormat[mode] )

9049 stream_.doConvertBuffer[mode] = true;

9050 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )

9051 stream_.doConvertBuffer[mode] = true;

9052 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

9053 stream_.nUserChannels[mode] > 1 )

9054 stream_.doConvertBuffer[mode] = true;

9056 // Allocate the stream handles if necessary and then save.

9057 if ( stream_.apiHandle == 0 ) {

9059 handle = new OssHandle;

9061 catch ( std::bad_alloc& ) {

9062 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";

9066 if ( pthread_cond_init( &handle->runnable, NULL ) ) {

9067 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";

9071 stream_.apiHandle = (void *) handle;

9074 handle = (OssHandle *) stream_.apiHandle;

9076 handle->id[mode] = fd;

9078 // Allocate necessary internal buffers.

9079 unsigned long bufferBytes;

9080 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

9081 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

9082 if ( stream_.userBuffer[mode] == NULL ) {

9083 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";

9087 if ( stream_.doConvertBuffer[mode] ) {

9089 bool makeBuffer = true;

9090 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

// For duplex, reuse the output-side device buffer when it is big enough.
9091 if ( mode == INPUT ) {

9092 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

9093 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

9094 if ( bufferBytes <= bytesOut ) makeBuffer = false;

9098 if ( makeBuffer ) {

9099 bufferBytes *= *bufferSize;

9100 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

9101 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

9102 if ( stream_.deviceBuffer == NULL ) {

9103 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";

9109 stream_.device[mode] = device;

9110 stream_.state = STREAM_STOPPED;

9112 // Setup the buffer conversion information structure.

9113 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

9115 // Setup thread if necessary.

9116 if ( stream_.mode == OUTPUT && mode == INPUT ) {

9117 // We had already set up an output stream.

9118 stream_.mode = DUPLEX;

9119 if ( stream_.device[0] == device ) handle->id[0] = fd;

9122 stream_.mode = mode;

9124 // Setup callback thread.

9125 stream_.callbackInfo.object = (void *) this;

9127 // Set the thread attributes for joinable and realtime scheduling

9128 // priority.  The higher priority will only take affect if the

9129 // program is run as root or suid.

9130 pthread_attr_t attr;

9131 pthread_attr_init( &attr );

9132 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );

9133 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)

// Clamp the user-requested priority into the valid SCHED_RR range.
9134 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {

9135 struct sched_param param;

9136 int priority = options->priority;

9137 int min = sched_get_priority_min( SCHED_RR );

9138 int max = sched_get_priority_max( SCHED_RR );

9139 if ( priority < min ) priority = min;

9140 else if ( priority > max ) priority = max;

9141 param.sched_priority = priority;

9142 pthread_attr_setschedparam( &attr, &param );

9143 pthread_attr_setschedpolicy( &attr, SCHED_RR );

9146 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );

9148 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );

9151 stream_.callbackInfo.isRunning = true;

9152 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );

9153 pthread_attr_destroy( &attr );

9155 stream_.callbackInfo.isRunning = false;

9156 errorText_ = "RtApiOss::error creating callback thread!";

// Error-unwinding path: destroy the condvar, close fds, free the handle
// and all buffers (label elided in this fragment).
9165 pthread_cond_destroy( &handle->runnable );

9166 if ( handle->id[0] ) close( handle->id[0] );

9167 if ( handle->id[1] ) close( handle->id[1] );

9169 stream_.apiHandle = 0;

9172 for ( int i=0; i<2; i++ ) {

9173 if ( stream_.userBuffer[i] ) {

9174 free( stream_.userBuffer[i] );

9175 stream_.userBuffer[i] = 0;

9179 if ( stream_.deviceBuffer ) {

9180 free( stream_.deviceBuffer );

9181 stream_.deviceBuffer = 0;
\r
// Tear down the OSS stream: stop and join the callback thread (waking it
// if parked), halt the DSP if still running, close the device fds, destroy
// the condvar, and free all buffers.
9187 void RtApiOss :: closeStream()

9189 if ( stream_.state == STREAM_CLOSED ) {

9190 errorText_ = "RtApiOss::closeStream(): no open stream to close!";

9191 error( RtAudioError::WARNING );

9195 OssHandle *handle = (OssHandle *) stream_.apiHandle;

// Signal the callback loop to exit, then wake it if it is waiting.
9196 stream_.callbackInfo.isRunning = false;

9197 MUTEX_LOCK( &stream_.mutex );

9198 if ( stream_.state == STREAM_STOPPED )

9199 pthread_cond_signal( &handle->runnable );

9200 MUTEX_UNLOCK( &stream_.mutex );

9201 pthread_join( stream_.callbackInfo.thread, NULL );

// Halt any in-flight DSP activity before closing the descriptors.
9203 if ( stream_.state == STREAM_RUNNING ) {

9204 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )

9205 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );

9207 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );

9208 stream_.state = STREAM_STOPPED;

9212 pthread_cond_destroy( &handle->runnable );

9213 if ( handle->id[0] ) close( handle->id[0] );

9214 if ( handle->id[1] ) close( handle->id[1] );

9216 stream_.apiHandle = 0;

9219 for ( int i=0; i<2; i++ ) {

9220 if ( stream_.userBuffer[i] ) {

9221 free( stream_.userBuffer[i] );

9222 stream_.userBuffer[i] = 0;

9226 if ( stream_.deviceBuffer ) {

9227 free( stream_.deviceBuffer );

9228 stream_.deviceBuffer = 0;

9231 stream_.mode = UNINITIALIZED;

9232 stream_.state = STREAM_CLOSED;
\r
9235 void RtApiOss :: startStream()
\r
9238 if ( stream_.state == STREAM_RUNNING ) {
\r
9239 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9240 error( RtAudioError::WARNING );
\r
9244 MUTEX_LOCK( &stream_.mutex );
\r
9246 stream_.state = STREAM_RUNNING;
\r
9248 // No need to do anything else here ... OSS automatically starts
\r
9249 // when fed samples.
\r
9251 MUTEX_UNLOCK( &stream_.mutex );
\r
9253 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9254 pthread_cond_signal( &handle->runnable );
\r
9257 void RtApiOss :: stopStream()
\r
9260 if ( stream_.state == STREAM_STOPPED ) {
\r
9261 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9262 error( RtAudioError::WARNING );
\r
9266 MUTEX_LOCK( &stream_.mutex );
\r
9268 // The state might change while waiting on a mutex.
\r
9269 if ( stream_.state == STREAM_STOPPED ) {
\r
9270 MUTEX_UNLOCK( &stream_.mutex );
\r
9275 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9276 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9278 // Flush the output with zeros a few times.
\r
9281 RtAudioFormat format;
\r
9283 if ( stream_.doConvertBuffer[0] ) {
\r
9284 buffer = stream_.deviceBuffer;
\r
9285 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9286 format = stream_.deviceFormat[0];
\r
9289 buffer = stream_.userBuffer[0];
\r
9290 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9291 format = stream_.userFormat;
\r
9294 memset( buffer, 0, samples * formatBytes(format) );
\r
9295 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9296 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9297 if ( result == -1 ) {
\r
9298 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9299 error( RtAudioError::WARNING );
\r
9303 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9304 if ( result == -1 ) {
\r
9305 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9306 errorText_ = errorStream_.str();
\r
9309 handle->triggered = false;
\r
9312 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9313 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9314 if ( result == -1 ) {
\r
9315 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9316 errorText_ = errorStream_.str();
\r
9322 stream_.state = STREAM_STOPPED;
\r
9323 MUTEX_UNLOCK( &stream_.mutex );
\r
9325 if ( result != -1 ) return;
\r
9326 error( RtAudioError::SYSTEM_ERROR );
\r
9329 void RtApiOss :: abortStream()
\r
9332 if ( stream_.state == STREAM_STOPPED ) {
\r
9333 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9334 error( RtAudioError::WARNING );
\r
9338 MUTEX_LOCK( &stream_.mutex );
\r
9340 // The state might change while waiting on a mutex.
\r
9341 if ( stream_.state == STREAM_STOPPED ) {
\r
9342 MUTEX_UNLOCK( &stream_.mutex );
\r
9347 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9348 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9349 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9350 if ( result == -1 ) {
\r
9351 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9352 errorText_ = errorStream_.str();
\r
9355 handle->triggered = false;
\r
9358 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9359 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9360 if ( result == -1 ) {
\r
9361 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9362 errorText_ = errorStream_.str();
\r
9368 stream_.state = STREAM_STOPPED;
\r
9369 MUTEX_UNLOCK( &stream_.mutex );
\r
9371 if ( result != -1 ) return;
\r
9372 error( RtAudioError::SYSTEM_ERROR );
\r
9375 void RtApiOss :: callbackEvent()
\r
9377 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9378 if ( stream_.state == STREAM_STOPPED ) {
\r
9379 MUTEX_LOCK( &stream_.mutex );
\r
9380 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9381 if ( stream_.state != STREAM_RUNNING ) {
\r
9382 MUTEX_UNLOCK( &stream_.mutex );
\r
9385 MUTEX_UNLOCK( &stream_.mutex );
\r
9388 if ( stream_.state == STREAM_CLOSED ) {
\r
9389 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9390 error( RtAudioError::WARNING );
\r
9394 // Invoke user callback to get fresh output data.
\r
9395 int doStopStream = 0;
\r
9396 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9397 double streamTime = getStreamTime();
\r
9398 RtAudioStreamStatus status = 0;
\r
9399 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9400 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9401 handle->xrun[0] = false;
\r
9403 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9404 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9405 handle->xrun[1] = false;
\r
9407 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9408 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9409 if ( doStopStream == 2 ) {
\r
9410 this->abortStream();
\r
9414 MUTEX_LOCK( &stream_.mutex );
\r
9416 // The state might change while waiting on a mutex.
\r
9417 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9422 RtAudioFormat format;
\r
9424 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9426 // Setup parameters and do buffer conversion if necessary.
\r
9427 if ( stream_.doConvertBuffer[0] ) {
\r
9428 buffer = stream_.deviceBuffer;
\r
9429 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9430 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9431 format = stream_.deviceFormat[0];
\r
9434 buffer = stream_.userBuffer[0];
\r
9435 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9436 format = stream_.userFormat;
\r
9439 // Do byte swapping if necessary.
\r
9440 if ( stream_.doByteSwap[0] )
\r
9441 byteSwapBuffer( buffer, samples, format );
\r
9443 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9445 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9446 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9447 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9448 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9449 handle->triggered = true;
\r
9452 // Write samples to device.
\r
9453 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9455 if ( result == -1 ) {
\r
9456 // We'll assume this is an underrun, though there isn't a
\r
9457 // specific means for determining that.
\r
9458 handle->xrun[0] = true;
\r
9459 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9460 error( RtAudioError::WARNING );
\r
9461 // Continue on to input section.
\r
9465 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9467 // Setup parameters.
\r
9468 if ( stream_.doConvertBuffer[1] ) {
\r
9469 buffer = stream_.deviceBuffer;
\r
9470 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9471 format = stream_.deviceFormat[1];
\r
9474 buffer = stream_.userBuffer[1];
\r
9475 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9476 format = stream_.userFormat;
\r
9479 // Read samples from device.
\r
9480 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9482 if ( result == -1 ) {
\r
9483 // We'll assume this is an overrun, though there isn't a
\r
9484 // specific means for determining that.
\r
9485 handle->xrun[1] = true;
\r
9486 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9487 error( RtAudioError::WARNING );
\r
9491 // Do byte swapping if necessary.
\r
9492 if ( stream_.doByteSwap[1] )
\r
9493 byteSwapBuffer( buffer, samples, format );
\r
9495 // Do buffer conversion if necessary.
\r
9496 if ( stream_.doConvertBuffer[1] )
\r
9497 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9501 MUTEX_UNLOCK( &stream_.mutex );
\r
9503 RtApi::tickStreamTime();
\r
9504 if ( doStopStream == 1 ) this->stopStream();
\r
9507 static void *ossCallbackHandler( void *ptr )
\r
9509 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9510 RtApiOss *object = (RtApiOss *) info->object;
\r
9511 bool *isRunning = &info->isRunning;
\r
9513 while ( *isRunning == true ) {
\r
9514 pthread_testcancel();
\r
9515 object->callbackEvent();
\r
9518 pthread_exit( NULL );
\r
9521 //******************** End of __LINUX_OSS__ *********************//
\r
9525 // *************************************************** //
\r
9527 // Protected common (OS-independent) RtAudio methods.
\r
9529 // *************************************************** //
\r
9531 // This method can be modified to control the behavior of error
\r
9532 // message printing.
\r
9533 void RtApi :: error( RtAudioError::Type type )
\r
9535 errorStream_.str(""); // clear the ostringstream
\r
9537 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9538 if ( errorCallback ) {
\r
9539 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9541 if ( firstErrorOccurred_ )
\r
9544 firstErrorOccurred_ = true;
\r
9545 const std::string errorMessage = errorText_;
\r
9547 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9548 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9552 errorCallback( type, errorMessage );
\r
9553 firstErrorOccurred_ = false;
\r
9557 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9558 std::cerr << '\n' << errorText_ << "\n\n";
\r
9559 else if ( type != RtAudioError::WARNING )
\r
9560 throw( RtAudioError( errorText_, type ) );
\r
9563 void RtApi :: verifyStream()
\r
9565 if ( stream_.state == STREAM_CLOSED ) {
\r
9566 errorText_ = "RtApi:: a stream is not open!";
\r
9567 error( RtAudioError::INVALID_USE );
\r
9571 void RtApi :: clearStreamInfo()
\r
9573 stream_.mode = UNINITIALIZED;
\r
9574 stream_.state = STREAM_CLOSED;
\r
9575 stream_.sampleRate = 0;
\r
9576 stream_.bufferSize = 0;
\r
9577 stream_.nBuffers = 0;
\r
9578 stream_.userFormat = 0;
\r
9579 stream_.userInterleaved = true;
\r
9580 stream_.streamTime = 0.0;
\r
9581 stream_.apiHandle = 0;
\r
9582 stream_.deviceBuffer = 0;
\r
9583 stream_.callbackInfo.callback = 0;
\r
9584 stream_.callbackInfo.userData = 0;
\r
9585 stream_.callbackInfo.isRunning = false;
\r
9586 stream_.callbackInfo.errorCallback = 0;
\r
9587 for ( int i=0; i<2; i++ ) {
\r
9588 stream_.device[i] = 11111;
\r
9589 stream_.doConvertBuffer[i] = false;
\r
9590 stream_.deviceInterleaved[i] = true;
\r
9591 stream_.doByteSwap[i] = false;
\r
9592 stream_.nUserChannels[i] = 0;
\r
9593 stream_.nDeviceChannels[i] = 0;
\r
9594 stream_.channelOffset[i] = 0;
\r
9595 stream_.deviceFormat[i] = 0;
\r
9596 stream_.latency[i] = 0;
\r
9597 stream_.userBuffer[i] = 0;
\r
9598 stream_.convertInfo[i].channels = 0;
\r
9599 stream_.convertInfo[i].inJump = 0;
\r
9600 stream_.convertInfo[i].outJump = 0;
\r
9601 stream_.convertInfo[i].inFormat = 0;
\r
9602 stream_.convertInfo[i].outFormat = 0;
\r
9603 stream_.convertInfo[i].inOffset.clear();
\r
9604 stream_.convertInfo[i].outOffset.clear();
\r
9608 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9610 if ( format == RTAUDIO_SINT16 )
\r
9612 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9614 else if ( format == RTAUDIO_FLOAT64 )
\r
9616 else if ( format == RTAUDIO_SINT24 )
\r
9618 else if ( format == RTAUDIO_SINT8 )
\r
9621 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9622 error( RtAudioError::WARNING );
\r
9627 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9629 if ( mode == INPUT ) { // convert device to user buffer
\r
9630 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9631 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9632 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9633 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9635 else { // convert user to device buffer
\r
9636 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9637 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9638 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9639 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9642 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9643 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9645 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9647 // Set up the interleave/deinterleave offsets.
\r
9648 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9649 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9650 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9651 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9652 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9653 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9654 stream_.convertInfo[mode].inJump = 1;
\r
9658 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9659 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9660 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9661 stream_.convertInfo[mode].outJump = 1;
\r
9665 else { // no (de)interleaving
\r
9666 if ( stream_.userInterleaved ) {
\r
9667 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9668 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9669 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9673 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9674 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9675 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9676 stream_.convertInfo[mode].inJump = 1;
\r
9677 stream_.convertInfo[mode].outJump = 1;
\r
9682 // Add channel offset.
\r
9683 if ( firstChannel > 0 ) {
\r
9684 if ( stream_.deviceInterleaved[mode] ) {
\r
9685 if ( mode == OUTPUT ) {
\r
9686 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9687 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9690 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9691 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9695 if ( mode == OUTPUT ) {
\r
9696 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9697 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9700 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9701 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9707 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9709 // This function does format conversion, input/output channel compensation, and
\r
9710 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9711 // the lower three bytes of a 32-bit integer.
\r
9713 // Clear our device buffer when in/out duplex device channels are different
\r
9714 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9715 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9716 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9719 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9721 Float64 *out = (Float64 *)outBuffer;
\r
9723 if (info.inFormat == RTAUDIO_SINT8) {
\r
9724 signed char *in = (signed char *)inBuffer;
\r
9725 scale = 1.0 / 127.5;
\r
9726 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9727 for (j=0; j<info.channels; j++) {
\r
9728 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9729 out[info.outOffset[j]] += 0.5;
\r
9730 out[info.outOffset[j]] *= scale;
\r
9732 in += info.inJump;
\r
9733 out += info.outJump;
\r
9736 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9737 Int16 *in = (Int16 *)inBuffer;
\r
9738 scale = 1.0 / 32767.5;
\r
9739 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9740 for (j=0; j<info.channels; j++) {
\r
9741 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9742 out[info.outOffset[j]] += 0.5;
\r
9743 out[info.outOffset[j]] *= scale;
\r
9745 in += info.inJump;
\r
9746 out += info.outJump;
\r
9749 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9750 Int24 *in = (Int24 *)inBuffer;
\r
9751 scale = 1.0 / 8388607.5;
\r
9752 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9753 for (j=0; j<info.channels; j++) {
\r
9754 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9755 out[info.outOffset[j]] += 0.5;
\r
9756 out[info.outOffset[j]] *= scale;
\r
9758 in += info.inJump;
\r
9759 out += info.outJump;
\r
9762 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9763 Int32 *in = (Int32 *)inBuffer;
\r
9764 scale = 1.0 / 2147483647.5;
\r
9765 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9766 for (j=0; j<info.channels; j++) {
\r
9767 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9768 out[info.outOffset[j]] += 0.5;
\r
9769 out[info.outOffset[j]] *= scale;
\r
9771 in += info.inJump;
\r
9772 out += info.outJump;
\r
9775 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9776 Float32 *in = (Float32 *)inBuffer;
\r
9777 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9778 for (j=0; j<info.channels; j++) {
\r
9779 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9781 in += info.inJump;
\r
9782 out += info.outJump;
\r
9785 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9786 // Channel compensation and/or (de)interleaving only.
\r
9787 Float64 *in = (Float64 *)inBuffer;
\r
9788 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9789 for (j=0; j<info.channels; j++) {
\r
9790 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9792 in += info.inJump;
\r
9793 out += info.outJump;
\r
9797 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9799 Float32 *out = (Float32 *)outBuffer;
\r
9801 if (info.inFormat == RTAUDIO_SINT8) {
\r
9802 signed char *in = (signed char *)inBuffer;
\r
9803 scale = (Float32) ( 1.0 / 127.5 );
\r
9804 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9805 for (j=0; j<info.channels; j++) {
\r
9806 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9807 out[info.outOffset[j]] += 0.5;
\r
9808 out[info.outOffset[j]] *= scale;
\r
9810 in += info.inJump;
\r
9811 out += info.outJump;
\r
9814 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9815 Int16 *in = (Int16 *)inBuffer;
\r
9816 scale = (Float32) ( 1.0 / 32767.5 );
\r
9817 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9818 for (j=0; j<info.channels; j++) {
\r
9819 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9820 out[info.outOffset[j]] += 0.5;
\r
9821 out[info.outOffset[j]] *= scale;
\r
9823 in += info.inJump;
\r
9824 out += info.outJump;
\r
9827 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9828 Int24 *in = (Int24 *)inBuffer;
\r
9829 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9830 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9831 for (j=0; j<info.channels; j++) {
\r
9832 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9833 out[info.outOffset[j]] += 0.5;
\r
9834 out[info.outOffset[j]] *= scale;
\r
9836 in += info.inJump;
\r
9837 out += info.outJump;
\r
9840 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9841 Int32 *in = (Int32 *)inBuffer;
\r
9842 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9843 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9844 for (j=0; j<info.channels; j++) {
\r
9845 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9846 out[info.outOffset[j]] += 0.5;
\r
9847 out[info.outOffset[j]] *= scale;
\r
9849 in += info.inJump;
\r
9850 out += info.outJump;
\r
9853 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9854 // Channel compensation and/or (de)interleaving only.
\r
9855 Float32 *in = (Float32 *)inBuffer;
\r
9856 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9857 for (j=0; j<info.channels; j++) {
\r
9858 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9860 in += info.inJump;
\r
9861 out += info.outJump;
\r
9864 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9865 Float64 *in = (Float64 *)inBuffer;
\r
9866 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9867 for (j=0; j<info.channels; j++) {
\r
9868 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9870 in += info.inJump;
\r
9871 out += info.outJump;
\r
9875 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9876 Int32 *out = (Int32 *)outBuffer;
\r
9877 if (info.inFormat == RTAUDIO_SINT8) {
\r
9878 signed char *in = (signed char *)inBuffer;
\r
9879 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9880 for (j=0; j<info.channels; j++) {
\r
9881 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9882 out[info.outOffset[j]] <<= 24;
\r
9884 in += info.inJump;
\r
9885 out += info.outJump;
\r
9888 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9889 Int16 *in = (Int16 *)inBuffer;
\r
9890 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9891 for (j=0; j<info.channels; j++) {
\r
9892 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9893 out[info.outOffset[j]] <<= 16;
\r
9895 in += info.inJump;
\r
9896 out += info.outJump;
\r
9899 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9900 Int24 *in = (Int24 *)inBuffer;
\r
9901 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9902 for (j=0; j<info.channels; j++) {
\r
9903 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9904 out[info.outOffset[j]] <<= 8;
\r
9906 in += info.inJump;
\r
9907 out += info.outJump;
\r
9910 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9911 // Channel compensation and/or (de)interleaving only.
\r
9912 Int32 *in = (Int32 *)inBuffer;
\r
9913 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9914 for (j=0; j<info.channels; j++) {
\r
9915 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9917 in += info.inJump;
\r
9918 out += info.outJump;
\r
9921 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9922 Float32 *in = (Float32 *)inBuffer;
\r
9923 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9924 for (j=0; j<info.channels; j++) {
\r
9925 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9927 in += info.inJump;
\r
9928 out += info.outJump;
\r
9931 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9932 Float64 *in = (Float64 *)inBuffer;
\r
9933 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9934 for (j=0; j<info.channels; j++) {
\r
9935 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9937 in += info.inJump;
\r
9938 out += info.outJump;
\r
9942 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9943 Int24 *out = (Int24 *)outBuffer;
\r
9944 if (info.inFormat == RTAUDIO_SINT8) {
\r
9945 signed char *in = (signed char *)inBuffer;
\r
9946 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9947 for (j=0; j<info.channels; j++) {
\r
9948 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9949 //out[info.outOffset[j]] <<= 16;
\r
9951 in += info.inJump;
\r
9952 out += info.outJump;
\r
9955 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9956 Int16 *in = (Int16 *)inBuffer;
\r
9957 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9958 for (j=0; j<info.channels; j++) {
\r
9959 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9960 //out[info.outOffset[j]] <<= 8;
\r
9962 in += info.inJump;
\r
9963 out += info.outJump;
\r
9966 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9967 // Channel compensation and/or (de)interleaving only.
\r
9968 Int24 *in = (Int24 *)inBuffer;
\r
9969 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9970 for (j=0; j<info.channels; j++) {
\r
9971 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9973 in += info.inJump;
\r
9974 out += info.outJump;
\r
9977 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9978 Int32 *in = (Int32 *)inBuffer;
\r
9979 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9980 for (j=0; j<info.channels; j++) {
\r
9981 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9982 //out[info.outOffset[j]] >>= 8;
\r
9984 in += info.inJump;
\r
9985 out += info.outJump;
\r
9988 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9989 Float32 *in = (Float32 *)inBuffer;
\r
9990 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9991 for (j=0; j<info.channels; j++) {
\r
9992 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9994 in += info.inJump;
\r
9995 out += info.outJump;
\r
9998 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9999 Float64 *in = (Float64 *)inBuffer;
\r
10000 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10001 for (j=0; j<info.channels; j++) {
\r
10002 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
10004 in += info.inJump;
\r
10005 out += info.outJump;
\r
10009 else if (info.outFormat == RTAUDIO_SINT16) {
\r
10010 Int16 *out = (Int16 *)outBuffer;
\r
10011 if (info.inFormat == RTAUDIO_SINT8) {
\r
10012 signed char *in = (signed char *)inBuffer;
\r
10013 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10014 for (j=0; j<info.channels; j++) {
\r
10015 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
10016 out[info.outOffset[j]] <<= 8;
\r
10018 in += info.inJump;
\r
10019 out += info.outJump;
\r
10022 else if (info.inFormat == RTAUDIO_SINT16) {
\r
10023 // Channel compensation and/or (de)interleaving only.
\r
10024 Int16 *in = (Int16 *)inBuffer;
\r
10025 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10026 for (j=0; j<info.channels; j++) {
\r
10027 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10029 in += info.inJump;
\r
10030 out += info.outJump;
\r
10033 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10034 Int24 *in = (Int24 *)inBuffer;
\r
10035 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10036 for (j=0; j<info.channels; j++) {
\r
10037 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
10039 in += info.inJump;
\r
10040 out += info.outJump;
\r
10043 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10044 Int32 *in = (Int32 *)inBuffer;
\r
10045 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10046 for (j=0; j<info.channels; j++) {
\r
10047 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
10049 in += info.inJump;
\r
10050 out += info.outJump;
\r
10053 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10054 Float32 *in = (Float32 *)inBuffer;
\r
10055 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10056 for (j=0; j<info.channels; j++) {
\r
10057 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10059 in += info.inJump;
\r
10060 out += info.outJump;
\r
10063 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10064 Float64 *in = (Float64 *)inBuffer;
\r
10065 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10066 for (j=0; j<info.channels; j++) {
\r
10067 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10069 in += info.inJump;
\r
10070 out += info.outJump;
\r
10074 else if (info.outFormat == RTAUDIO_SINT8) {
\r
10075 signed char *out = (signed char *)outBuffer;
\r
10076 if (info.inFormat == RTAUDIO_SINT8) {
\r
10077 // Channel compensation and/or (de)interleaving only.
\r
10078 signed char *in = (signed char *)inBuffer;
\r
10079 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10080 for (j=0; j<info.channels; j++) {
\r
10081 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10083 in += info.inJump;
\r
10084 out += info.outJump;
\r
10087 if (info.inFormat == RTAUDIO_SINT16) {
\r
10088 Int16 *in = (Int16 *)inBuffer;
\r
10089 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10090 for (j=0; j<info.channels; j++) {
\r
10091 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10093 in += info.inJump;
\r
10094 out += info.outJump;
\r
10097 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10098 Int24 *in = (Int24 *)inBuffer;
\r
10099 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10100 for (j=0; j<info.channels; j++) {
\r
10101 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10103 in += info.inJump;
\r
10104 out += info.outJump;
\r
10107 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10108 Int32 *in = (Int32 *)inBuffer;
\r
10109 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10110 for (j=0; j<info.channels; j++) {
\r
10111 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10113 in += info.inJump;
\r
10114 out += info.outJump;
\r
10117 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10118 Float32 *in = (Float32 *)inBuffer;
\r
10119 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10120 for (j=0; j<info.channels; j++) {
\r
10121 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10123 in += info.inJump;
\r
10124 out += info.outJump;
\r
10127 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10128 Float64 *in = (Float64 *)inBuffer;
\r
10129 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10130 for (j=0; j<info.channels; j++) {
\r
10131 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10133 in += info.inJump;
\r
10134 out += info.outJump;
\r
10140 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10141 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10142 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10144 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10146 register char val;
\r
10147 register char *ptr;
\r
10150 if ( format == RTAUDIO_SINT16 ) {
\r
10151 for ( unsigned int i=0; i<samples; i++ ) {
\r
10152 // Swap 1st and 2nd bytes.
\r
10154 *(ptr) = *(ptr+1);
\r
10157 // Increment 2 bytes.
\r
10161 else if ( format == RTAUDIO_SINT32 ||
\r
10162 format == RTAUDIO_FLOAT32 ) {
\r
10163 for ( unsigned int i=0; i<samples; i++ ) {
\r
10164 // Swap 1st and 4th bytes.
\r
10166 *(ptr) = *(ptr+3);
\r
10169 // Swap 2nd and 3rd bytes.
\r
10172 *(ptr) = *(ptr+1);
\r
10175 // Increment 3 more bytes.
\r
10179 else if ( format == RTAUDIO_SINT24 ) {
\r
10180 for ( unsigned int i=0; i<samples; i++ ) {
\r
10181 // Swap 1st and 3rd bytes.
\r
10183 *(ptr) = *(ptr+2);
\r
10186 // Increment 2 more bytes.
\r
10190 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10191 for ( unsigned int i=0; i<samples; i++ ) {
\r
10192 // Swap 1st and 8th bytes
\r
10194 *(ptr) = *(ptr+7);
\r
10197 // Swap 2nd and 7th bytes
\r
10200 *(ptr) = *(ptr+5);
\r
10203 // Swap 3rd and 6th bytes
\r
10206 *(ptr) = *(ptr+3);
\r
10209 // Swap 4th and 5th bytes
\r
10212 *(ptr) = *(ptr+1);
\r
10215 // Increment 5 more bytes.
\r
10221 // Indentation settings for Vim and Emacs
\r
10223 // Local Variables:
\r
10224 // c-basic-offset: 2
\r
10225 // indent-tabs-mode: nil
\r
10228 // vim: et sts=2 sw=2
\r