1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound, ASIO and WASAPI) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2014 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.1.1
\r
#include "RtAudio.h"
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
#include <algorithm>
50 // Static variable definitions.
\r
51 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
52 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
53 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
54 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform mutex abstraction: map the MUTEX_* vocabulary used throughout
// this file onto Win32 critical sections or POSIX pthread mutexes.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  #include "tchar.h"

  // Narrow-string overload: no conversion required.
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Wide-string overload: convert UTF-16 to a UTF-8 encoded std::string.
  // The first WideCharToMultiByte call computes the required buffer size
  // (including the terminating NUL, hence the length-1 string size).
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
// *************************************************** //
//
// RtAudio definitions.
//
// *************************************************** //
95 std::string RtAudio :: getVersion( void ) throw()
\r
97 return RTAUDIO_VERSION;
\r
100 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
104 // The order here will control the order of RtAudio's API search in
\r
105 // the constructor.
\r
106 #if defined(__UNIX_JACK__)
\r
107 apis.push_back( UNIX_JACK );
\r
109 #if defined(__LINUX_ALSA__)
\r
110 apis.push_back( LINUX_ALSA );
\r
112 #if defined(__LINUX_PULSE__)
\r
113 apis.push_back( LINUX_PULSE );
\r
115 #if defined(__LINUX_OSS__)
\r
116 apis.push_back( LINUX_OSS );
\r
118 #if defined(__WINDOWS_ASIO__)
\r
119 apis.push_back( WINDOWS_ASIO );
\r
121 #if defined(__WINDOWS_WASAPI__)
\r
122 apis.push_back( WINDOWS_WASAPI );
\r
124 #if defined(__WINDOWS_DS__)
\r
125 apis.push_back( WINDOWS_DS );
\r
127 #if defined(__MACOSX_CORE__)
\r
128 apis.push_back( MACOSX_CORE );
\r
130 #if defined(__RTAUDIO_DUMMY__)
\r
131 apis.push_back( RTAUDIO_DUMMY );
\r
135 void RtAudio :: openRtApi( RtAudio::Api api )
\r
141 #if defined(__UNIX_JACK__)
\r
142 if ( api == UNIX_JACK )
\r
143 rtapi_ = new RtApiJack();
\r
145 #if defined(__LINUX_ALSA__)
\r
146 if ( api == LINUX_ALSA )
\r
147 rtapi_ = new RtApiAlsa();
\r
149 #if defined(__LINUX_PULSE__)
\r
150 if ( api == LINUX_PULSE )
\r
151 rtapi_ = new RtApiPulse();
\r
153 #if defined(__LINUX_OSS__)
\r
154 if ( api == LINUX_OSS )
\r
155 rtapi_ = new RtApiOss();
\r
157 #if defined(__WINDOWS_ASIO__)
\r
158 if ( api == WINDOWS_ASIO )
\r
159 rtapi_ = new RtApiAsio();
\r
161 #if defined(__WINDOWS_WASAPI__)
\r
162 if ( api == WINDOWS_WASAPI )
\r
163 rtapi_ = new RtApiWasapi();
\r
165 #if defined(__WINDOWS_DS__)
\r
166 if ( api == WINDOWS_DS )
\r
167 rtapi_ = new RtApiDs();
\r
169 #if defined(__MACOSX_CORE__)
\r
170 if ( api == MACOSX_CORE )
\r
171 rtapi_ = new RtApiCore();
\r
173 #if defined(__RTAUDIO_DUMMY__)
\r
174 if ( api == RTAUDIO_DUMMY )
\r
175 rtapi_ = new RtApiDummy();
\r
179 RtAudio :: RtAudio( RtAudio::Api api )
\r
183 if ( api != UNSPECIFIED ) {
\r
184 // Attempt to open the specified API.
\r
186 if ( rtapi_ ) return;
\r
188 // No compiled support for specified API value. Issue a debug
\r
189 // warning and continue as if no API was specified.
\r
190 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
193 // Iterate through the compiled APIs and return as soon as we find
\r
194 // one with at least one device or we reach the end of the list.
\r
195 std::vector< RtAudio::Api > apis;
\r
196 getCompiledApi( apis );
\r
197 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
198 openRtApi( apis[i] );
\r
199 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
\r
202 if ( rtapi_ ) return;
\r
204 // It should not be possible to get here because the preprocessor
\r
205 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
206 // API-specific definitions are passed to the compiler. But just in
\r
207 // case something weird happens, we'll thow an error.
\r
208 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
209 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
212 RtAudio :: ~RtAudio() throw()
\r
218 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
219 RtAudio::StreamParameters *inputParameters,
\r
220 RtAudioFormat format, unsigned int sampleRate,
\r
221 unsigned int *bufferFrames,
\r
222 RtAudioCallback callback, void *userData,
\r
223 RtAudio::StreamOptions *options,
\r
224 RtAudioErrorCallback errorCallback )
\r
226 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
227 sampleRate, bufferFrames, callback,
\r
228 userData, options, errorCallback );
\r
// *************************************************** //
//
// Public RtApi definitions (see end of file for
// private or protected utility functions).
//
// *************************************************** //
240 stream_.state = STREAM_CLOSED;
\r
241 stream_.mode = UNINITIALIZED;
\r
242 stream_.apiHandle = 0;
\r
243 stream_.userBuffer[0] = 0;
\r
244 stream_.userBuffer[1] = 0;
\r
245 MUTEX_INITIALIZE( &stream_.mutex );
\r
246 showWarnings_ = true;
\r
247 firstErrorOccurred_ = false;
\r
252 MUTEX_DESTROY( &stream_.mutex );
\r
255 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
256 RtAudio::StreamParameters *iParams,
\r
257 RtAudioFormat format, unsigned int sampleRate,
\r
258 unsigned int *bufferFrames,
\r
259 RtAudioCallback callback, void *userData,
\r
260 RtAudio::StreamOptions *options,
\r
261 RtAudioErrorCallback errorCallback )
\r
263 if ( stream_.state != STREAM_CLOSED ) {
\r
264 errorText_ = "RtApi::openStream: a stream is already open!";
\r
265 error( RtAudioError::INVALID_USE );
\r
269 // Clear stream information potentially left from a previously open stream.
\r
272 if ( oParams && oParams->nChannels < 1 ) {
\r
273 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
274 error( RtAudioError::INVALID_USE );
\r
278 if ( iParams && iParams->nChannels < 1 ) {
\r
279 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
280 error( RtAudioError::INVALID_USE );
\r
284 if ( oParams == NULL && iParams == NULL ) {
\r
285 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
286 error( RtAudioError::INVALID_USE );
\r
290 if ( formatBytes(format) == 0 ) {
\r
291 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
292 error( RtAudioError::INVALID_USE );
\r
296 unsigned int nDevices = getDeviceCount();
\r
297 unsigned int oChannels = 0;
\r
299 oChannels = oParams->nChannels;
\r
300 if ( oParams->deviceId >= nDevices ) {
\r
301 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
302 error( RtAudioError::INVALID_USE );
\r
307 unsigned int iChannels = 0;
\r
309 iChannels = iParams->nChannels;
\r
310 if ( iParams->deviceId >= nDevices ) {
\r
311 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
312 error( RtAudioError::INVALID_USE );
\r
319 if ( oChannels > 0 ) {
\r
321 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
322 sampleRate, format, bufferFrames, options );
\r
323 if ( result == false ) {
\r
324 error( RtAudioError::SYSTEM_ERROR );
\r
329 if ( iChannels > 0 ) {
\r
331 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
332 sampleRate, format, bufferFrames, options );
\r
333 if ( result == false ) {
\r
334 if ( oChannels > 0 ) closeStream();
\r
335 error( RtAudioError::SYSTEM_ERROR );
\r
340 stream_.callbackInfo.callback = (void *) callback;
\r
341 stream_.callbackInfo.userData = userData;
\r
342 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
344 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
345 stream_.state = STREAM_STOPPED;
\r
348 unsigned int RtApi :: getDefaultInputDevice( void )
\r
350 // Should be implemented in subclasses if possible.
\r
354 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
356 // Should be implemented in subclasses if possible.
\r
360 void RtApi :: closeStream( void )
\r
362 // MUST be implemented in subclasses!
\r
366 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
367 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
368 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
369 RtAudio::StreamOptions * /*options*/ )
\r
371 // MUST be implemented in subclasses!
\r
375 void RtApi :: tickStreamTime( void )
\r
377 // Subclasses that do not provide their own implementation of
\r
378 // getStreamTime should call this function once per buffer I/O to
\r
379 // provide basic stream time support.
\r
381 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
383 #if defined( HAVE_GETTIMEOFDAY )
\r
384 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
388 long RtApi :: getStreamLatency( void )
\r
392 long totalLatency = 0;
\r
393 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
394 totalLatency = stream_.latency[0];
\r
395 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
396 totalLatency += stream_.latency[1];
\r
398 return totalLatency;
\r
401 double RtApi :: getStreamTime( void )
\r
405 #if defined( HAVE_GETTIMEOFDAY )
\r
406 // Return a very accurate estimate of the stream time by
\r
407 // adding in the elapsed time since the last tick.
\r
408 struct timeval then;
\r
409 struct timeval now;
\r
411 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
412 return stream_.streamTime;
\r
414 gettimeofday( &now, NULL );
\r
415 then = stream_.lastTickTimestamp;
\r
416 return stream_.streamTime +
\r
417 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
418 (then.tv_sec + 0.000001 * then.tv_usec));
\r
420 return stream_.streamTime;
\r
424 void RtApi :: setStreamTime( double time )
\r
429 stream_.streamTime = time;
\r
432 unsigned int RtApi :: getStreamSampleRate( void )
\r
436 return stream_.sampleRate;
\r
// *************************************************** //
//
// OS/API-specific methods.
//
// *************************************************** //

#if defined(__MACOSX_CORE__)
446 #if defined(__MACOSX_CORE__)
\r
// The OS X CoreAudio API is designed to use a separate callback
// procedure for each of its audio devices.  A single RtAudio duplex
// stream using two different devices is supported here, though it
// cannot be guaranteed to always behave correctly because we cannot
// synchronize these two callbacks.
//
// A property listener is installed for over/underrun information.
// However, no functionality is currently provided to allow property
// listeners to trigger user handlers because it is unclear what could
// be done if a critical stream parameter (buffer size, sample rate,
// device disconnect) notification arrived.  The listeners entail
// quite a bit of extra code and most likely, a user program wouldn't
// be prepared for the result anyway.  However, we do provide a flag
// to the client callback function to inform of an over/underrun.

// A structure to hold various information related to the CoreAudio API
// implementation.
465 struct CoreHandle {
\r
466 AudioDeviceID id[2]; // device ids
\r
467 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
468 AudioDeviceIOProcID procId[2];
\r
470 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
471 UInt32 nStreams[2]; // number of streams to use
\r
473 char *deviceBuffer;
\r
474 pthread_cond_t condition;
\r
475 int drainCounter; // Tracks callback counts when draining
\r
476 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
479 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
482 RtApiCore:: RtApiCore()
\r
484 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
485 // This is a largely undocumented but absolutely necessary
\r
486 // requirement starting with OS-X 10.6. If not called, queries and
\r
487 // updates to various audio device properties are not handled
\r
489 CFRunLoopRef theRunLoop = NULL;
\r
490 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
491 kAudioObjectPropertyScopeGlobal,
\r
492 kAudioObjectPropertyElementMaster };
\r
493 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
494 if ( result != noErr ) {
\r
495 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
496 error( RtAudioError::WARNING );
\r
501 RtApiCore :: ~RtApiCore()
\r
503 // The subclass destructor gets called before the base class
\r
504 // destructor, so close an existing stream before deallocating
\r
505 // apiDeviceId memory.
\r
506 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
509 unsigned int RtApiCore :: getDeviceCount( void )
\r
511 // Find out how many audio devices there are, if any.
\r
513 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
514 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
515 if ( result != noErr ) {
\r
516 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
517 error( RtAudioError::WARNING );
\r
521 return dataSize / sizeof( AudioDeviceID );
\r
524 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
526 unsigned int nDevices = getDeviceCount();
\r
527 if ( nDevices <= 1 ) return 0;
\r
530 UInt32 dataSize = sizeof( AudioDeviceID );
\r
531 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
532 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
533 if ( result != noErr ) {
\r
534 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
535 error( RtAudioError::WARNING );
\r
539 dataSize *= nDevices;
\r
540 AudioDeviceID deviceList[ nDevices ];
\r
541 property.mSelector = kAudioHardwarePropertyDevices;
\r
542 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
543 if ( result != noErr ) {
\r
544 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
545 error( RtAudioError::WARNING );
\r
549 for ( unsigned int i=0; i<nDevices; i++ )
\r
550 if ( id == deviceList[i] ) return i;
\r
552 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
553 error( RtAudioError::WARNING );
\r
557 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
559 unsigned int nDevices = getDeviceCount();
\r
560 if ( nDevices <= 1 ) return 0;
\r
563 UInt32 dataSize = sizeof( AudioDeviceID );
\r
564 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
565 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
566 if ( result != noErr ) {
\r
567 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
568 error( RtAudioError::WARNING );
\r
572 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
573 AudioDeviceID deviceList[ nDevices ];
\r
574 property.mSelector = kAudioHardwarePropertyDevices;
\r
575 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
576 if ( result != noErr ) {
\r
577 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
578 error( RtAudioError::WARNING );
\r
582 for ( unsigned int i=0; i<nDevices; i++ )
\r
583 if ( id == deviceList[i] ) return i;
\r
585 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
586 error( RtAudioError::WARNING );
\r
590 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
592 RtAudio::DeviceInfo info;
\r
593 info.probed = false;
\r
596 unsigned int nDevices = getDeviceCount();
\r
597 if ( nDevices == 0 ) {
\r
598 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
599 error( RtAudioError::INVALID_USE );
\r
603 if ( device >= nDevices ) {
\r
604 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
605 error( RtAudioError::INVALID_USE );
\r
609 AudioDeviceID deviceList[ nDevices ];
\r
610 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
611 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
612 kAudioObjectPropertyScopeGlobal,
\r
613 kAudioObjectPropertyElementMaster };
\r
614 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
615 0, NULL, &dataSize, (void *) &deviceList );
\r
616 if ( result != noErr ) {
\r
617 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
618 error( RtAudioError::WARNING );
\r
622 AudioDeviceID id = deviceList[ device ];
\r
624 // Get the device name.
\r
626 CFStringRef cfname;
\r
627 dataSize = sizeof( CFStringRef );
\r
628 property.mSelector = kAudioObjectPropertyManufacturer;
\r
629 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
630 if ( result != noErr ) {
\r
631 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
632 errorText_ = errorStream_.str();
\r
633 error( RtAudioError::WARNING );
\r
637 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
638 int length = CFStringGetLength(cfname);
\r
639 char *mname = (char *)malloc(length * 3 + 1);
\r
640 #if defined( UNICODE ) || defined( _UNICODE )
\r
641 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
643 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
645 info.name.append( (const char *)mname, strlen(mname) );
\r
646 info.name.append( ": " );
\r
647 CFRelease( cfname );
\r
650 property.mSelector = kAudioObjectPropertyName;
\r
651 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
652 if ( result != noErr ) {
\r
653 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
654 errorText_ = errorStream_.str();
\r
655 error( RtAudioError::WARNING );
\r
659 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
660 length = CFStringGetLength(cfname);
\r
661 char *name = (char *)malloc(length * 3 + 1);
\r
662 #if defined( UNICODE ) || defined( _UNICODE )
\r
663 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
665 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
667 info.name.append( (const char *)name, strlen(name) );
\r
668 CFRelease( cfname );
\r
671 // Get the output stream "configuration".
\r
672 AudioBufferList *bufferList = nil;
\r
673 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
674 property.mScope = kAudioDevicePropertyScopeOutput;
\r
675 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
677 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
678 if ( result != noErr || dataSize == 0 ) {
\r
679 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
680 errorText_ = errorStream_.str();
\r
681 error( RtAudioError::WARNING );
\r
685 // Allocate the AudioBufferList.
\r
686 bufferList = (AudioBufferList *) malloc( dataSize );
\r
687 if ( bufferList == NULL ) {
\r
688 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
689 error( RtAudioError::WARNING );
\r
693 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
694 if ( result != noErr || dataSize == 0 ) {
\r
695 free( bufferList );
\r
696 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
697 errorText_ = errorStream_.str();
\r
698 error( RtAudioError::WARNING );
\r
702 // Get output channel information.
\r
703 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
704 for ( i=0; i<nStreams; i++ )
\r
705 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
706 free( bufferList );
\r
708 // Get the input stream "configuration".
\r
709 property.mScope = kAudioDevicePropertyScopeInput;
\r
710 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
711 if ( result != noErr || dataSize == 0 ) {
\r
712 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
713 errorText_ = errorStream_.str();
\r
714 error( RtAudioError::WARNING );
\r
718 // Allocate the AudioBufferList.
\r
719 bufferList = (AudioBufferList *) malloc( dataSize );
\r
720 if ( bufferList == NULL ) {
\r
721 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
722 error( RtAudioError::WARNING );
\r
726 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
727 if (result != noErr || dataSize == 0) {
\r
728 free( bufferList );
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 // Get input channel information.
\r
736 nStreams = bufferList->mNumberBuffers;
\r
737 for ( i=0; i<nStreams; i++ )
\r
738 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
739 free( bufferList );
\r
741 // If device opens for both playback and capture, we determine the channels.
\r
742 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
743 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
745 // Probe the device sample rates.
\r
746 bool isInput = false;
\r
747 if ( info.outputChannels == 0 ) isInput = true;
\r
749 // Determine the supported sample rates.
\r
750 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
751 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
752 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
753 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
754 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
755 errorText_ = errorStream_.str();
\r
756 error( RtAudioError::WARNING );
\r
760 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
761 AudioValueRange rangeList[ nRanges ];
\r
762 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
763 if ( result != kAudioHardwareNoError ) {
\r
764 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
765 errorText_ = errorStream_.str();
\r
766 error( RtAudioError::WARNING );
\r
770 // The sample rate reporting mechanism is a bit of a mystery. It
\r
771 // seems that it can either return individual rates or a range of
\r
772 // rates. I assume that if the min / max range values are the same,
\r
773 // then that represents a single supported rate and if the min / max
\r
774 // range values are different, the device supports an arbitrary
\r
775 // range of values (though there might be multiple ranges, so we'll
\r
776 // use the most conservative range).
\r
777 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
778 bool haveValueRange = false;
\r
779 info.sampleRates.clear();
\r
780 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
781 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
\r
782 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
\r
783 info.sampleRates.push_back( tmpSr );
\r
785 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
\r
786 info.preferredSampleRate = tmpSr;
\r
789 haveValueRange = true;
\r
790 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
791 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
795 if ( haveValueRange ) {
\r
796 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
797 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
\r
798 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
800 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
801 info.preferredSampleRate = SAMPLE_RATES[k];
\r
806 // Sort and remove any redundant values
\r
807 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
808 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
810 if ( info.sampleRates.size() == 0 ) {
\r
811 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
812 errorText_ = errorStream_.str();
\r
813 error( RtAudioError::WARNING );
\r
817 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
818 // Thus, any other "physical" formats supported by the device are of
\r
819 // no interest to the client.
\r
820 info.nativeFormats = RTAUDIO_FLOAT32;
\r
822 if ( info.outputChannels > 0 )
\r
823 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
824 if ( info.inputChannels > 0 )
\r
825 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
827 info.probed = true;
\r
831 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
832 const AudioTimeStamp* /*inNow*/,
\r
833 const AudioBufferList* inInputData,
\r
834 const AudioTimeStamp* /*inInputTime*/,
\r
835 AudioBufferList* outOutputData,
\r
836 const AudioTimeStamp* /*inOutputTime*/,
\r
837 void* infoPointer )
\r
839 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
841 RtApiCore *object = (RtApiCore *) info->object;
\r
842 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
843 return kAudioHardwareUnspecifiedError;
\r
845 return kAudioHardwareNoError;
\r
848 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
850 const AudioObjectPropertyAddress properties[],
\r
851 void* handlePointer )
\r
853 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
854 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
855 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
856 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
857 handle->xrun[1] = true;
\r
859 handle->xrun[0] = true;
\r
863 return kAudioHardwareNoError;
\r
866 static OSStatus rateListener( AudioObjectID inDevice,
\r
867 UInt32 /*nAddresses*/,
\r
868 const AudioObjectPropertyAddress /*properties*/[],
\r
869 void* ratePointer )
\r
871 Float64 *rate = (Float64 *) ratePointer;
\r
872 UInt32 dataSize = sizeof( Float64 );
\r
873 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
874 kAudioObjectPropertyScopeGlobal,
\r
875 kAudioObjectPropertyElementMaster };
\r
876 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
877 return kAudioHardwareNoError;
\r
// Probe and open one direction (OUTPUT or INPUT) of a stream on a
// CoreAudio device.  Steps: (1) map the RtAudio device index to an
// AudioDeviceID; (2) select the CoreAudio stream(s) covering the
// requested channels — one interleaved stream, consecutive mono
// streams, or consecutive multi-channel streams; (3) negotiate buffer
// size, nominal sample rate, and the virtual + physical stream
// formats; (4) allocate the CoreHandle plus user/device conversion
// buffers; (5) register the I/O proc and the processor-overload (xrun)
// listener.
//
// NOTE(review): this listing has numeric gaps where lines were dropped
// (most "return FAILURE;" statements, closing braces, #else/#endif
// lines, a few declarations, and the trailing error-cleanup label).
// Gaps that affect reading are flagged below; confirm against the
// canonical source before modifying.
bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
  // Validate the device index (callers should have checked already).
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";

  if ( device >= nDevices ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";

  // Translate the RtAudio device index into an AudioDeviceID by
  // fetching the system-wide device list.
  // NOTE(review): variable-length array is a gcc/clang extension, not
  // standard C++.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";

  AudioDeviceID id = deviceList[ device ];

  // Setup for stream mode.
  bool isInput = false;
  if ( mode == INPUT ) {
    property.mScope = kAudioDevicePropertyScopeInput;
    // NOTE(review): listing gap — the "isInput = true; } else {" lines
    // appear to have been dropped between these two assignments.
    property.mScope = kAudioDevicePropertyScopeOutput;

  // Get the stream "configuration".
  AudioBufferList *bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    free( bufferList );
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();

  // Search for one or more streams that contain the desired number of
  // channels. CoreAudio devices can have an arbitrary number of
  // streams and each stream can have an arbitrary number of channels.
  // For each stream, a single buffer of interleaved samples is
  // provided. RtAudio prefers the use of one stream of interleaved
  // data or multiple consecutive single-channel streams. However, we
  // now support multiple consecutive multi-channel streams of
  // interleaved data as well.
  UInt32 iStream, offsetCounter = firstChannel;
  UInt32 nStreams = bufferList->mNumberBuffers;
  bool monoMode = false;
  bool foundStream = false;

  // First check that the device supports the requested number of
  // channels (sum over all of its streams).
  UInt32 deviceChannels = 0;
  for ( iStream=0; iStream<nStreams; iStream++ )
    deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;

  if ( deviceChannels < ( channels + firstChannel ) ) {
    free( bufferList );
    errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
    errorText_ = errorStream_.str();

  // Look for a single stream meeting our needs.
  UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
  for ( iStream=0; iStream<nStreams; iStream++ ) {
    streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
    if ( streamChannels >= channels + offsetCounter ) {
      firstStream = iStream;
      channelOffset = offsetCounter;
      foundStream = true;
    if ( streamChannels > offsetCounter ) break;
    offsetCounter -= streamChannels;

  // If we didn't find a single stream above, then we should be able
  // to meet the channel specification with multiple streams.
  if ( foundStream == false ) {
    // Skip over leading streams consumed by the channel offset.
    offsetCounter = firstChannel;
    for ( iStream=0; iStream<nStreams; iStream++ ) {
      streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
      if ( streamChannels > offsetCounter ) break;
      offsetCounter -= streamChannels;

    firstStream = iStream;
    channelOffset = offsetCounter;
    Int32 channelCounter = channels + offsetCounter - streamChannels;

    if ( streamChannels > 1 ) monoMode = false;
    while ( channelCounter > 0 ) {
      streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
      if ( streamChannels > 1 ) monoMode = false;
      channelCounter -= streamChannels;
      // NOTE(review): listing gap — the "streamCount = iStream -
      // firstStream + 1;"-style accounting and closing braces are not
      // visible here.

  free( bufferList );

  // Determine the buffer size: clamp the caller's request into the
  // device-reported frame-size range.
  AudioValueRange bufferRange;
  dataSize = sizeof( AudioValueRange );
  property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );

  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
    errorText_ = errorStream_.str();

  if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
  else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
  if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;

  // Set the buffer size. For multiple streams, I'm assuming we only
  // need to make this setting for the master channel.
  UInt32 theSize = (UInt32) *bufferSize;
  dataSize = sizeof( UInt32 );
  property.mSelector = kAudioDevicePropertyBufferFrameSize;
  result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );

  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
    errorText_ = errorStream_.str();

  // If attempting to setup a duplex stream, the bufferSize parameter
  // MUST be the same in both directions!
  *bufferSize = theSize;
  if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
    errorText_ = errorStream_.str();

  stream_.bufferSize = *bufferSize;
  stream_.nBuffers = 1;

  // Try to set "hog" mode ... it's not clear to me this is working.
  // NOTE(review): the declaration of hog_pid (presumably "pid_t
  // hog_pid;") falls in a listing gap just below — confirm.
  if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
    dataSize = sizeof( hog_pid );
    property.mSelector = kAudioDevicePropertyHogMode;
    result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
      errorText_ = errorStream_.str();

    // Claim exclusive ownership only if some other process holds it.
    if ( hog_pid != getpid() ) {
      hog_pid = getpid();
      result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
      if ( result != noErr ) {
        errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
        errorText_ = errorStream_.str();

  // Check and if necessary, change the sample rate for the device.
  Float64 nominalRate;
  dataSize = sizeof( Float64 );
  property.mSelector = kAudioDevicePropertyNominalSampleRate;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
    errorText_ = errorStream_.str();

  // Only change the sample rate if off by more than 1 Hz.
  if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {

    // Set a property listener for the sample rate change
    Float64 reportedRate = 0.0;
    AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
    result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
      errorText_ = errorStream_.str();

    nominalRate = (Float64) sampleRate;
    result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
    if ( result != noErr ) {
      AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
      errorText_ = errorStream_.str();

    // Now wait until the reported nominal rate is what we just set.
    // Poll in 5 ms steps up to a 5 second ceiling.
    // NOTE(review): the sleep call inside this loop (canonically
    // "usleep( 5000 );") falls in a listing gap — without it this
    // would busy-wait; confirm.
    UInt32 microCounter = 0;
    while ( reportedRate != nominalRate ) {
      microCounter += 5000;
      if ( microCounter > 5000000 ) break;

    // Remove the property listener.
    AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );

    if ( microCounter > 5000000 ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
      errorText_ = errorStream_.str();

  // Now set the stream format for all streams. Also, check the
  // physical format of the device and change that if necessary.
  AudioStreamBasicDescription description;
  dataSize = sizeof( AudioStreamBasicDescription );
  property.mSelector = kAudioStreamPropertyVirtualFormat;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
    errorText_ = errorStream_.str();

  // Set the sample rate and data format id. However, only make the
  // change if the sample rate is not within 1.0 of the desired
  // rate and the format is not linear pcm.
  bool updateFormat = false;
  if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
    description.mSampleRate = (Float64) sampleRate;
    updateFormat = true;

  if ( description.mFormatID != kAudioFormatLinearPCM ) {
    description.mFormatID = kAudioFormatLinearPCM;
    updateFormat = true;

  if ( updateFormat ) {
    result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
      errorText_ = errorStream_.str();

  // Now check the physical format.
  property.mSelector = kAudioStreamPropertyPhysicalFormat;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
    errorText_ = errorStream_.str();

  //std::cout << "Current physical stream format:" << std::endl;
  //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
  //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
  //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
  //std::cout << "   sample rate = " << description.mSampleRate << std::endl;

  if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
    description.mFormatID = kAudioFormatLinearPCM;
    //description.mSampleRate = (Float64) sampleRate;
    AudioStreamBasicDescription testDescription = description;
    UInt32 formatFlags;

    // We'll try higher bit rates first and then work our way down.
    // Candidate list: (bits-per-channel, format flags) pairs; the
    // fractional .2/.4 entries distinguish 24-in-4 low/high variants.
    std::vector< std::pair<UInt32, UInt32> > physicalFormats;
    formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
    physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
    formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
    physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
    physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
    formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
    physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
    formatFlags |= kAudioFormatFlagIsAlignedHigh;
    physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
    formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
    physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
    physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );

    // Walk the candidates until the hardware accepts one.
    bool setPhysicalFormat = false;
    for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
      testDescription = description;
      testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
      testDescription.mFormatFlags = physicalFormats[i].second;
      // NOTE(review): "~( x & flag )" is bitwise NOT of the masked
      // value and is non-zero even when the flag IS set — canonically
      // suspicious; a logical "!" is probably intended.  Left as-is.
      if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
        testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
        testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
      testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
      result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
      if ( result == noErr ) {
        setPhysicalFormat = true;
        //std::cout << "Updated physical stream format:" << std::endl;
        //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
        //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
        //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
        //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;

    if ( !setPhysicalFormat ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
      errorText_ = errorStream_.str();

  } // done setting virtual/physical formats.

  // Get the stream / device latency.
  // NOTE(review): the "UInt32 latency;" declaration falls in a listing
  // gap just above this — confirm.
  dataSize = sizeof( UInt32 );
  property.mSelector = kAudioDevicePropertyLatency;
  if ( AudioObjectHasProperty( id, &property ) == true ) {
    result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
    if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
      // Failure to read latency is non-fatal: warn and continue.
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );

  // Byte-swapping: According to AudioHardware.h, the stream data will
  // always be presented in native-endian format, so we should never
  // need to byte swap.
  stream_.doByteSwap[mode] = false;

  // From the CoreAudio documentation, PCM data must be supplied as
  // 32-bit floats (hence the fixed device format below).
  stream_.userFormat = format;
  stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;

  if ( streamCount == 1 )
    stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
  else // multiple streams
    stream_.nDeviceChannels[mode] = channels;
  stream_.nUserChannels[mode] = channels;
  stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true;
  if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( streamCount == 1 ) {
    if ( stream_.nUserChannels[mode] > 1 &&
         stream_.userInterleaved != stream_.deviceInterleaved[mode] )
      stream_.doConvertBuffer[mode] = true;
  else if ( monoMode && stream_.userInterleaved )
    stream_.doConvertBuffer[mode] = true;

  // Allocate our CoreHandle structure for the stream.
  CoreHandle *handle = 0;
  if ( stream_.apiHandle == 0 ) {
    // NOTE(review): the "try {" opening this allocation falls in a
    // listing gap.
      handle = new CoreHandle;
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";

    if ( pthread_cond_init( &handle->condition, NULL ) ) {
      errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";

    stream_.apiHandle = (void *) handle;
    // An existing handle means the other direction was opened first.
    handle = (CoreHandle *) stream_.apiHandle;
  handle->iStream[mode] = firstStream;
  handle->nStreams[mode] = streamCount;
  handle->id[mode] = id;

  // Allocate necessary internal buffers.
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
  // NOTE(review): memset before the NULL check dereferences a NULL
  // pointer if malloc failed — check should precede the memset.
  memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";

  // If possible, we will make use of the CoreAudio stream buffers as
  // "device buffers".  However, we can't do this if using multiple
  // streams and buffer conversion is required.
  if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {

    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( mode == INPUT ) {
      // In duplex mode, reuse the output device buffer when it is
      // already large enough.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
        if ( bufferBytes <= bytesOut ) makeBuffer = false;

    if ( makeBuffer ) {
      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";

  stream_.sampleRate = sampleRate;
  stream_.device[mode] = device;
  stream_.state = STREAM_STOPPED;
  stream_.callbackInfo.object = (void *) this;

  // Setup the buffer conversion information structure.
  if ( stream_.doConvertBuffer[mode] ) {
    if ( streamCount > 1 ) setConvertInfo( mode, 0 );
    else setConvertInfo( mode, channelOffset );

  if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
    // Only one callback procedure per device.
    stream_.mode = DUPLEX;

#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
    // deprecated in favor of AudioDeviceCreateIOProcID()
    // NOTE(review): the "#else" / "#endif" around this alternative
    // fall in listing gaps.
    result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
    errorText_ = errorStream_.str();

  if ( stream_.mode == OUTPUT && mode == INPUT )
    stream_.mode = DUPLEX;
    stream_.mode = mode;

  // Setup the device property listener for over/underload.
  property.mSelector = kAudioDeviceProcessorOverload;
  property.mScope = kAudioObjectPropertyScopeGlobal;
  result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );

  // --- error cleanup: everything below tears the partial open down.
  // NOTE(review): the "return SUCCESS;", "error:" label and guard on
  // handle fall in listing gaps here.
    pthread_cond_destroy( &handle->condition );
    stream_.apiHandle = 0;

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  stream_.state = STREAM_CLOSED;
\r
// Close the open stream: stop the I/O proc(s) if still running,
// unregister them from CoreAudio (per device for duplex on two
// devices), free the user and device buffers, destroy the condition
// variable and reset the stream bookkeeping to CLOSED.
//
// NOTE(review): listing gaps hide the "return;" after the warning,
// the #else/#endif pairs, several closing braces, and — at the gap
// between pthread_cond_destroy and clearing apiHandle — what is
// canonically a "delete handle;".  Without that delete the CoreHandle
// would leak; confirm against the canonical source.
void RtApiCore :: closeStream( void )
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );

  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( stream_.state == STREAM_RUNNING )
      AudioDeviceStop( handle->id[0], callbackHandler );
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
    // deprecated in favor of AudioDeviceDestroyIOProcID()
    AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );

  // Input side: only touch device 1 when it is distinct from device 0.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
    if ( stream_.state == STREAM_RUNNING )
      AudioDeviceStop( handle->id[1], callbackHandler );
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
    // deprecated in favor of AudioDeviceDestroyIOProcID()
    AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );

  // Release the per-direction user buffers.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  // Destroy pthread condition variable.
  pthread_cond_destroy( &handle->condition );
  stream_.apiHandle = 0;

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
\r
// Start the stream: kick off the output device's I/O proc (modes
// OUTPUT/DUPLEX) and, when input runs on a different device, the input
// proc as well; then reset drain bookkeeping and mark the stream
// RUNNING.  On any AudioDeviceStart failure, reports SYSTEM_ERROR.
//
// NOTE(review): listing gaps hide the "return;" after the warning and
// the "goto unlock;" / "unlock:" error-jump structure that the final
// two lines canonically belong to.
void RtApiCore :: startStream( void )
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiCore::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    result = AudioDeviceStart( handle->id[0], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();

  // Start the input proc only when it is a separate device.
  if ( stream_.mode == INPUT ||
       ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

    result = AudioDeviceStart( handle->id[1], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();

  // Fresh run: no drain in progress.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream.  For output, first lets queued audio drain: sets
// drainCounter so the callback plays out/zero-fills remaining buffers
// and then signals the condition variable this function waits on.
// Afterwards stops the output and (if on a distinct device) input I/O
// procs and marks the stream STOPPED.
//
// NOTE(review): listing gaps hide the "return;" after the warning and
// the "unlock:"-style tail structure.  pthread_cond_wait is called
// with stream_.mutex, which must be held by this thread at that point
// — the lock acquisition is not visible in this listing; confirm.
void RtApiCore :: stopStream( void )
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Trigger a drain (unless one is already underway) and wait for
    // the callback to signal completion.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled

    result = AudioDeviceStop( handle->id[0], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();

  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

    result = AudioDeviceStop( handle->id[1], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();

  stream_.state = STREAM_STOPPED;

  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the stream: like stopStream() but without letting queued
// output audio play out — drainCounter is pre-set to 2 so the callback
// writes zeros instead of draining user data.
//
// NOTE(review): the listing ends this function at the drainCounter
// assignment; the canonical implementation then calls stopStream()
// (and an earlier gap hides the "return;" after the warning).
// Confirm against the canonical source.
void RtApiCore :: abortStream( void )
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  handle->drainCounter = 2;
\r
1550 // This function will be called by a spawned thread when the user
\r
1551 // callback function signals that the stream should be stopped or
\r
1552 // aborted. It is better to handle it this way because the
\r
1553 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1554 // function is called.
\r
1555 static void *coreStopStream( void *ptr )
\r
1557 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1558 RtApiCore *object = (RtApiCore *) info->object;
\r
1560 object->stopStream();
\r
1561 pthread_exit( NULL );
\r
1564 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1565 const AudioBufferList *inBufferList,
\r
1566 const AudioBufferList *outBufferList )
\r
1568 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1569 if ( stream_.state == STREAM_CLOSED ) {
\r
1570 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1571 error( RtAudioError::WARNING );
\r
1575 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1576 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1578 // Check if we were draining the stream and signal is finished.
\r
1579 if ( handle->drainCounter > 3 ) {
\r
1580 ThreadHandle threadId;
\r
1582 stream_.state = STREAM_STOPPING;
\r
1583 if ( handle->internalDrain == true )
\r
1584 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1585 else // external call to stopStream()
\r
1586 pthread_cond_signal( &handle->condition );
\r
1590 AudioDeviceID outputDevice = handle->id[0];
\r
1592 // Invoke user callback to get fresh output data UNLESS we are
\r
1593 // draining stream or duplex mode AND the input/output devices are
\r
1594 // different AND this function is called for the input device.
\r
1595 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1596 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1597 double streamTime = getStreamTime();
\r
1598 RtAudioStreamStatus status = 0;
\r
1599 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1600 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1601 handle->xrun[0] = false;
\r
1603 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1604 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1605 handle->xrun[1] = false;
\r
1608 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1609 stream_.bufferSize, streamTime, status, info->userData );
\r
1610 if ( cbReturnValue == 2 ) {
\r
1611 stream_.state = STREAM_STOPPING;
\r
1612 handle->drainCounter = 2;
\r
1616 else if ( cbReturnValue == 1 ) {
\r
1617 handle->drainCounter = 1;
\r
1618 handle->internalDrain = true;
\r
1622 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1624 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1626 if ( handle->nStreams[0] == 1 ) {
\r
1627 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1629 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1631 else { // fill multiple streams with zeros
\r
1632 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1633 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1635 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1639 else if ( handle->nStreams[0] == 1 ) {
\r
1640 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1641 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1642 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1644 else { // copy from user buffer
\r
1645 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1646 stream_.userBuffer[0],
\r
1647 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1650 else { // fill multiple streams
\r
1651 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1652 if ( stream_.doConvertBuffer[0] ) {
\r
1653 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1654 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1657 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1658 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1659 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1660 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1661 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1664 else { // fill multiple multi-channel streams with interleaved data
\r
1665 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1666 Float32 *out, *in;
\r
1668 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1669 UInt32 inChannels = stream_.nUserChannels[0];
\r
1670 if ( stream_.doConvertBuffer[0] ) {
\r
1671 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1672 inChannels = stream_.nDeviceChannels[0];
\r
1675 if ( inInterleaved ) inOffset = 1;
\r
1676 else inOffset = stream_.bufferSize;
\r
1678 channelsLeft = inChannels;
\r
1679 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1681 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1682 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1685 // Account for possible channel offset in first stream
\r
1686 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1687 streamChannels -= stream_.channelOffset[0];
\r
1688 outJump = stream_.channelOffset[0];
\r
1692 // Account for possible unfilled channels at end of the last stream
\r
1693 if ( streamChannels > channelsLeft ) {
\r
1694 outJump = streamChannels - channelsLeft;
\r
1695 streamChannels = channelsLeft;
\r
1698 // Determine input buffer offsets and skips
\r
1699 if ( inInterleaved ) {
\r
1700 inJump = inChannels;
\r
1701 in += inChannels - channelsLeft;
\r
1705 in += (inChannels - channelsLeft) * inOffset;
\r
1708 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1709 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1710 *out++ = in[j*inOffset];
\r
1715 channelsLeft -= streamChannels;
\r
1721 // Don't bother draining input
\r
1722 if ( handle->drainCounter ) {
\r
1723 handle->drainCounter++;
\r
1727 AudioDeviceID inputDevice;
\r
1728 inputDevice = handle->id[1];
\r
1729 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1731 if ( handle->nStreams[1] == 1 ) {
\r
1732 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1733 convertBuffer( stream_.userBuffer[1],
\r
1734 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1735 stream_.convertInfo[1] );
\r
1737 else { // copy to user buffer
\r
1738 memcpy( stream_.userBuffer[1],
\r
1739 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1740 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1743 else { // read from multiple streams
\r
1744 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1745 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1747 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1748 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1749 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1750 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1751 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1754 else { // read from multiple multi-channel streams
\r
1755 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1756 Float32 *out, *in;
\r
1758 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1759 UInt32 outChannels = stream_.nUserChannels[1];
\r
1760 if ( stream_.doConvertBuffer[1] ) {
\r
1761 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1762 outChannels = stream_.nDeviceChannels[1];
\r
1765 if ( outInterleaved ) outOffset = 1;
\r
1766 else outOffset = stream_.bufferSize;
\r
1768 channelsLeft = outChannels;
\r
1769 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1771 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1772 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1775 // Account for possible channel offset in first stream
\r
1776 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1777 streamChannels -= stream_.channelOffset[1];
\r
1778 inJump = stream_.channelOffset[1];
\r
1782 // Account for possible unread channels at end of the last stream
\r
1783 if ( streamChannels > channelsLeft ) {
\r
1784 inJump = streamChannels - channelsLeft;
\r
1785 streamChannels = channelsLeft;
\r
1788 // Determine output buffer offsets and skips
\r
1789 if ( outInterleaved ) {
\r
1790 outJump = outChannels;
\r
1791 out += outChannels - channelsLeft;
\r
1795 out += (outChannels - channelsLeft) * outOffset;
\r
1798 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1799 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1800 out[j*outOffset] = *in++;
\r
1805 channelsLeft -= streamChannels;
\r
1809 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1810 convertBuffer( stream_.userBuffer[1],
\r
1811 stream_.deviceBuffer,
\r
1812 stream_.convertInfo[1] );
\r
1818 //MUTEX_UNLOCK( &stream_.mutex );
\r
1820 RtApi::tickStreamTime();
\r
1824 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1828 case kAudioHardwareNotRunningError:
\r
1829 return "kAudioHardwareNotRunningError";
\r
1831 case kAudioHardwareUnspecifiedError:
\r
1832 return "kAudioHardwareUnspecifiedError";
\r
1834 case kAudioHardwareUnknownPropertyError:
\r
1835 return "kAudioHardwareUnknownPropertyError";
\r
1837 case kAudioHardwareBadPropertySizeError:
\r
1838 return "kAudioHardwareBadPropertySizeError";
\r
1840 case kAudioHardwareIllegalOperationError:
\r
1841 return "kAudioHardwareIllegalOperationError";
\r
1843 case kAudioHardwareBadObjectError:
\r
1844 return "kAudioHardwareBadObjectError";
\r
1846 case kAudioHardwareBadDeviceError:
\r
1847 return "kAudioHardwareBadDeviceError";
\r
1849 case kAudioHardwareBadStreamError:
\r
1850 return "kAudioHardwareBadStreamError";
\r
1852 case kAudioHardwareUnsupportedOperationError:
\r
1853 return "kAudioHardwareUnsupportedOperationError";
\r
1855 case kAudioDeviceUnsupportedFormatError:
\r
1856 return "kAudioDeviceUnsupportedFormatError";
\r
1858 case kAudioDevicePermissionsError:
\r
1859 return "kAudioDevicePermissionsError";
\r
1862 return "CoreAudio unknown error";
\r
1866 //******************** End of __MACOSX_CORE__ *********************//
\r
1869 #if defined(__UNIX_JACK__)
\r
1871 // JACK is a low-latency audio server, originally written for the
\r
1872 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1873 // connect a number of different applications to an audio device, as
\r
1874 // well as allowing them to share audio between themselves.
\r
1876 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1877 // have ports connected to the server. The JACK server is typically
\r
1878 // started in a terminal as follows:
\r
1880 // .jackd -d alsa -d hw:0
\r
1882 // or through an interface program such as qjackctl. Many of the
\r
1883 // parameters normally set for a stream are fixed by the JACK server
\r
1884 // and can be specified when the JACK server is started. In
\r
1887 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1889 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1890 // frames, and number of buffers = 4. Once the server is running, it
\r
1891 // is not possible to override these values. If the values are not
\r
1892 // specified in the command-line, the JACK server uses default values.
\r
1894 // The JACK server does not have to be running when an instance of
\r
1895 // RtApiJack is created, though the function getDeviceCount() will
\r
1896 // report 0 devices found until JACK has been started. When no
\r
1897 // devices are available (i.e., the JACK server is not running), a
\r
1898 // stream cannot be opened.
\r
1900 #include <jack/jack.h>
\r
1901 #include <unistd.h>
\r
1904 // A structure to hold various information related to the Jack API
\r
1905 // implementation.
\r
1906 struct JackHandle {
\r
1907 jack_client_t *client;
\r
1908 jack_port_t **ports[2];
\r
1909 std::string deviceName[2];
\r
1911 pthread_cond_t condition;
\r
1912 int drainCounter; // Tracks callback counts when draining
\r
1913 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1916 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// Error hook handed to jack_set_error_function(); deliberately discards
// Jack's internal error messages.  (A stray ';' after the body, which was
// an empty declaration, has been removed.)
static void jackSilentError( const char * ) {}
\r
1921 RtApiJack :: RtApiJack()
\r
1923 // Nothing to do here.
\r
1924 #if !defined(__RTAUDIO_DEBUG__)
\r
1925 // Turn off Jack's internal error reporting.
\r
1926 jack_set_error_function( &jackSilentError );
\r
1930 RtApiJack :: ~RtApiJack()
\r
1932 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1935 unsigned int RtApiJack :: getDeviceCount( void )
\r
1937 // See if we can become a jack client.
\r
1938 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1939 jack_status_t *status = NULL;
\r
1940 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1941 if ( client == 0 ) return 0;
\r
1943 const char **ports;
\r
1944 std::string port, previousPort;
\r
1945 unsigned int nChannels = 0, nDevices = 0;
\r
1946 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1948 // Parse the port names up to the first colon (:).
\r
1949 size_t iColon = 0;
\r
1951 port = (char *) ports[ nChannels ];
\r
1952 iColon = port.find(":");
\r
1953 if ( iColon != std::string::npos ) {
\r
1954 port = port.substr( 0, iColon + 1 );
\r
1955 if ( port != previousPort ) {
\r
1957 previousPort = port;
\r
1960 } while ( ports[++nChannels] );
\r
1964 jack_client_close( client );
\r
1968 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1970 RtAudio::DeviceInfo info;
\r
1971 info.probed = false;
\r
1973 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1974 jack_status_t *status = NULL;
\r
1975 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1976 if ( client == 0 ) {
\r
1977 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1978 error( RtAudioError::WARNING );
\r
1982 const char **ports;
\r
1983 std::string port, previousPort;
\r
1984 unsigned int nPorts = 0, nDevices = 0;
\r
1985 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1987 // Parse the port names up to the first colon (:).
\r
1988 size_t iColon = 0;
\r
1990 port = (char *) ports[ nPorts ];
\r
1991 iColon = port.find(":");
\r
1992 if ( iColon != std::string::npos ) {
\r
1993 port = port.substr( 0, iColon );
\r
1994 if ( port != previousPort ) {
\r
1995 if ( nDevices == device ) info.name = port;
\r
1997 previousPort = port;
\r
2000 } while ( ports[++nPorts] );
\r
2004 if ( device >= nDevices ) {
\r
2005 jack_client_close( client );
\r
2006 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
2007 error( RtAudioError::INVALID_USE );
\r
2011 // Get the current jack server sample rate.
\r
2012 info.sampleRates.clear();
\r
2014 info.preferredSampleRate = jack_get_sample_rate( client );
\r
2015 info.sampleRates.push_back( info.preferredSampleRate );
\r
2017 // Count the available ports containing the client name as device
\r
2018 // channels. Jack "input ports" equal RtAudio output channels.
\r
2019 unsigned int nChannels = 0;
\r
2020 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
2022 while ( ports[ nChannels ] ) nChannels++;
\r
2024 info.outputChannels = nChannels;
\r
2027 // Jack "output ports" equal RtAudio input channels.
\r
2029 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
2031 while ( ports[ nChannels ] ) nChannels++;
\r
2033 info.inputChannels = nChannels;
\r
2036 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2037 jack_client_close(client);
\r
2038 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2039 error( RtAudioError::WARNING );
\r
2043 // If device opens for both playback and capture, we determine the channels.
\r
2044 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2045 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2047 // Jack always uses 32-bit floats.
\r
2048 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2050 // Jack doesn't provide default devices so we'll use the first available one.
\r
2051 if ( device == 0 && info.outputChannels > 0 )
\r
2052 info.isDefaultOutput = true;
\r
2053 if ( device == 0 && info.inputChannels > 0 )
\r
2054 info.isDefaultInput = true;
\r
2056 jack_client_close(client);
\r
2057 info.probed = true;
\r
2061 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2063 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2065 RtApiJack *object = (RtApiJack *) info->object;
\r
2066 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2071 // This function will be called by a spawned thread when the Jack
\r
2072 // server signals that it is shutting down. It is necessary to handle
\r
2073 // it this way because the jackShutdown() function must return before
\r
2074 // the jack_deactivate() function (in closeStream()) will return.
\r
2075 static void *jackCloseStream( void *ptr )
\r
2077 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2078 RtApiJack *object = (RtApiJack *) info->object;
\r
2080 object->closeStream();
\r
2082 pthread_exit( NULL );
\r
2084 static void jackShutdown( void *infoPointer )
\r
2086 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2087 RtApiJack *object = (RtApiJack *) info->object;
\r
2089 // Check current stream state. If stopped, then we'll assume this
\r
2090 // was called as a result of a call to RtApiJack::stopStream (the
\r
2091 // deactivation of a client handle causes this function to be called).
\r
2092 // If not, we'll assume the Jack server is shutting down or some
\r
2093 // other problem occurred and we should close the stream.
\r
2094 if ( object->isStreamRunning() == false ) return;
\r
2096 ThreadHandle threadId;
\r
2097 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2098 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2101 static int jackXrun( void *infoPointer )
\r
2103 JackHandle *handle = (JackHandle *) infoPointer;
\r
2105 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2106 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2111 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2112 unsigned int firstChannel, unsigned int sampleRate,
\r
2113 RtAudioFormat format, unsigned int *bufferSize,
\r
2114 RtAudio::StreamOptions *options )
\r
2116 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2118 // Look for jack server and try to become a client (only do once per stream).
\r
2119 jack_client_t *client = 0;
\r
2120 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2121 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2122 jack_status_t *status = NULL;
\r
2123 if ( options && !options->streamName.empty() )
\r
2124 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2126 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2127 if ( client == 0 ) {
\r
2128 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2129 error( RtAudioError::WARNING );
\r
2134 // The handle must have been created on an earlier pass.
\r
2135 client = handle->client;
\r
2138 const char **ports;
\r
2139 std::string port, previousPort, deviceName;
\r
2140 unsigned int nPorts = 0, nDevices = 0;
\r
2141 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2143 // Parse the port names up to the first colon (:).
\r
2144 size_t iColon = 0;
\r
2146 port = (char *) ports[ nPorts ];
\r
2147 iColon = port.find(":");
\r
2148 if ( iColon != std::string::npos ) {
\r
2149 port = port.substr( 0, iColon );
\r
2150 if ( port != previousPort ) {
\r
2151 if ( nDevices == device ) deviceName = port;
\r
2153 previousPort = port;
\r
2156 } while ( ports[++nPorts] );
\r
2160 if ( device >= nDevices ) {
\r
2161 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2165 // Count the available ports containing the client name as device
\r
2166 // channels. Jack "input ports" equal RtAudio output channels.
\r
2167 unsigned int nChannels = 0;
\r
2168 unsigned long flag = JackPortIsInput;
\r
2169 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2170 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2172 while ( ports[ nChannels ] ) nChannels++;
\r
2176 // Compare the jack ports for specified client to the requested number of channels.
\r
2177 if ( nChannels < (channels + firstChannel) ) {
\r
2178 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2179 errorText_ = errorStream_.str();
\r
2183 // Check the jack server sample rate.
\r
2184 unsigned int jackRate = jack_get_sample_rate( client );
\r
2185 if ( sampleRate != jackRate ) {
\r
2186 jack_client_close( client );
\r
2187 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2188 errorText_ = errorStream_.str();
\r
2191 stream_.sampleRate = jackRate;
\r
2193 // Get the latency of the JACK port.
\r
2194 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2195 if ( ports[ firstChannel ] ) {
\r
2196 // Added by Ge Wang
\r
2197 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2198 // the range (usually the min and max are equal)
\r
2199 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2200 // get the latency range
\r
2201 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2202 // be optimistic, use the min!
\r
2203 stream_.latency[mode] = latrange.min;
\r
2204 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2208 // The jack server always uses 32-bit floating-point data.
\r
2209 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2210 stream_.userFormat = format;
\r
2212 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2213 else stream_.userInterleaved = true;
\r
2215 // Jack always uses non-interleaved buffers.
\r
2216 stream_.deviceInterleaved[mode] = false;
\r
2218 // Jack always provides host byte-ordered data.
\r
2219 stream_.doByteSwap[mode] = false;
\r
2221 // Get the buffer size. The buffer size and number of buffers
\r
2222 // (periods) is set when the jack server is started.
\r
2223 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2224 *bufferSize = stream_.bufferSize;
\r
2226 stream_.nDeviceChannels[mode] = channels;
\r
2227 stream_.nUserChannels[mode] = channels;
\r
2229 // Set flags for buffer conversion.
\r
2230 stream_.doConvertBuffer[mode] = false;
\r
2231 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2232 stream_.doConvertBuffer[mode] = true;
\r
2233 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2234 stream_.nUserChannels[mode] > 1 )
\r
2235 stream_.doConvertBuffer[mode] = true;
\r
2237 // Allocate our JackHandle structure for the stream.
\r
2238 if ( handle == 0 ) {
\r
2240 handle = new JackHandle;
\r
2242 catch ( std::bad_alloc& ) {
\r
2243 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2247 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2248 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2251 stream_.apiHandle = (void *) handle;
\r
2252 handle->client = client;
\r
2254 handle->deviceName[mode] = deviceName;
\r
2256 // Allocate necessary internal buffers.
\r
2257 unsigned long bufferBytes;
\r
2258 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2259 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2260 if ( stream_.userBuffer[mode] == NULL ) {
\r
2261 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2265 if ( stream_.doConvertBuffer[mode] ) {
\r
2267 bool makeBuffer = true;
\r
2268 if ( mode == OUTPUT )
\r
2269 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2270 else { // mode == INPUT
\r
2271 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2272 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2273 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2274 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2278 if ( makeBuffer ) {
\r
2279 bufferBytes *= *bufferSize;
\r
2280 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2281 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2282 if ( stream_.deviceBuffer == NULL ) {
\r
2283 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2289 // Allocate memory for the Jack ports (channels) identifiers.
\r
2290 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2291 if ( handle->ports[mode] == NULL ) {
\r
2292 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2296 stream_.device[mode] = device;
\r
2297 stream_.channelOffset[mode] = firstChannel;
\r
2298 stream_.state = STREAM_STOPPED;
\r
2299 stream_.callbackInfo.object = (void *) this;
\r
2301 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2302 // We had already set up the stream for output.
\r
2303 stream_.mode = DUPLEX;
\r
2305 stream_.mode = mode;
\r
2306 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2307 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2308 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2311 // Register our ports.
\r
2313 if ( mode == OUTPUT ) {
\r
2314 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2315 snprintf( label, 64, "outport %d", i );
\r
2316 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2317 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2321 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2322 snprintf( label, 64, "inport %d", i );
\r
2323 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2324 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2328 // Setup the buffer conversion information structure. We don't use
\r
2329 // buffers to do channel offsets, so we override that parameter
\r
2331 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2337 pthread_cond_destroy( &handle->condition );
\r
2338 jack_client_close( handle->client );
\r
2340 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2341 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2344 stream_.apiHandle = 0;
\r
2347 for ( int i=0; i<2; i++ ) {
\r
2348 if ( stream_.userBuffer[i] ) {
\r
2349 free( stream_.userBuffer[i] );
\r
2350 stream_.userBuffer[i] = 0;
\r
2354 if ( stream_.deviceBuffer ) {
\r
2355 free( stream_.deviceBuffer );
\r
2356 stream_.deviceBuffer = 0;
\r
2362 void RtApiJack :: closeStream( void )
\r
2364 if ( stream_.state == STREAM_CLOSED ) {
\r
2365 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2366 error( RtAudioError::WARNING );
\r
2370 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2373 if ( stream_.state == STREAM_RUNNING )
\r
2374 jack_deactivate( handle->client );
\r
2376 jack_client_close( handle->client );
\r
2380 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2381 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2382 pthread_cond_destroy( &handle->condition );
\r
2384 stream_.apiHandle = 0;
\r
2387 for ( int i=0; i<2; i++ ) {
\r
2388 if ( stream_.userBuffer[i] ) {
\r
2389 free( stream_.userBuffer[i] );
\r
2390 stream_.userBuffer[i] = 0;
\r
2394 if ( stream_.deviceBuffer ) {
\r
2395 free( stream_.deviceBuffer );
\r
2396 stream_.deviceBuffer = 0;
\r
2399 stream_.mode = UNINITIALIZED;
\r
2400 stream_.state = STREAM_CLOSED;
\r
2403 void RtApiJack :: startStream( void )
\r
2406 if ( stream_.state == STREAM_RUNNING ) {
\r
2407 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2408 error( RtAudioError::WARNING );
\r
2412 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2413 int result = jack_activate( handle->client );
\r
2415 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2419 const char **ports;
\r
2421 // Get the list of available ports.
\r
2422 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2424 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2425 if ( ports == NULL) {
\r
2426 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2430 // Now make the port connections. Since RtAudio wasn't designed to
\r
2431 // allow the user to select particular channels of a device, we'll
\r
2432 // just open the first "nChannels" ports with offset.
\r
2433 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2435 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2436 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2439 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2446 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2448 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2449 if ( ports == NULL) {
\r
2450 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2454 // Now make the port connections. See note above.
\r
2455 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2457 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2458 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2461 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2468 handle->drainCounter = 0;
\r
2469 handle->internalDrain = false;
\r
2470 stream_.state = STREAM_RUNNING;
\r
2473 if ( result == 0 ) return;
\r
2474 error( RtAudioError::SYSTEM_ERROR );
\r
2477 void RtApiJack :: stopStream( void )
\r
2480 if ( stream_.state == STREAM_STOPPED ) {
\r
2481 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2482 error( RtAudioError::WARNING );
\r
2486 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2487 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2489 if ( handle->drainCounter == 0 ) {
\r
2490 handle->drainCounter = 2;
\r
2491 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2495 jack_deactivate( handle->client );
\r
2496 stream_.state = STREAM_STOPPED;
\r
2499 void RtApiJack :: abortStream( void )
\r
2502 if ( stream_.state == STREAM_STOPPED ) {
\r
2503 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2504 error( RtAudioError::WARNING );
\r
2508 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2509 handle->drainCounter = 2;
\r
2514 // This function will be called by a spawned thread when the user
\r
2515 // callback function signals that the stream should be stopped or
\r
2516 // aborted. It is necessary to handle it this way because the
\r
2517 // callbackEvent() function must return before the jack_deactivate()
\r
2518 // function will return.
\r
2519 static void *jackStopStream( void *ptr )
\r
2521 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2522 RtApiJack *object = (RtApiJack *) info->object;
\r
2524 object->stopStream();
\r
2525 pthread_exit( NULL );
\r
2528 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2530 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2531 if ( stream_.state == STREAM_CLOSED ) {
\r
2532 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2533 error( RtAudioError::WARNING );
\r
2536 if ( stream_.bufferSize != nframes ) {
\r
2537 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2538 error( RtAudioError::WARNING );
\r
2542 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2543 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2545 // Check if we were draining the stream and signal is finished.
\r
2546 if ( handle->drainCounter > 3 ) {
\r
2547 ThreadHandle threadId;
\r
2549 stream_.state = STREAM_STOPPING;
\r
2550 if ( handle->internalDrain == true )
\r
2551 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2553 pthread_cond_signal( &handle->condition );
\r
2557 // Invoke user callback first, to get fresh output data.
\r
2558 if ( handle->drainCounter == 0 ) {
\r
2559 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2560 double streamTime = getStreamTime();
\r
2561 RtAudioStreamStatus status = 0;
\r
2562 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2563 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2564 handle->xrun[0] = false;
\r
2566 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2567 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2568 handle->xrun[1] = false;
\r
2570 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2571 stream_.bufferSize, streamTime, status, info->userData );
\r
2572 if ( cbReturnValue == 2 ) {
\r
2573 stream_.state = STREAM_STOPPING;
\r
2574 handle->drainCounter = 2;
\r
2576 pthread_create( &id, NULL, jackStopStream, info );
\r
2579 else if ( cbReturnValue == 1 ) {
\r
2580 handle->drainCounter = 1;
\r
2581 handle->internalDrain = true;
\r
2585 jack_default_audio_sample_t *jackbuffer;
\r
2586 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2587 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2589 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2591 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2592 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2593 memset( jackbuffer, 0, bufferBytes );
\r
2597 else if ( stream_.doConvertBuffer[0] ) {
\r
2599 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2601 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2602 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2603 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2606 else { // no buffer conversion
\r
2607 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2608 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2609 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2614 // Don't bother draining input
\r
2615 if ( handle->drainCounter ) {
\r
2616 handle->drainCounter++;
\r
2620 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2622 if ( stream_.doConvertBuffer[1] ) {
\r
2623 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2624 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2625 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2627 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2629 else { // no buffer conversion
\r
2630 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2631 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2632 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2638 RtApi::tickStreamTime();
\r
2641 //******************** End of __UNIX_JACK__ *********************//
\r
2644 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2646 // The ASIO API is designed around a callback scheme, so this
\r
2647 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2648 // Jack. The primary constraint with ASIO is that it only allows
\r
2649 // access to a single driver at a time. Thus, it is not possible to
\r
2650 // have more than one simultaneous RtAudio stream.
\r
2652 // This implementation also requires a number of external ASIO files
\r
2653 // and a few global variables. The ASIO callback scheme does not
\r
2654 // allow for the passing of user data, so we must create a global
\r
2655 // pointer to our callbackInfo structure.
\r
2657 // On unix systems, we make use of a pthread condition variable.
\r
2658 // Since there is no equivalent in Windows, I hacked something based
\r
2659 // on information found in
\r
2660 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2662 #include "asiosys.h"
\r
2664 #include "iasiothiscallresolver.h"
\r
2665 #include "asiodrivers.h"
\r
2668 static AsioDrivers drivers;
\r
2669 static ASIOCallbacks asioCallbacks;
\r
2670 static ASIODriverInfo driverInfo;
\r
2671 static CallbackInfo *asioCallbackInfo;
\r
2672 static bool asioXRun;
\r
2674 struct AsioHandle {
\r
2675 int drainCounter; // Tracks callback counts when draining
\r
2676 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2677 ASIOBufferInfo *bufferInfos;
\r
2681 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2684 // Function declarations (definitions at end of section)
// Maps an ASIOError code to a human-readable string for error messages.
2685 static const char* getAsioErrorString( ASIOError result );
// Driver callback: invoked when the device's sample rate changes (external sync).
2686 static void sampleRateChanged( ASIOSampleRate sRate );
// Driver callback: generic ASIO host-message dispatcher (reset/resync/etc.).
2687 static long asioMessages( long selector, long value, void* message, double* opt );
\r
// Constructor: initializes COM (ASIO requires a single-threaded apartment),
// clears any currently loaded driver, and seeds the global driverInfo used
// by later ASIOInit() calls.
2689 RtApiAsio :: RtApiAsio()
2691 // ASIO cannot run on a multi-threaded apartment. You can call
2692 // CoInitialize beforehand, but it must be for apartment threading
2693 // (in which case, CoInitialize will return S_FALSE here).
2694 coInitialized_ = false;
2695 HRESULT hr = CoInitialize( NULL );
2696 if ( FAILED(hr) ) {
2697 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2698 error( RtAudioError::WARNING );
// coInitialized_ is only set when CoInitialize succeeded, so the destructor
// knows whether to balance it with CoUninitialize().
// NOTE(review): the closing "}" of the if-block (original line 2699) appears to be
// missing from this extraction.
2700 coInitialized_ = true;
2702 drivers.removeCurrentDriver();
2703 driverInfo.asioVersion = 2;
2705 // See note in DirectSound implementation about GetDesktopWindow().
2706 driverInfo.sysRef = GetForegroundWindow();
\r
// Destructor: closes any open stream and balances the constructor's
// CoInitialize() only if it succeeded there.
2709 RtApiAsio :: ~RtApiAsio()
2711 if ( stream_.state != STREAM_CLOSED ) closeStream();
2712 if ( coInitialized_ ) CoUninitialize();
\r
// Returns the number of installed ASIO drivers, as reported by the
// global AsioDrivers list. Each driver counts as one "device".
2715 unsigned int RtApiAsio :: getDeviceCount( void )
2717 return (unsigned int) drivers.asioGetNumDev();
\r
// Probes one ASIO driver for its capabilities (channel counts, supported
// sample rates, native data format). Because ASIO allows only one active
// driver at a time, this loads the driver, queries it, and unloads it; if a
// stream is already open, previously saved results (devices_) are returned.
// Warnings leave info.probed == false.
// NOTE(review): this extraction is missing several lines relative to upstream
// (e.g. "return info;" after each warning, loop/if closing braces) — the added
// comments describe the visible flow only.
2720 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2722 RtAudio::DeviceInfo info;
2723 info.probed = false;
// Validate the device index against the current driver count.
2726 unsigned int nDevices = getDeviceCount();
2727 if ( nDevices == 0 ) {
2728 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2729 error( RtAudioError::INVALID_USE );
2733 if ( device >= nDevices ) {
2734 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2735 error( RtAudioError::INVALID_USE );
2739 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2740 if ( stream_.state != STREAM_CLOSED ) {
2741 if ( device >= devices_.size() ) {
2742 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2743 error( RtAudioError::WARNING );
2746 return devices_[ device ];
// Resolve the driver name, then load and initialize the driver for probing.
2749 char driverName[32];
2750 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2751 if ( result != ASE_OK ) {
2752 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2753 errorText_ = errorStream_.str();
2754 error( RtAudioError::WARNING );
2758 info.name = driverName;
2760 if ( !drivers.loadDriver( driverName ) ) {
2761 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2762 errorText_ = errorStream_.str();
2763 error( RtAudioError::WARNING );
2767 result = ASIOInit( &driverInfo );
2768 if ( result != ASE_OK ) {
2769 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2770 errorText_ = errorStream_.str();
2771 error( RtAudioError::WARNING );
2775 // Determine the device channel information.
2776 long inputChannels, outputChannels;
2777 result = ASIOGetChannels( &inputChannels, &outputChannels );
2778 if ( result != ASE_OK ) {
2779 drivers.removeCurrentDriver();
2780 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2781 errorText_ = errorStream_.str();
2782 error( RtAudioError::WARNING );
2786 info.outputChannels = outputChannels;
2787 info.inputChannels = inputChannels;
// Duplex capacity is limited by the smaller of the two directions.
2788 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2789 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2791 // Determine the supported sample rates.
2792 info.sampleRates.clear();
2793 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2794 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2795 if ( result == ASE_OK ) {
2796 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz.
2798 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2799 info.preferredSampleRate = SAMPLE_RATES[i];
2803 // Determine supported data types ... just check first channel and assume rest are the same.
2804 ASIOChannelInfo channelInfo;
2805 channelInfo.channel = 0;
2806 channelInfo.isInput = true;
2807 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2808 result = ASIOGetChannelInfo( &channelInfo );
2809 if ( result != ASE_OK ) {
2810 drivers.removeCurrentDriver();
2811 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2812 errorText_ = errorStream_.str();
2813 error( RtAudioError::WARNING );
// Map the ASIO sample type to the RtAudio format flags (MSB variants are
// the byte-swapped big-endian encodings of the same width).
2817 info.nativeFormats = 0;
2818 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2819 info.nativeFormats |= RTAUDIO_SINT16;
2820 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2821 info.nativeFormats |= RTAUDIO_SINT32;
2822 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2823 info.nativeFormats |= RTAUDIO_FLOAT32;
2824 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2825 info.nativeFormats |= RTAUDIO_FLOAT64;
2826 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2827 info.nativeFormats |= RTAUDIO_SINT24;
2829 if ( info.outputChannels > 0 )
2830 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2831 if ( info.inputChannels > 0 )
2832 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2834 info.probed = true;
// Unload the driver so another device can be probed or opened later.
2835 drivers.removeCurrentDriver();
\r
// ASIO buffer-switch callback: dispatches to the RtApiAsio instance saved in
// the global asioCallbackInfo (ASIO callbacks carry no user-data pointer).
// 'index' selects which of the two half-buffers is ready.
2839 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2841 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2842 object->callbackEvent( index );
\r
// Snapshots all device capabilities into devices_ before a stream opens.
// Needed because getDeviceInfo() cannot probe while an ASIO stream is
// running (only one driver may be loaded at a time).
2845 void RtApiAsio :: saveDeviceInfo( void )
2849 unsigned int nDevices = getDeviceCount();
2850 devices_.resize( nDevices );
2851 for ( unsigned int i=0; i<nDevices; i++ )
2852 devices_[i] = getDeviceInfo( i );
\r
// Opens (or extends to duplex) an ASIO stream on 'device': loads/initializes
// the driver, validates channels and sample rate, negotiates a buffer size
// within the driver's [minSize, maxSize] constraints, creates the ASIO
// buffers, allocates RtAudio's user/device conversion buffers, and queries
// latencies. Returns FAILURE via the error path (cleanup at bottom) on any
// hard error; the duplex-input path shares the already-loaded output driver.
// NOTE(review): this extraction has dropped a number of original lines
// ("goto error;" after each failure branch, the "error:" label, "return
// SUCCESS;", several closing braces, and the "try {" before "new AsioHandle")
// — the added comments describe the visible flow only; verify against upstream.
2855 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2856 unsigned int firstChannel, unsigned int sampleRate,
2857 RtAudioFormat format, unsigned int *bufferSize,
2858 RtAudio::StreamOptions *options )
2859 {
// Duplex-input means: output half already open on this driver; reuse it.
2861 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2863 // For ASIO, a duplex stream MUST use the same driver.
2864 if ( isDuplexInput && stream_.device[0] != device ) {
2865 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2869 char driverName[32];
2870 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2871 if ( result != ASE_OK ) {
2872 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2873 errorText_ = errorStream_.str();
2877 // Only load the driver once for duplex stream.
2878 if ( !isDuplexInput ) {
2879 // The getDeviceInfo() function will not work when a stream is open
2880 // because ASIO does not allow multiple devices to run at the same
2881 // time. Thus, we'll probe the system before opening a stream and
2882 // save the results for use by getDeviceInfo().
2883 this->saveDeviceInfo();
2885 if ( !drivers.loadDriver( driverName ) ) {
2886 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2887 errorText_ = errorStream_.str();
2891 result = ASIOInit( &driverInfo );
2892 if ( result != ASE_OK ) {
2893 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2894 errorText_ = errorStream_.str();
2899 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2900 bool buffersAllocated = false;
2901 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2902 unsigned int nChannels;
2905 // Check the device channel count.
2906 long inputChannels, outputChannels;
2907 result = ASIOGetChannels( &inputChannels, &outputChannels );
2908 if ( result != ASE_OK ) {
2909 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2910 errorText_ = errorStream_.str();
// Requested channels plus the channel offset must fit the device's capacity.
2914 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2915 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2916 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2917 errorText_ = errorStream_.str();
2920 stream_.nDeviceChannels[mode] = channels;
2921 stream_.nUserChannels[mode] = channels;
2922 stream_.channelOffset[mode] = firstChannel;
2924 // Verify the sample rate is supported.
2925 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2926 if ( result != ASE_OK ) {
2927 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2928 errorText_ = errorStream_.str();
2932 // Get the current sample rate
2933 ASIOSampleRate currentRate;
// NOTE(review): "¤tRate" below is an encoding-mangled "&currentRate"
// (the "&curren" prefix was turned into the currency-sign entity) — must be
// restored to "ASIOGetSampleRate( &currentRate )" for this to compile.
2934 result = ASIOGetSampleRate( ¤tRate );
2935 if ( result != ASE_OK ) {
2936 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2937 errorText_ = errorStream_.str();
2941 // Set the sample rate only if necessary
2942 if ( currentRate != sampleRate ) {
2943 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2944 if ( result != ASE_OK ) {
2945 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2946 errorText_ = errorStream_.str();
2951 // Determine the driver data type.
2952 ASIOChannelInfo channelInfo;
2953 channelInfo.channel = 0;
2954 if ( mode == OUTPUT ) channelInfo.isInput = false;
2955 else channelInfo.isInput = true;
2956 result = ASIOGetChannelInfo( &channelInfo );
2957 if ( result != ASE_OK ) {
2958 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
2959 errorText_ = errorStream_.str();
2963 // Assuming WINDOWS host is always little-endian.
2964 stream_.doByteSwap[mode] = false;
2965 stream_.userFormat = format;
2966 stream_.deviceFormat[mode] = 0;
// MSB (big-endian) ASIO types require a byte swap on this little-endian host.
2967 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
2968 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
2969 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
2971 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
2972 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
2973 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
2975 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
2976 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2977 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
2979 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
2980 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
2981 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
2983 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
2984 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
2985 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
2988 if ( stream_.deviceFormat[mode] == 0 ) {
2989 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
2990 errorText_ = errorStream_.str();
2994 // Set the buffer size. For a duplex stream, this will end up
2995 // setting the buffer size based on the input constraints, which
2997 long minSize, maxSize, preferSize, granularity;
2998 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
2999 if ( result != ASE_OK ) {
3000 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3001 errorText_ = errorStream_.str();
3005 if ( isDuplexInput ) {
3006 // When this is the duplex input (output was opened before), then we have to use the same
3007 // buffersize as the output, because it might use the preferred buffer size, which most
3008 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3009 // So instead of throwing an error, make them equal. The caller uses the reference
3010 // to the "bufferSize" param as usual to set up processing buffers.
3012 *bufferSize = stream_.bufferSize;
// Clamp the requested size into [minSize, maxSize]; granularity == -1 means
// the driver requires a power-of-two size, so snap to the nearest one.
3015 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3016 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3017 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3018 else if ( granularity == -1 ) {
3019 // Make sure bufferSize is a power of two.
3020 int log2_of_min_size = 0;
3021 int log2_of_max_size = 0;
// Find the positions of the highest set bits of minSize and maxSize.
3023 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3024 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3025 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
// Pick the power of two in [2^log2_min, 2^log2_max] closest to *bufferSize.
3028 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3029 int min_delta_num = log2_of_min_size;
3031 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3032 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3033 if (current_delta < min_delta) {
3034 min_delta = current_delta;
3035 min_delta_num = i;
3039 *bufferSize = ( (unsigned int)1 << min_delta_num );
3040 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3041 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3043 else if ( granularity != 0 ) {
3044 // Set to an even multiple of granularity, rounding up.
3045 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3050 // we don't use it anymore, see above!
3051 // Just left it here for the case...
3052 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3053 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3058 stream_.bufferSize = *bufferSize;
3059 stream_.nBuffers = 2;
3061 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3062 else stream_.userInterleaved = true;
3064 // ASIO always uses non-interleaved buffers.
3065 stream_.deviceInterleaved[mode] = false;
3067 // Allocate, if necessary, our AsioHandle structure for the stream.
3068 if ( handle == 0 ) {
// NOTE(review): the "try {" opening this allocation (original line 3069)
// appears to be missing from this extraction.
3070 handle = new AsioHandle;
3072 catch ( std::bad_alloc& ) {
3073 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3076 handle->bufferInfos = 0;
3078 // Create a manual-reset event.
3079 handle->condition = CreateEvent( NULL, // no security
3080 TRUE, // manual-reset
3081 FALSE, // non-signaled initially
3082 NULL ); // unnamed
3083 stream_.apiHandle = (void *) handle;
3086 // Create the ASIO internal buffers. Since RtAudio sets up input
3087 // and output separately, we'll have to dispose of previously
3088 // created output buffers for a duplex stream.
3089 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3090 ASIODisposeBuffers();
3091 if ( handle->bufferInfos ) free( handle->bufferInfos );
3094 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3096 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3097 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3098 if ( handle->bufferInfos == NULL ) {
3099 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3100 errorText_ = errorStream_.str();
// Output channel infos come first, then input channel infos.
3104 ASIOBufferInfo *infos;
3105 infos = handle->bufferInfos;
3106 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3107 infos->isInput = ASIOFalse;
3108 infos->channelNum = i + stream_.channelOffset[0];
3109 infos->buffers[0] = infos->buffers[1] = 0;
3111 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3112 infos->isInput = ASIOTrue;
3113 infos->channelNum = i + stream_.channelOffset[1];
3114 infos->buffers[0] = infos->buffers[1] = 0;
3117 // prepare for callbacks
3118 stream_.sampleRate = sampleRate;
3119 stream_.device[mode] = device;
3120 stream_.mode = isDuplexInput ? DUPLEX : mode;
3122 // store this class instance before registering callbacks, that are going to use it
3123 asioCallbackInfo = &stream_.callbackInfo;
3124 stream_.callbackInfo.object = (void *) this;
3126 // Set up the ASIO callback structure and create the ASIO data buffers.
3127 asioCallbacks.bufferSwitch = &bufferSwitch;
3128 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3129 asioCallbacks.asioMessage = &asioMessages;
3130 asioCallbacks.bufferSwitchTimeInfo = NULL;
3131 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3132 if ( result != ASE_OK ) {
3133 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3134 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3135 // in that case, let's be naïve and try that instead
3136 *bufferSize = preferSize;
3137 stream_.bufferSize = *bufferSize;
3138 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3141 if ( result != ASE_OK ) {
3142 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3143 errorText_ = errorStream_.str();
// From here on the error path must call ASIODisposeBuffers().
3146 buffersAllocated = true;
3147 stream_.state = STREAM_STOPPED;
3149 // Set flags for buffer conversion.
3150 stream_.doConvertBuffer[mode] = false;
3151 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3152 stream_.doConvertBuffer[mode] = true;
3153 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3154 stream_.nUserChannels[mode] > 1 )
3155 stream_.doConvertBuffer[mode] = true;
3157 // Allocate necessary internal buffers
3158 unsigned long bufferBytes;
3159 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3160 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3161 if ( stream_.userBuffer[mode] == NULL ) {
3162 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3166 if ( stream_.doConvertBuffer[mode] ) {
// Reuse the existing duplex device buffer if it is already large enough.
3168 bool makeBuffer = true;
3169 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3170 if ( isDuplexInput && stream_.deviceBuffer ) {
3171 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3172 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3175 if ( makeBuffer ) {
3176 bufferBytes *= *bufferSize;
3177 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3178 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3179 if ( stream_.deviceBuffer == NULL ) {
3180 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3186 // Determine device latencies
3187 long inputLatency, outputLatency;
3188 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3189 if ( result != ASE_OK ) {
3190 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3191 errorText_ = errorStream_.str();
3192 error( RtAudioError::WARNING); // warn but don't fail
3195 stream_.latency[0] = outputLatency;
3196 stream_.latency[1] = inputLatency;
3199 // Setup the buffer conversion information structure. We don't use
3200 // buffers to do channel offsets, so we override that parameter
3202 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// ---- error cleanup path (reached via the dropped "error:" label) ----
// Duplex-input failures are cleaned up by RtApi::openStream(); everything
// below tears down the single-direction state allocated above.
3207 if ( !isDuplexInput ) {
3208 // the cleanup for error in the duplex input, is done by RtApi::openStream
3209 // So we clean up for single channel only
3211 if ( buffersAllocated )
3212 ASIODisposeBuffers();
3214 drivers.removeCurrentDriver();
3217 CloseHandle( handle->condition );
3218 if ( handle->bufferInfos )
3219 free( handle->bufferInfos );
3222 stream_.apiHandle = 0;
3226 if ( stream_.userBuffer[mode] ) {
3227 free( stream_.userBuffer[mode] );
3228 stream_.userBuffer[mode] = 0;
3231 if ( stream_.deviceBuffer ) {
3232 free( stream_.deviceBuffer );
3233 stream_.deviceBuffer = 0;
3238 }
\r
// Closes the stream: stops it if running, disposes the ASIO buffers, unloads
// the driver, and frees the AsioHandle, condition event, and all user/device
// buffers. Resets stream_ to UNINITIALIZED/STREAM_CLOSED.
3240 void RtApiAsio :: closeStream()
3242 if ( stream_.state == STREAM_CLOSED ) {
3243 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3244 error( RtAudioError::WARNING );
3248 if ( stream_.state == STREAM_RUNNING ) {
// Mark stopped before ASIOStop() so callbackEvent() bails out immediately.
3249 stream_.state = STREAM_STOPPED;
// NOTE(review): the ASIOStop() call inside this branch (original ~line 3250)
// appears to be missing from this extraction.
3252 ASIODisposeBuffers();
3253 drivers.removeCurrentDriver();
3255 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3257 CloseHandle( handle->condition );
3258 if ( handle->bufferInfos )
3259 free( handle->bufferInfos );
3261 stream_.apiHandle = 0;
// Free both the output [0] and input [1] user buffers.
3264 for ( int i=0; i<2; i++ ) {
3265 if ( stream_.userBuffer[i] ) {
3266 free( stream_.userBuffer[i] );
3267 stream_.userBuffer[i] = 0;
3271 if ( stream_.deviceBuffer ) {
3272 free( stream_.deviceBuffer );
3273 stream_.deviceBuffer = 0;
3276 stream_.mode = UNINITIALIZED;
3277 stream_.state = STREAM_CLOSED;
\r
// File-scope flag: set true by the stop path, cleared when startStream()
// succeeds, so a queued stop-thread is not double-started.
3280 bool stopThreadCalled = false;
\r
// Starts the ASIO device, resets the drain state and the manual-reset
// condition event, and marks the stream RUNNING. On driver failure, raises
// a SYSTEM_ERROR via error().
3282 void RtApiAsio :: startStream()
3285 if ( stream_.state == STREAM_RUNNING ) {
3286 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3287 error( RtAudioError::WARNING );
3291 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3292 ASIOError result = ASIOStart();
3293 if ( result != ASE_OK ) {
3294 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3295 errorText_ = errorStream_.str();
// Reset drain bookkeeping so a fresh stop/drain cycle works correctly.
3299 handle->drainCounter = 0;
3300 handle->internalDrain = false;
3301 ResetEvent( handle->condition );
3302 stream_.state = STREAM_RUNNING;
3306 stopThreadCalled = false;
3308 if ( result == ASE_OK ) return;
3309 error( RtAudioError::SYSTEM_ERROR );
\r
// Stops the stream. If output is involved, first lets the callback drain the
// output (drainCounter = 2) and blocks on the condition event that
// callbackEvent() signals, then calls ASIOStop().
3312 void RtApiAsio :: stopStream()
3315 if ( stream_.state == STREAM_STOPPED ) {
3316 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3317 error( RtAudioError::WARNING );
3321 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3322 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Only initiate a drain if one is not already in progress.
3323 if ( handle->drainCounter == 0 ) {
3324 handle->drainCounter = 2;
3325 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3329 stream_.state = STREAM_STOPPED;
3331 ASIOError result = ASIOStop();
3332 if ( result != ASE_OK ) {
3333 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3334 errorText_ = errorStream_.str();
3337 if ( result == ASE_OK ) return;
3338 error( RtAudioError::SYSTEM_ERROR );
\r
// Aborts the stream. Deliberately identical to stopStream() (see comment
// below); the "drop buffers immediately" fast path is intentionally disabled.
3341 void RtApiAsio :: abortStream()
3344 if ( stream_.state == STREAM_STOPPED ) {
3345 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3346 error( RtAudioError::WARNING );
3350 // The following lines were commented-out because some behavior was
3351 // noted where the device buffers need to be zeroed to avoid
3352 // continuing sound, even when the device buffers are completely
3353 // disposed. So now, calling abort is the same as calling stop.
3354 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3355 // handle->drainCounter = 2;
// NOTE(review): the stopStream() call that makes abort equivalent to stop
// (original ~line 3356) appears to be missing from this extraction.
\r
3359 // This function will be called by a spawned thread when the user
3360 // callback function signals that the stream should be stopped or
3361 // aborted. It is necessary to handle it this way because the
3362 // callbackEvent() function must return before the ASIOStop()
3363 // function will return.
3364 static unsigned __stdcall asioStopStream( void *ptr )
3366 CallbackInfo *info = (CallbackInfo *) ptr;
3367 RtApiAsio *object = (RtApiAsio *) info->object;
3369 object->stopStream();
// Clean thread exit; the thread handle is stored in stream_.callbackInfo.thread.
3370 _endthreadex( 0 );
\r
// Per-buffer-switch worker, invoked by the static bufferSwitch() callback.
// 'bufferIndex' selects which ASIO half-buffer to read/write. Handles drain
// completion signaling, invokes the user callback, converts/byte-swaps and
// copies output data into the non-interleaved ASIO channel buffers, and
// de-interleaves input data back into the user buffer.
// NOTE(review): several closing braces and "return SUCCESS;"/unlock lines of
// the original appear to be missing from this extraction.
3374 bool RtApiAsio :: callbackEvent( long bufferIndex )
3376 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3377 if ( stream_.state == STREAM_CLOSED ) {
3378 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3379 error( RtAudioError::WARNING );
3383 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3384 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3386 // Check if we were draining the stream and signal if finished.
3387 if ( handle->drainCounter > 3 ) {
3389 stream_.state = STREAM_STOPPING;
// External stop (stopStream) is blocked on the condition event; internal
// stop (callback return value) needs a helper thread, because ASIOStop()
// cannot complete until this callback returns.
3390 if ( handle->internalDrain == false )
3391 SetEvent( handle->condition );
3392 else { // spawn a thread to stop the stream
3393 unsigned threadId;
3394 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3395 &stream_.callbackInfo, 0, &threadId );
3400 // Invoke user callback to get fresh output data UNLESS we are
3401 // draining stream.
3402 if ( handle->drainCounter == 0 ) {
3403 RtAudioCallback callback = (RtAudioCallback) info->callback;
3404 double streamTime = getStreamTime();
3405 RtAudioStreamStatus status = 0;
// Report xruns to the user callback via the status flags.
3406 if ( stream_.mode != INPUT && asioXRun == true ) {
3407 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3410 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3411 status |= RTAUDIO_INPUT_OVERFLOW;
3414 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3415 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort now (stop thread spawned immediately);
// return 1 = drain output first, then stop from within the callback.
3416 if ( cbReturnValue == 2 ) {
3417 stream_.state = STREAM_STOPPING;
3418 handle->drainCounter = 2;
3419 unsigned threadId;
3420 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3421 &stream_.callbackInfo, 0, &threadId );
3424 else if ( cbReturnValue == 1 ) {
3425 handle->drainCounter = 1;
3426 handle->internalDrain = true;
3430 unsigned int nChannels, bufferBytes, i, j;
3431 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3432 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3434 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3436 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3438 for ( i=0, j=0; i<nChannels; i++ ) {
3439 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3440 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3444 else if ( stream_.doConvertBuffer[0] ) {
// Convert user-format data to device format, swap bytes if the driver is
// big-endian, then scatter per-channel planes into the ASIO buffers.
3446 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3447 if ( stream_.doByteSwap[0] )
3448 byteSwapBuffer( stream_.deviceBuffer,
3449 stream_.bufferSize * stream_.nDeviceChannels[0],
3450 stream_.deviceFormat[0] );
3452 for ( i=0, j=0; i<nChannels; i++ ) {
3453 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3454 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3455 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// No conversion needed: user data is already non-interleaved device format.
3461 if ( stream_.doByteSwap[0] )
3462 byteSwapBuffer( stream_.userBuffer[0],
3463 stream_.bufferSize * stream_.nUserChannels[0],
3464 stream_.userFormat );
3466 for ( i=0, j=0; i<nChannels; i++ ) {
3467 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3468 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3469 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3475 // Don't bother draining input
3476 if ( handle->drainCounter ) {
3477 handle->drainCounter++;
// NOTE(review): the "goto unlock;" that skips input processing while
// draining appears to be missing from this extraction.
3481 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3483 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3485 if (stream_.doConvertBuffer[1]) {
3487 // Always interleave ASIO input data.
// Gather per-channel ASIO planes into the device buffer, byte-swap if
// needed, then convert to the user's requested format/interleaving.
3488 for ( i=0, j=0; i<nChannels; i++ ) {
3489 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3490 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3491 handle->bufferInfos[i].buffers[bufferIndex],
3495 if ( stream_.doByteSwap[1] )
3496 byteSwapBuffer( stream_.deviceBuffer,
3497 stream_.bufferSize * stream_.nDeviceChannels[1],
3498 stream_.deviceFormat[1] );
3499 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// No conversion: copy ASIO input planes straight into the user buffer.
3503 for ( i=0, j=0; i<nChannels; i++ ) {
3504 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3505 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3506 handle->bufferInfos[i].buffers[bufferIndex],
3511 if ( stream_.doByteSwap[1] )
3512 byteSwapBuffer( stream_.userBuffer[1],
3513 stream_.bufferSize * stream_.nUserChannels[1],
3514 stream_.userFormat );
3519 // The following call was suggested by Malte Clasen. While the API
3520 // documentation indicates it should not be required, some device
3521 // drivers apparently do not function correctly without it.
3522 ASIOOutputReady();
3524 RtApi::tickStreamTime();
\r
// ASIO driver callback: the device's sample rate changed (typically under
// external sync). This implementation stops the stream rather than trying
// to adapt, and reports what happened on stderr.
3528 static void sampleRateChanged( ASIOSampleRate sRate )
3530 // The ASIO documentation says that this usually only happens during
3531 // external sync. Audio processing is not stopped by the driver,
3532 // actual sample rate might not have even changed, maybe only the
3533 // sample rate status of an AES/EBU or S/PDIF digital input at the
3536 RtApi *object = (RtApi *) asioCallbackInfo->object;
// NOTE(review): the "try {" wrapping this stopStream() call appears to be
// missing from this extraction.
3538 object->stopStream();
3540 catch ( RtAudioError &exception ) {
3541 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3545 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3548 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3552 switch( selector ) {
\r
3553 case kAsioSelectorSupported:
\r
3554 if ( value == kAsioResetRequest
\r
3555 || value == kAsioEngineVersion
\r
3556 || value == kAsioResyncRequest
\r
3557 || value == kAsioLatenciesChanged
\r
3558 // The following three were added for ASIO 2.0, you don't
\r
3559 // necessarily have to support them.
\r
3560 || value == kAsioSupportsTimeInfo
\r
3561 || value == kAsioSupportsTimeCode
\r
3562 || value == kAsioSupportsInputMonitor)
\r
3565 case kAsioResetRequest:
\r
3566 // Defer the task and perform the reset of the driver during the
\r
3567 // next "safe" situation. You cannot reset the driver right now,
\r
3568 // as this code is called from the driver. Reset the driver is
\r
3569 // done by completely destruct is. I.e. ASIOStop(),
\r
3570 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3572 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3575 case kAsioResyncRequest:
\r
3576 // This informs the application that the driver encountered some
\r
3577 // non-fatal data loss. It is used for synchronization purposes
\r
3578 // of different media. Added mainly to work around the Win16Mutex
\r
3579 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3580 // which could lose data because the Mutex was held too long by
\r
3581 // another thread. However a driver can issue it in other
\r
3582 // situations, too.
\r
3583 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3587 case kAsioLatenciesChanged:
\r
3588 // This will inform the host application that the drivers were
\r
3589 // latencies changed. Beware, it this does not mean that the
\r
3590 // buffer sizes have changed! You might need to update internal
\r
3592 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3595 case kAsioEngineVersion:
\r
3596 // Return the supported ASIO version of the host application. If
\r
3597 // a host application does not implement this selector, ASIO 1.0
\r
3598 // is assumed by the driver.
\r
3601 case kAsioSupportsTimeInfo:
\r
3602 // Informs the driver whether the
\r
3603 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3604 // For compatibility with ASIO 1.0 drivers the host application
\r
3605 // should always support the "old" bufferSwitch method, too.
\r
3608 case kAsioSupportsTimeCode:
\r
3609 // Informs the driver whether application is interested in time
\r
3610 // code info. If an application does not need to know about time
\r
3611 // code, the driver has less work to do.
\r
3618 static const char* getAsioErrorString( ASIOError result )
\r
3623 const char*message;
\r
3626 static const Messages m[] =
\r
3628 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3629 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3630 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3631 { ASE_InvalidMode, "Invalid mode." },
\r
3632 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3633 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3634 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3637 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3638 if ( m[i].value == result ) return m[i].message;
\r
3640 return "Unknown error.";
\r
3643 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3647 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3649 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3650 // - Introduces support for the Windows WASAPI API
\r
3651 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3652 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3653 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3658 #include <audioclient.h>
\r
3660 #include <mmdeviceapi.h>
\r
3661 #include <functiondiscoverykeys_devpkey.h>
\r
3663 //=============================================================================
\r
// Release a COM interface pointer (if non-NULL) and reset it to NULL so it
// cannot be double-released.  Deliberately a bare if-block rather than
// do { } while(0): some call sites in this file invoke it without a
// trailing semicolon.
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
\r
3672 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3674 //-----------------------------------------------------------------------------
\r
3676 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3677 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3678 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3679 // provide intermediate storage for read / write synchronization.
\r
3680 class WasapiBuffer
\r
3684 : buffer_( NULL ),
\r
3693 // sets the length of the internal ring buffer
\r
3694 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3697 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3699 bufferSize_ = bufferSize;
\r
3704 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3705 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3707 if ( !buffer || // incoming buffer is NULL
\r
3708 bufferSize == 0 || // incoming buffer has no data
\r
3709 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3714 unsigned int relOutIndex = outIndex_;
\r
3715 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3716 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3717 relOutIndex += bufferSize_;
\r
3720 // "in" index can end on the "out" index but cannot begin at it
\r
3721 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3722 return false; // not enough space between "in" index and "out" index
\r
3725 // copy buffer from external to internal
\r
3726 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3727 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3728 int fromInSize = bufferSize - fromZeroSize;
\r
3732 case RTAUDIO_SINT8:
\r
3733 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3734 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3736 case RTAUDIO_SINT16:
\r
3737 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3738 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3740 case RTAUDIO_SINT24:
\r
3741 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3742 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3744 case RTAUDIO_SINT32:
\r
3745 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3746 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3748 case RTAUDIO_FLOAT32:
\r
3749 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3750 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3752 case RTAUDIO_FLOAT64:
\r
3753 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3754 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3758 // update "in" index
\r
3759 inIndex_ += bufferSize;
\r
3760 inIndex_ %= bufferSize_;
\r
3765 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3766 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3768 if ( !buffer || // incoming buffer is NULL
\r
3769 bufferSize == 0 || // incoming buffer has no data
\r
3770 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3775 unsigned int relInIndex = inIndex_;
\r
3776 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3777 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3778 relInIndex += bufferSize_;
\r
3781 // "out" index can begin at and end on the "in" index
\r
3782 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3783 return false; // not enough space between "out" index and "in" index
\r
3786 // copy buffer from internal to external
\r
3787 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3788 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3789 int fromOutSize = bufferSize - fromZeroSize;
\r
3793 case RTAUDIO_SINT8:
\r
3794 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3795 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3797 case RTAUDIO_SINT16:
\r
3798 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3799 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3801 case RTAUDIO_SINT24:
\r
3802 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3803 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3805 case RTAUDIO_SINT32:
\r
3806 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3807 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3809 case RTAUDIO_FLOAT32:
\r
3810 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3811 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3813 case RTAUDIO_FLOAT64:
\r
3814 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3815 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3819 // update "out" index
\r
3820 outIndex_ += bufferSize;
\r
3821 outIndex_ %= bufferSize_;
\r
3828 unsigned int bufferSize_;
\r
3829 unsigned int inIndex_;
\r
3830 unsigned int outIndex_;
\r
3833 //-----------------------------------------------------------------------------
\r
3835 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3836 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3837 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3838 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3839 // one rate and its multiple.
\r
3840 void convertBufferWasapi( char* outBuffer,
\r
3841 const char* inBuffer,
\r
3842 const unsigned int& channelCount,
\r
3843 const unsigned int& inSampleRate,
\r
3844 const unsigned int& outSampleRate,
\r
3845 const unsigned int& inSampleCount,
\r
3846 unsigned int& outSampleCount,
\r
3847 const RtAudioFormat& format )
\r
3849 // calculate the new outSampleCount and relative sampleStep
\r
3850 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3851 float sampleStep = 1.0f / sampleRatio;
\r
3852 float inSampleFraction = 0.0f;
\r
3854 outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );
\r
3856 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3857 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3859 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3863 case RTAUDIO_SINT8:
\r
3864 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3866 case RTAUDIO_SINT16:
\r
3867 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3869 case RTAUDIO_SINT24:
\r
3870 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3872 case RTAUDIO_SINT32:
\r
3873 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3875 case RTAUDIO_FLOAT32:
\r
3876 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3878 case RTAUDIO_FLOAT64:
\r
3879 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3883 // jump to next in sample
\r
3884 inSampleFraction += sampleStep;
\r
3888 //-----------------------------------------------------------------------------
\r
3890 // A structure to hold various information related to the WASAPI implementation.
\r
3891 struct WasapiHandle
\r
3893 IAudioClient* captureAudioClient;
\r
3894 IAudioClient* renderAudioClient;
\r
3895 IAudioCaptureClient* captureClient;
\r
3896 IAudioRenderClient* renderClient;
\r
3897 HANDLE captureEvent;
\r
3898 HANDLE renderEvent;
\r
3901 : captureAudioClient( NULL ),
\r
3902 renderAudioClient( NULL ),
\r
3903 captureClient( NULL ),
\r
3904 renderClient( NULL ),
\r
3905 captureEvent( NULL ),
\r
3906 renderEvent( NULL ) {}
\r
3909 //=============================================================================
\r
3911 RtApiWasapi::RtApiWasapi()
\r
3912 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3914 // WASAPI can run either apartment or multi-threaded
\r
3915 HRESULT hr = CoInitialize( NULL );
\r
3916 if ( !FAILED( hr ) )
\r
3917 coInitialized_ = true;
\r
3919 // Instantiate device enumerator
\r
3920 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3921 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3922 ( void** ) &deviceEnumerator_ );
\r
3924 if ( FAILED( hr ) ) {
\r
3925 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3926 error( RtAudioError::DRIVER_ERROR );
\r
3930 //-----------------------------------------------------------------------------
\r
3932 RtApiWasapi::~RtApiWasapi()
\r
3934 if ( stream_.state != STREAM_CLOSED )
\r
3937 SAFE_RELEASE( deviceEnumerator_ );
\r
3939 // If this object previously called CoInitialize()
\r
3940 if ( coInitialized_ )
\r
3944 //=============================================================================
\r
3946 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3948 unsigned int captureDeviceCount = 0;
\r
3949 unsigned int renderDeviceCount = 0;
\r
3951 IMMDeviceCollection* captureDevices = NULL;
\r
3952 IMMDeviceCollection* renderDevices = NULL;
\r
3954 // Count capture devices
\r
3955 errorText_.clear();
\r
3956 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3957 if ( FAILED( hr ) ) {
\r
3958 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3962 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3963 if ( FAILED( hr ) ) {
\r
3964 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3968 // Count render devices
\r
3969 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3970 if ( FAILED( hr ) ) {
\r
3971 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
3975 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3976 if ( FAILED( hr ) ) {
\r
3977 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
3982 // release all references
\r
3983 SAFE_RELEASE( captureDevices );
\r
3984 SAFE_RELEASE( renderDevices );
\r
3986 if ( errorText_.empty() )
\r
3987 return captureDeviceCount + renderDeviceCount;
\r
3989 error( RtAudioError::DRIVER_ERROR );
\r
3993 //-----------------------------------------------------------------------------
\r
3995 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
3997 RtAudio::DeviceInfo info;
\r
3998 unsigned int captureDeviceCount = 0;
\r
3999 unsigned int renderDeviceCount = 0;
\r
4000 std::string defaultDeviceName;
\r
4001 bool isCaptureDevice = false;
\r
4003 PROPVARIANT deviceNameProp;
\r
4004 PROPVARIANT defaultDeviceNameProp;
\r
4006 IMMDeviceCollection* captureDevices = NULL;
\r
4007 IMMDeviceCollection* renderDevices = NULL;
\r
4008 IMMDevice* devicePtr = NULL;
\r
4009 IMMDevice* defaultDevicePtr = NULL;
\r
4010 IAudioClient* audioClient = NULL;
\r
4011 IPropertyStore* devicePropStore = NULL;
\r
4012 IPropertyStore* defaultDevicePropStore = NULL;
\r
4014 WAVEFORMATEX* deviceFormat = NULL;
\r
4015 WAVEFORMATEX* closestMatchFormat = NULL;
\r
4018 info.probed = false;
\r
4020 // Count capture devices
\r
4021 errorText_.clear();
\r
4022 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4023 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4024 if ( FAILED( hr ) ) {
\r
4025 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
4029 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4030 if ( FAILED( hr ) ) {
\r
4031 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
4035 // Count render devices
\r
4036 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4037 if ( FAILED( hr ) ) {
\r
4038 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
4042 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4043 if ( FAILED( hr ) ) {
\r
4044 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
4048 // validate device index
\r
4049 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4050 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
4051 errorType = RtAudioError::INVALID_USE;
\r
4055 // determine whether index falls within capture or render devices
\r
4056 if ( device >= renderDeviceCount ) {
\r
4057 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4058 if ( FAILED( hr ) ) {
\r
4059 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4062 isCaptureDevice = true;
\r
4065 hr = renderDevices->Item( device, &devicePtr );
\r
4066 if ( FAILED( hr ) ) {
\r
4067 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4070 isCaptureDevice = false;
\r
4073 // get default device name
\r
4074 if ( isCaptureDevice ) {
\r
4075 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4076 if ( FAILED( hr ) ) {
\r
4077 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4082 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4083 if ( FAILED( hr ) ) {
\r
4084 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4089 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4090 if ( FAILED( hr ) ) {
\r
4091 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4094 PropVariantInit( &defaultDeviceNameProp );
\r
4096 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4097 if ( FAILED( hr ) ) {
\r
4098 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4102 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
\r
4105 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4106 if ( FAILED( hr ) ) {
\r
4107 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4111 PropVariantInit( &deviceNameProp );
\r
4113 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4114 if ( FAILED( hr ) ) {
\r
4115 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4119 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
\r
4122 if ( isCaptureDevice ) {
\r
4123 info.isDefaultInput = info.name == defaultDeviceName;
\r
4124 info.isDefaultOutput = false;
\r
4127 info.isDefaultInput = false;
\r
4128 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4132 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4133 if ( FAILED( hr ) ) {
\r
4134 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4138 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4139 if ( FAILED( hr ) ) {
\r
4140 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4144 if ( isCaptureDevice ) {
\r
4145 info.inputChannels = deviceFormat->nChannels;
\r
4146 info.outputChannels = 0;
\r
4147 info.duplexChannels = 0;
\r
4150 info.inputChannels = 0;
\r
4151 info.outputChannels = deviceFormat->nChannels;
\r
4152 info.duplexChannels = 0;
\r
4156 info.sampleRates.clear();
\r
4158 // allow support for all sample rates as we have a built-in sample rate converter
\r
4159 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4160 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4162 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
\r
4165 info.nativeFormats = 0;
\r
4167 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4168 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4169 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4171 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4172 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4174 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4175 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4178 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4179 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4180 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4182 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4183 info.nativeFormats |= RTAUDIO_SINT8;
\r
4185 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4186 info.nativeFormats |= RTAUDIO_SINT16;
\r
4188 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4189 info.nativeFormats |= RTAUDIO_SINT24;
\r
4191 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4192 info.nativeFormats |= RTAUDIO_SINT32;
\r
4197 info.probed = true;
\r
4200 // release all references
\r
4201 PropVariantClear( &deviceNameProp );
\r
4202 PropVariantClear( &defaultDeviceNameProp );
\r
4204 SAFE_RELEASE( captureDevices );
\r
4205 SAFE_RELEASE( renderDevices );
\r
4206 SAFE_RELEASE( devicePtr );
\r
4207 SAFE_RELEASE( defaultDevicePtr );
\r
4208 SAFE_RELEASE( audioClient );
\r
4209 SAFE_RELEASE( devicePropStore );
\r
4210 SAFE_RELEASE( defaultDevicePropStore );
\r
4212 CoTaskMemFree( deviceFormat );
\r
4213 CoTaskMemFree( closestMatchFormat );
\r
4215 if ( !errorText_.empty() )
\r
4216 error( errorType );
\r
4220 //-----------------------------------------------------------------------------
\r
4222 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4224 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4225 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4233 //-----------------------------------------------------------------------------
\r
4235 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4237 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4238 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4246 //-----------------------------------------------------------------------------
\r
4248 void RtApiWasapi::closeStream( void )
\r
4250 if ( stream_.state == STREAM_CLOSED ) {
\r
4251 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4252 error( RtAudioError::WARNING );
\r
4256 if ( stream_.state != STREAM_STOPPED )
\r
4259 // clean up stream memory
\r
4260 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4261 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4263 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4264 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4266 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4267 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4269 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4270 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4272 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4273 stream_.apiHandle = NULL;
\r
4275 for ( int i = 0; i < 2; i++ ) {
\r
4276 if ( stream_.userBuffer[i] ) {
\r
4277 free( stream_.userBuffer[i] );
\r
4278 stream_.userBuffer[i] = 0;
\r
4282 if ( stream_.deviceBuffer ) {
\r
4283 free( stream_.deviceBuffer );
\r
4284 stream_.deviceBuffer = 0;
\r
4287 // update stream state
\r
4288 stream_.state = STREAM_CLOSED;
\r
4291 //-----------------------------------------------------------------------------
\r
4293 void RtApiWasapi::startStream( void )
\r
4297 if ( stream_.state == STREAM_RUNNING ) {
\r
4298 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4299 error( RtAudioError::WARNING );
\r
4303 // update stream state
\r
4304 stream_.state = STREAM_RUNNING;
\r
4306 // create WASAPI stream thread
\r
4307 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4309 if ( !stream_.callbackInfo.thread ) {
\r
4310 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4311 error( RtAudioError::THREAD_ERROR );
\r
4314 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4315 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4319 //-----------------------------------------------------------------------------
\r
4321 void RtApiWasapi::stopStream( void )
\r
4325 if ( stream_.state == STREAM_STOPPED ) {
\r
4326 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4327 error( RtAudioError::WARNING );
\r
4331 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4332 stream_.state = STREAM_STOPPING;
\r
4334 // wait until stream thread is stopped
\r
4335 while( stream_.state != STREAM_STOPPED ) {
\r
4339 // Wait for the last buffer to play before stopping.
\r
4340 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4342 // stop capture client if applicable
\r
4343 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4344 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4345 if ( FAILED( hr ) ) {
\r
4346 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4347 error( RtAudioError::DRIVER_ERROR );
\r
4352 // stop render client if applicable
\r
4353 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4354 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4355 if ( FAILED( hr ) ) {
\r
4356 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4357 error( RtAudioError::DRIVER_ERROR );
\r
4362 // close thread handle
\r
4363 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4364 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4365 error( RtAudioError::THREAD_ERROR );
\r
4369 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4372 //-----------------------------------------------------------------------------
\r
4374 void RtApiWasapi::abortStream( void )
\r
4378 if ( stream_.state == STREAM_STOPPED ) {
\r
4379 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4380 error( RtAudioError::WARNING );
\r
4384 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4385 stream_.state = STREAM_STOPPING;
\r
4387 // wait until stream thread is stopped
\r
4388 while ( stream_.state != STREAM_STOPPED ) {
\r
4392 // stop capture client if applicable
\r
4393 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4394 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4395 if ( FAILED( hr ) ) {
\r
4396 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4397 error( RtAudioError::DRIVER_ERROR );
\r
4402 // stop render client if applicable
\r
4403 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4404 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4405 if ( FAILED( hr ) ) {
\r
4406 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4407 error( RtAudioError::DRIVER_ERROR );
\r
4412 // close thread handle
\r
4413 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4414 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4415 error( RtAudioError::THREAD_ERROR );
\r
4419 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4422 //-----------------------------------------------------------------------------
\r
4424 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4425 unsigned int firstChannel, unsigned int sampleRate,
\r
4426 RtAudioFormat format, unsigned int* bufferSize,
\r
4427 RtAudio::StreamOptions* options )
\r
4429 bool methodResult = FAILURE;
\r
4430 unsigned int captureDeviceCount = 0;
\r
4431 unsigned int renderDeviceCount = 0;
\r
4433 IMMDeviceCollection* captureDevices = NULL;
\r
4434 IMMDeviceCollection* renderDevices = NULL;
\r
4435 IMMDevice* devicePtr = NULL;
\r
4436 WAVEFORMATEX* deviceFormat = NULL;
\r
4437 unsigned int bufferBytes;
\r
4438 stream_.state = STREAM_STOPPED;
\r
4440 // create API Handle if not already created
\r
4441 if ( !stream_.apiHandle )
\r
4442 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4444 // Count capture devices
\r
4445 errorText_.clear();
\r
4446 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4447 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4448 if ( FAILED( hr ) ) {
\r
4449 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4453 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4454 if ( FAILED( hr ) ) {
\r
4455 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4459 // Count render devices
\r
4460 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4461 if ( FAILED( hr ) ) {
\r
4462 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4466 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4467 if ( FAILED( hr ) ) {
\r
4468 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4472 // validate device index
\r
4473 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4474 errorType = RtAudioError::INVALID_USE;
\r
4475 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4479 // determine whether index falls within capture or render devices
\r
4480 if ( device >= renderDeviceCount ) {
\r
4481 if ( mode != INPUT ) {
\r
4482 errorType = RtAudioError::INVALID_USE;
\r
4483 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4487 // retrieve captureAudioClient from devicePtr
\r
4488 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4490 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4491 if ( FAILED( hr ) ) {
\r
4492 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4496 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4497 NULL, ( void** ) &captureAudioClient );
\r
4498 if ( FAILED( hr ) ) {
\r
4499 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4503 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4504 if ( FAILED( hr ) ) {
\r
4505 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4509 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4510 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4513 if ( mode != OUTPUT ) {
\r
4514 errorType = RtAudioError::INVALID_USE;
\r
4515 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4519 // retrieve renderAudioClient from devicePtr
\r
4520 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4522 hr = renderDevices->Item( device, &devicePtr );
\r
4523 if ( FAILED( hr ) ) {
\r
4524 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4528 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4529 NULL, ( void** ) &renderAudioClient );
\r
4530 if ( FAILED( hr ) ) {
\r
4531 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4535 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4536 if ( FAILED( hr ) ) {
\r
4537 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4541 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4542 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4545 // fill stream data
\r
4546 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4547 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4548 stream_.mode = DUPLEX;
\r
4551 stream_.mode = mode;
\r
4554 stream_.device[mode] = device;
\r
4555 stream_.doByteSwap[mode] = false;
\r
4556 stream_.sampleRate = sampleRate;
\r
4557 stream_.bufferSize = *bufferSize;
\r
4558 stream_.nBuffers = 1;
\r
4559 stream_.nUserChannels[mode] = channels;
\r
4560 stream_.channelOffset[mode] = firstChannel;
\r
4561 stream_.userFormat = format;
\r
4562 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4564 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4565 stream_.userInterleaved = false;
\r
4567 stream_.userInterleaved = true;
\r
4568 stream_.deviceInterleaved[mode] = true;
\r
4570 // Set flags for buffer conversion.
\r
4571 stream_.doConvertBuffer[mode] = false;
\r
4572 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
\r
4573 stream_.nUserChannels != stream_.nDeviceChannels )
\r
4574 stream_.doConvertBuffer[mode] = true;
\r
4575 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4576 stream_.nUserChannels[mode] > 1 )
\r
4577 stream_.doConvertBuffer[mode] = true;
\r
4579 if ( stream_.doConvertBuffer[mode] )
\r
4580 setConvertInfo( mode, 0 );
\r
4582 // Allocate necessary internal buffers
\r
4583 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4585 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4586 if ( !stream_.userBuffer[mode] ) {
\r
4587 errorType = RtAudioError::MEMORY_ERROR;
\r
4588 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4592 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4593 stream_.callbackInfo.priority = 15;
\r
4595 stream_.callbackInfo.priority = 0;
\r
4597 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4598 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4600 methodResult = SUCCESS;
\r
4604 SAFE_RELEASE( captureDevices );
\r
4605 SAFE_RELEASE( renderDevices );
\r
4606 SAFE_RELEASE( devicePtr );
\r
4607 CoTaskMemFree( deviceFormat );
\r
4609 // if method failed, close the stream
\r
4610 if ( methodResult == FAILURE )
\r
4613 if ( !errorText_.empty() )
\r
4614 error( errorType );
\r
4615 return methodResult;
\r
4618 //=============================================================================
\r
4620 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4623 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4628 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4631 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4636 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4639 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4644 //-----------------------------------------------------------------------------
\r
// WASAPI stream service thread: initializes the capture and/or render audio
// clients for the open stream, then loops pulling captured frames, invoking
// the user callback, and pushing rendered frames until the stream state
// becomes STREAM_STOPPING.
// NOTE(review): this listing is a lossy extraction -- the numeric prefix on
// each line and the missing lines (closing braces, the `goto Exit;` after
// each error assignment, and the `Exit:` cleanup label) are artifacts of the
// mangling.  Code below is deliberately kept byte-identical; confirm any
// repair against the upstream RtAudio sources.
4646 void RtApiWasapi::wasapiThread()

4648 // as this is a new thread, we must CoInitialize it

4649 CoInitialize( NULL );

// Pull the per-API handles cached on the stream by probeDeviceOpen().
4653 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;

4654 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;

4655 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;

4656 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;

4657 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;

4658 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;

4660 WAVEFORMATEX* captureFormat = NULL;

4661 WAVEFORMATEX* renderFormat = NULL;

// Device-rate / user-rate ratios used to size the resampling buffers.
4662 float captureSrRatio = 0.0f;

4663 float renderSrRatio = 0.0f;

4664 WasapiBuffer captureBuffer;

4665 WasapiBuffer renderBuffer;

4667 // declare local stream variables

4668 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;

4669 BYTE* streamBuffer = NULL;

4670 unsigned long captureFlags = 0;

4671 unsigned int bufferFrameCount = 0;

4672 unsigned int numFramesPadding = 0;

4673 unsigned int convBufferSize = 0;

4674 bool callbackPushed = false;

4675 bool callbackPulled = false;

4676 bool callbackStopped = false;

4677 int callbackResult = 0;

4679 // convBuffer is used to store converted buffers between WASAPI and the user

4680 char* convBuffer = NULL;

4681 unsigned int convBuffSize = 0;

4682 unsigned int deviceBuffSize = 0;

4684 errorText_.clear();

4685 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;

4687 // Attempt to assign "Pro Audio" characteristic to thread

// NOTE(review): no NULL check on AvrtDll is visible before GetProcAddress /
// FreeLibrary below -- the guard line appears to have been dropped by the
// extraction; verify against upstream before assuming a defect.
4688 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );

4690 DWORD taskIndex = 0;

4691 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );

4692 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );

4693 FreeLibrary( AvrtDll );

4696 // start capture stream if applicable

4697 if ( captureAudioClient ) {

4698 hr = captureAudioClient->GetMixFormat( &captureFormat );

4699 if ( FAILED( hr ) ) {

4700 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";

4704 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );

4706 // initialize capture stream according to desire buffer size

4707 float desiredBufferSize = stream_.bufferSize * captureSrRatio;

// 10000000: REFERENCE_TIME is expressed in 100-ns units, i.e. 1e7 per second.
4708 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );

4710 if ( !captureClient ) {

4711 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,

4712 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,

4713 desiredBufferPeriod,

4714 desiredBufferPeriod,

4717 if ( FAILED( hr ) ) {

4718 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";

4722 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),

4723 ( void** ) &captureClient );

4724 if ( FAILED( hr ) ) {

4725 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";

4729 // configure captureEvent to trigger on every available capture buffer

4730 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );

4731 if ( !captureEvent ) {

4732 errorType = RtAudioError::SYSTEM_ERROR;

4733 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";

4737 hr = captureAudioClient->SetEventHandle( captureEvent );

4738 if ( FAILED( hr ) ) {

4739 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";

// Cache the freshly created client/event back into the shared handle so
// stopStream()/closeStream() can release them.
4743 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;

4744 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;

4747 unsigned int inBufferSize = 0;

4748 hr = captureAudioClient->GetBufferSize( &inBufferSize );

4749 if ( FAILED( hr ) ) {

4750 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";

4754 // scale outBufferSize according to stream->user sample rate ratio

4755 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];

4756 inBufferSize *= stream_.nDeviceChannels[INPUT];

4758 // set captureBuffer size

4759 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );

4761 // reset the capture stream

4762 hr = captureAudioClient->Reset();

4763 if ( FAILED( hr ) ) {

4764 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";

4768 // start the capture stream

4769 hr = captureAudioClient->Start();

4770 if ( FAILED( hr ) ) {

4771 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";

4776 // start render stream if applicable

4777 if ( renderAudioClient ) {

4778 hr = renderAudioClient->GetMixFormat( &renderFormat );

4779 if ( FAILED( hr ) ) {

4780 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";

4784 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );

4786 // initialize render stream according to desire buffer size

4787 float desiredBufferSize = stream_.bufferSize * renderSrRatio;

4788 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );

4790 if ( !renderClient ) {

4791 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,

4792 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,

4793 desiredBufferPeriod,

4794 desiredBufferPeriod,

4797 if ( FAILED( hr ) ) {

4798 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";

4802 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),

4803 ( void** ) &renderClient );

4804 if ( FAILED( hr ) ) {

4805 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";

4809 // configure renderEvent to trigger on every available render buffer

4810 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );

4811 if ( !renderEvent ) {

4812 errorType = RtAudioError::SYSTEM_ERROR;

4813 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";

4817 hr = renderAudioClient->SetEventHandle( renderEvent );

4818 if ( FAILED( hr ) ) {

4819 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";

4823 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;

4824 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;

4827 unsigned int outBufferSize = 0;

4828 hr = renderAudioClient->GetBufferSize( &outBufferSize );

4829 if ( FAILED( hr ) ) {

4830 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";

4834 // scale inBufferSize according to user->stream sample rate ratio

4835 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];

4836 outBufferSize *= stream_.nDeviceChannels[OUTPUT];

4838 // set renderBuffer size

4839 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );

4841 // reset the render stream

4842 hr = renderAudioClient->Reset();

4843 if ( FAILED( hr ) ) {

4844 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";

4848 // start the render stream

4849 hr = renderAudioClient->Start();

4850 if ( FAILED( hr ) ) {

4851 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";

// Size the conversion and device buffers for the active direction(s);
// DUPLEX takes the max of the input and output requirements.
4856 if ( stream_.mode == INPUT ) {

4857 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );

4858 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );

4860 else if ( stream_.mode == OUTPUT ) {

4861 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );

4862 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );

4864 else if ( stream_.mode == DUPLEX ) {

4865 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),

4866 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );

4867 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),

4868 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );

4871 convBuffer = ( char* ) malloc( convBuffSize );

4872 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );

4873 if ( !convBuffer || !stream_.deviceBuffer ) {

4874 errorType = RtAudioError::MEMORY_ERROR;

4875 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";

4879 // stream process loop

4880 while ( stream_.state != STREAM_STOPPING ) {

4881 if ( !callbackPulled ) {

4884 // 1. Pull callback buffer from inputBuffer

4885 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count

4886 // Convert callback buffer to user format

4888 if ( captureAudioClient ) {

4889 // Pull callback buffer from inputBuffer

4890 callbackPulled = captureBuffer.pullBuffer( convBuffer,

4891 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],

4892 stream_.deviceFormat[INPUT] );

4894 if ( callbackPulled ) {

4895 // Convert callback buffer to user sample rate

4896 convertBufferWasapi( stream_.deviceBuffer,

4898 stream_.nDeviceChannels[INPUT],

4899 captureFormat->nSamplesPerSec,

4900 stream_.sampleRate,

4901 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),

4903 stream_.deviceFormat[INPUT] );

4905 if ( stream_.doConvertBuffer[INPUT] ) {

4906 // Convert callback buffer to user format

4907 convertBuffer( stream_.userBuffer[INPUT],

4908 stream_.deviceBuffer,

4909 stream_.convertInfo[INPUT] );

4912 // no further conversion, simple copy deviceBuffer to userBuffer

4913 memcpy( stream_.userBuffer[INPUT],

4914 stream_.deviceBuffer,

4915 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );

4920 // if there is no capture stream, set callbackPulled flag

4921 callbackPulled = true;

4924 // Execute Callback

4925 // ================

4926 // 1. Execute user callback method

4927 // 2. Handle return value from callback

4929 // if callback has not requested the stream to stop

4930 if ( callbackPulled && !callbackStopped ) {

4931 // Execute user callback method

4932 callbackResult = callback( stream_.userBuffer[OUTPUT],

4933 stream_.userBuffer[INPUT],

4934 stream_.bufferSize,

4936 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,

4937 stream_.callbackInfo.userData );

4939 // Handle return value from callback

// callbackResult == 1: drain and stop; == 2: abort immediately.  A helper
// thread is spawned because this thread cannot stop/join itself.
4940 if ( callbackResult == 1 ) {

4941 // instantiate a thread to stop this thread

4942 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );

4943 if ( !threadHandle ) {

4944 errorType = RtAudioError::THREAD_ERROR;

4945 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";

4948 else if ( !CloseHandle( threadHandle ) ) {

4949 errorType = RtAudioError::THREAD_ERROR;

4950 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";

4954 callbackStopped = true;

4956 else if ( callbackResult == 2 ) {

4957 // instantiate a thread to stop this thread

4958 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );

4959 if ( !threadHandle ) {

4960 errorType = RtAudioError::THREAD_ERROR;

4961 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";

4964 else if ( !CloseHandle( threadHandle ) ) {

4965 errorType = RtAudioError::THREAD_ERROR;

4966 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";

4970 callbackStopped = true;

4975 // Callback Output

4976 // ===============

4977 // 1. Convert callback buffer to stream format

4978 // 2. Convert callback buffer to stream sample rate and channel count

4979 // 3. Push callback buffer into outputBuffer

4981 if ( renderAudioClient && callbackPulled ) {

4982 if ( stream_.doConvertBuffer[OUTPUT] ) {

4983 // Convert callback buffer to stream format

4984 convertBuffer( stream_.deviceBuffer,

4985 stream_.userBuffer[OUTPUT],

4986 stream_.convertInfo[OUTPUT] );

4990 // Convert callback buffer to stream sample rate

4991 convertBufferWasapi( convBuffer,

4992 stream_.deviceBuffer,

4993 stream_.nDeviceChannels[OUTPUT],

4994 stream_.sampleRate,

4995 renderFormat->nSamplesPerSec,

4996 stream_.bufferSize,

4998 stream_.deviceFormat[OUTPUT] );

5000 // Push callback buffer into outputBuffer

5001 callbackPushed = renderBuffer.pushBuffer( convBuffer,

5002 convBufferSize * stream_.nDeviceChannels[OUTPUT],

5003 stream_.deviceFormat[OUTPUT] );

5006 // if there is no render stream, set callbackPushed flag

5007 callbackPushed = true;

5012 // 1. Get capture buffer from stream

5013 // 2. Push capture buffer into inputBuffer

5014 // 3. If 2. was successful: Release capture buffer

5016 if ( captureAudioClient ) {

5017 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event

5018 if ( !callbackPulled ) {

5019 WaitForSingleObject( captureEvent, INFINITE );

5022 // Get capture buffer from stream

5023 hr = captureClient->GetBuffer( &streamBuffer,

5024 &bufferFrameCount,

5025 &captureFlags, NULL, NULL );

5026 if ( FAILED( hr ) ) {

5027 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";

5031 if ( bufferFrameCount != 0 ) {

5032 // Push capture buffer into inputBuffer

5033 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,

5034 bufferFrameCount * stream_.nDeviceChannels[INPUT],

5035 stream_.deviceFormat[INPUT] ) )

5037 // Release capture buffer

5038 hr = captureClient->ReleaseBuffer( bufferFrameCount );

5039 if ( FAILED( hr ) ) {

5040 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

5046 // Inform WASAPI that capture was unsuccessful

5047 hr = captureClient->ReleaseBuffer( 0 );

5048 if ( FAILED( hr ) ) {

5049 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

5056 // Inform WASAPI that capture was unsuccessful

5057 hr = captureClient->ReleaseBuffer( 0 );

5058 if ( FAILED( hr ) ) {

5059 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

5067 // 1. Get render buffer from stream

5068 // 2. Pull next buffer from outputBuffer

5069 // 3. If 2. was successful: Fill render buffer with next buffer

5070 // Release render buffer

5072 if ( renderAudioClient ) {

5073 // if the callback output buffer was not pushed to renderBuffer, wait for next render event

5074 if ( callbackPulled && !callbackPushed ) {

5075 WaitForSingleObject( renderEvent, INFINITE );

5078 // Get render buffer from stream

5079 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );

5080 if ( FAILED( hr ) ) {

5081 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";

5085 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );

5086 if ( FAILED( hr ) ) {

5087 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";

// Only the unpadded portion of the device buffer is writable this period.
5091 bufferFrameCount -= numFramesPadding;

5093 if ( bufferFrameCount != 0 ) {

5094 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );

5095 if ( FAILED( hr ) ) {

5096 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";

5100 // Pull next buffer from outputBuffer

5101 // Fill render buffer with next buffer

5102 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,

5103 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],

5104 stream_.deviceFormat[OUTPUT] ) )

5106 // Release render buffer

5107 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );

5108 if ( FAILED( hr ) ) {

5109 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5115 // Inform WASAPI that render was unsuccessful

5116 hr = renderClient->ReleaseBuffer( 0, 0 );

5117 if ( FAILED( hr ) ) {

5118 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5125 // Inform WASAPI that render was unsuccessful

5126 hr = renderClient->ReleaseBuffer( 0, 0 );

5127 if ( FAILED( hr ) ) {

5128 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5134 // if the callback buffer was pushed renderBuffer reset callbackPulled flag

5135 if ( callbackPushed ) {

5136 callbackPulled = false;

5139 // tick stream time

5140 RtApi::tickStreamTime();

// Exit cleanup (the `Exit:` label was dropped by the extraction): release
// the COM-allocated mix formats and the conversion buffer, then report.
5145 CoTaskMemFree( captureFormat );

5146 CoTaskMemFree( renderFormat );

5148 free ( convBuffer );

5152 // update stream state

5153 stream_.state = STREAM_STOPPED;

5155 if ( errorText_.empty() )

5158 error( errorType );
\r
5161 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5165 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5167 // Modified by Robin Davies, October 2005
\r
5168 // - Improvements to DirectX pointer chasing.
\r
5169 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5170 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5171 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5172 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5174 #include <dsound.h>
\r
5175 #include <assert.h>
\r
5176 #include <algorithm>
\r
5178 #if defined(__MINGW32__)
\r
5179 // missing from latest mingw winapi
\r
5180 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5181 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5182 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5183 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5186 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5188 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5189 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5192 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5194 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5195 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5196 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5197 return pointer >= earlierPointer && pointer < laterPointer;
\r
5200 // A structure to hold various information related to the DirectSound
\r
5201 // API implementation.
\r
5203 unsigned int drainCounter; // Tracks callback counts when draining
\r
5204 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
5208 UINT bufferPointer[2];
\r
5209 DWORD dsBufferSize[2];
\r
5210 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
5214 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5217 // Declarations for utility functions, callbacks, and structures
\r
5218 // specific to the DirectSound implementation.
\r
5219 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5220 LPCTSTR description,
\r
5222 LPVOID lpContext );
\r
5224 static const char* getErrorString( int code );
\r
5226 static unsigned __stdcall callbackHandler( void *ptr );
\r
5235 : found(false) { validId[0] = false; validId[1] = false; }
\r
// Context passed to the DirectSound device-enumeration callback: tells the
// callback whether it is enumerating capture (input) or playback devices and
// where to record the devices it finds.
struct DsProbeData {
  bool isInput;                          // true when probing capture devices
  std::vector<struct DsDevice>* dsDevices; // enumeration results (non-owning)
};
\r
5243 RtApiDs :: RtApiDs()
\r
5245 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5246 // accept whatever the mainline chose for a threading model.
\r
5247 coInitialized_ = false;
\r
5248 HRESULT hr = CoInitialize( NULL );
\r
5249 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5252 RtApiDs :: ~RtApiDs()
\r
5254 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5255 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5258 // The DirectSound default output is always the first device.
\r
5259 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5264 // The DirectSound default input is always the first input device,
\r
5265 // which is the first capture device enumerated.
\r
5266 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
5271 unsigned int RtApiDs :: getDeviceCount( void )
\r
5273 // Set query flag for previously found devices to false, so that we
\r
5274 // can check for any devices that have disappeared.
\r
5275 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5276 dsDevices[i].found = false;
\r
5278 // Query DirectSound devices.
\r
5279 struct DsProbeData probeInfo;
\r
5280 probeInfo.isInput = false;
\r
5281 probeInfo.dsDevices = &dsDevices;
\r
5282 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5283 if ( FAILED( result ) ) {
\r
5284 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5285 errorText_ = errorStream_.str();
\r
5286 error( RtAudioError::WARNING );
\r
5289 // Query DirectSoundCapture devices.
\r
5290 probeInfo.isInput = true;
\r
5291 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5292 if ( FAILED( result ) ) {
\r
5293 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5294 errorText_ = errorStream_.str();
\r
5295 error( RtAudioError::WARNING );
\r
5298 // Clean out any devices that may have disappeared.
\r
5299 std::vector< int > indices;
\r
5300 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5301 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
5302 //unsigned int nErased = 0;
\r
5303 for ( unsigned int i=0; i<indices.size(); i++ )
\r
5304 dsDevices.erase( dsDevices.begin()+indices[i] );
\r
5305 //dsDevices.erase( dsDevices.begin()-nErased++ );
\r
5307 return static_cast<unsigned int>(dsDevices.size());
\r
5310 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5312 RtAudio::DeviceInfo info;
\r
5313 info.probed = false;
\r
5315 if ( dsDevices.size() == 0 ) {
\r
5316 // Force a query of all devices
\r
5318 if ( dsDevices.size() == 0 ) {
\r
5319 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5320 error( RtAudioError::INVALID_USE );
\r
5325 if ( device >= dsDevices.size() ) {
\r
5326 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5327 error( RtAudioError::INVALID_USE );
\r
5332 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5334 LPDIRECTSOUND output;
\r
5336 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5337 if ( FAILED( result ) ) {
\r
5338 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5339 errorText_ = errorStream_.str();
\r
5340 error( RtAudioError::WARNING );
\r
5344 outCaps.dwSize = sizeof( outCaps );
\r
5345 result = output->GetCaps( &outCaps );
\r
5346 if ( FAILED( result ) ) {
\r
5347 output->Release();
\r
5348 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5349 errorText_ = errorStream_.str();
\r
5350 error( RtAudioError::WARNING );
\r
5354 // Get output channel information.
\r
5355 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5357 // Get sample rate information.
\r
5358 info.sampleRates.clear();
\r
5359 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5360 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5361 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
\r
5362 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5364 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
5365 info.preferredSampleRate = SAMPLE_RATES[k];
\r
5369 // Get format information.
\r
5370 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5371 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5373 output->Release();
\r
5375 if ( getDefaultOutputDevice() == device )
\r
5376 info.isDefaultOutput = true;
\r
5378 if ( dsDevices[ device ].validId[1] == false ) {
\r
5379 info.name = dsDevices[ device ].name;
\r
5380 info.probed = true;
\r
5386 LPDIRECTSOUNDCAPTURE input;
\r
5387 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5388 if ( FAILED( result ) ) {
\r
5389 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5390 errorText_ = errorStream_.str();
\r
5391 error( RtAudioError::WARNING );
\r
5396 inCaps.dwSize = sizeof( inCaps );
\r
5397 result = input->GetCaps( &inCaps );
\r
5398 if ( FAILED( result ) ) {
\r
5400 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5401 errorText_ = errorStream_.str();
\r
5402 error( RtAudioError::WARNING );
\r
5406 // Get input channel information.
\r
5407 info.inputChannels = inCaps.dwChannels;
\r
5409 // Get sample rate and format information.
\r
5410 std::vector<unsigned int> rates;
\r
5411 if ( inCaps.dwChannels >= 2 ) {
\r
5412 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5413 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5414 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5415 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5416 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5417 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5418 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5419 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5421 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5422 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5423 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5424 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5425 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5427 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5428 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5429 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5430 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5431 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5434 else if ( inCaps.dwChannels == 1 ) {
\r
5435 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5436 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5437 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5438 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5439 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5440 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5441 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5442 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5444 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5445 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5446 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5447 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5448 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5450 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5451 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5452 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5453 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5454 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5457 else info.inputChannels = 0; // technically, this would be an error
\r
5461 if ( info.inputChannels == 0 ) return info;
\r
5463 // Copy the supported rates to the info structure but avoid duplication.
\r
5465 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5467 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5468 if ( rates[i] == info.sampleRates[j] ) {
\r
5473 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5475 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5477 // If device opens for both playback and capture, we determine the channels.
\r
5478 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5479 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5481 if ( device == 0 ) info.isDefaultInput = true;
\r
5483 // Copy name and return.
\r
5484 info.name = dsDevices[ device ].name;
\r
5485 info.probed = true;
\r
5489 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5490 unsigned int firstChannel, unsigned int sampleRate,
\r
5491 RtAudioFormat format, unsigned int *bufferSize,
\r
5492 RtAudio::StreamOptions *options )
\r
5494 if ( channels + firstChannel > 2 ) {
\r
5495 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
5499 size_t nDevices = dsDevices.size();
\r
5500 if ( nDevices == 0 ) {
\r
5501 // This should not happen because a check is made before this function is called.
\r
5502 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
5506 if ( device >= nDevices ) {
\r
5507 // This should not happen because a check is made before this function is called.
\r
5508 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
5512 if ( mode == OUTPUT ) {
\r
5513 if ( dsDevices[ device ].validId[0] == false ) {
\r
5514 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
5515 errorText_ = errorStream_.str();
\r
5519 else { // mode == INPUT
\r
5520 if ( dsDevices[ device ].validId[1] == false ) {
\r
5521 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
5522 errorText_ = errorStream_.str();
\r
5527 // According to a note in PortAudio, using GetDesktopWindow()
\r
5528 // instead of GetForegroundWindow() is supposed to avoid problems
\r
5529 // that occur when the application's window is not the foreground
\r
5530 // window. Also, if the application window closes before the
\r
5531 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
5532 // problems when using GetDesktopWindow() but it seems fine now
\r
5533 // (January 2010). I'll leave it commented here.
\r
5534 // HWND hWnd = GetForegroundWindow();
\r
5535 HWND hWnd = GetDesktopWindow();
\r
5537 // Check the numberOfBuffers parameter and limit the lowest value to
\r
5538 // two. This is a judgement call and a value of two is probably too
\r
5539 // low for capture, but it should work for playback.
\r
5541 if ( options ) nBuffers = options->numberOfBuffers;
\r
5542 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
5543 if ( nBuffers < 2 ) nBuffers = 3;
\r
5545 // Check the lower range of the user-specified buffer size and set
\r
5546 // (arbitrarily) to a lower bound of 32.
\r
5547 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
5549 // Create the wave format structure. The data format setting will
\r
5550 // be determined later.
\r
5551 WAVEFORMATEX waveFormat;
\r
5552 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
5553 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
5554 waveFormat.nChannels = channels + firstChannel;
\r
5555 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
5557 // Determine the device buffer size. By default, we'll use the value
\r
5558 // defined above (32K), but we will grow it to make allowances for
\r
5559 // very large software buffer sizes.
\r
5560 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
5561 DWORD dsPointerLeadTime = 0;
\r
5563 void *ohandle = 0, *bhandle = 0;
\r
5565 if ( mode == OUTPUT ) {
\r
5567 LPDIRECTSOUND output;
\r
5568 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5569 if ( FAILED( result ) ) {
\r
5570 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5571 errorText_ = errorStream_.str();
\r
5576 outCaps.dwSize = sizeof( outCaps );
\r
5577 result = output->GetCaps( &outCaps );
\r
5578 if ( FAILED( result ) ) {
\r
5579 output->Release();
\r
5580 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
5581 errorText_ = errorStream_.str();
\r
5585 // Check channel information.
\r
5586 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
5587 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
5588 errorText_ = errorStream_.str();
\r
5592 // Check format information. Use 16-bit format unless not
\r
5593 // supported or user requests 8-bit.
\r
5594 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
5595 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
5596 waveFormat.wBitsPerSample = 16;
\r
5597 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5600 waveFormat.wBitsPerSample = 8;
\r
5601 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5603 stream_.userFormat = format;
\r
5605 // Update wave format structure and buffer information.
\r
5606 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5607 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5608 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5610 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5611 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5612 dsBufferSize *= 2;
\r
5614 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
5615 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
5616 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
5617 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
5618 if ( FAILED( result ) ) {
\r
5619 output->Release();
\r
5620 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
5621 errorText_ = errorStream_.str();
\r
5625 // Even though we will write to the secondary buffer, we need to
\r
5626 // access the primary buffer to set the correct output format
\r
5627 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
5628 // buffer description.
\r
5629 DSBUFFERDESC bufferDescription;
\r
5630 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5631 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5632 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
5634 // Obtain the primary buffer
\r
5635 LPDIRECTSOUNDBUFFER buffer;
\r
5636 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5637 if ( FAILED( result ) ) {
\r
5638 output->Release();
\r
5639 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
5640 errorText_ = errorStream_.str();
\r
5644 // Set the primary DS buffer sound format.
\r
5645 result = buffer->SetFormat( &waveFormat );
\r
5646 if ( FAILED( result ) ) {
\r
5647 output->Release();
\r
5648 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
5649 errorText_ = errorStream_.str();
\r
5653 // Setup the secondary DS buffer description.
\r
5654 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5655 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5656 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5657 DSBCAPS_GLOBALFOCUS |
\r
5658 DSBCAPS_GETCURRENTPOSITION2 |
\r
5659 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
5660 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5661 bufferDescription.lpwfxFormat = &waveFormat;
\r
5663 // Try to create the secondary DS buffer. If that doesn't work,
\r
5664 // try to use software mixing. Otherwise, there's a problem.
\r
5665 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5666 if ( FAILED( result ) ) {
\r
5667 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5668 DSBCAPS_GLOBALFOCUS |
\r
5669 DSBCAPS_GETCURRENTPOSITION2 |
\r
5670 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
5671 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5672 if ( FAILED( result ) ) {
\r
5673 output->Release();
\r
5674 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
5675 errorText_ = errorStream_.str();
\r
5680 // Get the buffer size ... might be different from what we specified.
\r
5682 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
5683 result = buffer->GetCaps( &dsbcaps );
\r
5684 if ( FAILED( result ) ) {
\r
5685 output->Release();
\r
5686 buffer->Release();
\r
5687 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5688 errorText_ = errorStream_.str();
\r
5692 dsBufferSize = dsbcaps.dwBufferBytes;
\r
5694 // Lock the DS buffer
\r
5697 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5698 if ( FAILED( result ) ) {
\r
5699 output->Release();
\r
5700 buffer->Release();
\r
5701 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
5702 errorText_ = errorStream_.str();
\r
5706 // Zero the DS buffer
\r
5707 ZeroMemory( audioPtr, dataLen );
\r
5709 // Unlock the DS buffer
\r
5710 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5711 if ( FAILED( result ) ) {
\r
5712 output->Release();
\r
5713 buffer->Release();
\r
5714 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
5715 errorText_ = errorStream_.str();
\r
5719 ohandle = (void *) output;
\r
5720 bhandle = (void *) buffer;
\r
5723 if ( mode == INPUT ) {
\r
5725 LPDIRECTSOUNDCAPTURE input;
\r
5726 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5727 if ( FAILED( result ) ) {
\r
5728 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5729 errorText_ = errorStream_.str();
\r
5734 inCaps.dwSize = sizeof( inCaps );
\r
5735 result = input->GetCaps( &inCaps );
\r
5736 if ( FAILED( result ) ) {
\r
5738 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
5739 errorText_ = errorStream_.str();
\r
5743 // Check channel information.
\r
5744 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
5745 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
5749 // Check format information. Use 16-bit format unless user
\r
5750 // requests 8-bit.
\r
5751 DWORD deviceFormats;
\r
5752 if ( channels + firstChannel == 2 ) {
\r
5753 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
5754 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5755 waveFormat.wBitsPerSample = 8;
\r
5756 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5758 else { // assume 16-bit is supported
\r
5759 waveFormat.wBitsPerSample = 16;
\r
5760 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5763 else { // channel == 1
\r
5764 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
5765 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5766 waveFormat.wBitsPerSample = 8;
\r
5767 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5769 else { // assume 16-bit is supported
\r
5770 waveFormat.wBitsPerSample = 16;
\r
5771 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5774 stream_.userFormat = format;
\r
5776 // Update wave format structure and buffer information.
\r
5777 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5778 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5779 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5781 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5782 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5783 dsBufferSize *= 2;
\r
5785 // Setup the secondary DS buffer description.
\r
5786 DSCBUFFERDESC bufferDescription;
\r
5787 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
5788 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
5789 bufferDescription.dwFlags = 0;
\r
5790 bufferDescription.dwReserved = 0;
\r
5791 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5792 bufferDescription.lpwfxFormat = &waveFormat;
\r
5794 // Create the capture buffer.
\r
5795 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
5796 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
5797 if ( FAILED( result ) ) {
\r
5799 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
5800 errorText_ = errorStream_.str();
\r
5804 // Get the buffer size ... might be different from what we specified.
\r
5805 DSCBCAPS dscbcaps;
\r
5806 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
5807 result = buffer->GetCaps( &dscbcaps );
\r
5808 if ( FAILED( result ) ) {
\r
5810 buffer->Release();
\r
5811 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5812 errorText_ = errorStream_.str();
\r
5816 dsBufferSize = dscbcaps.dwBufferBytes;
\r
5818 // NOTE: We could have a problem here if this is a duplex stream
\r
5819 // and the play and capture hardware buffer sizes are different
\r
5820 // (I'm actually not sure if that is a problem or not).
\r
5821 // Currently, we are not verifying that.
\r
5823 // Lock the capture buffer
\r
5826 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5827 if ( FAILED( result ) ) {
\r
5829 buffer->Release();
\r
5830 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
5831 errorText_ = errorStream_.str();
\r
5835 // Zero the buffer
\r
5836 ZeroMemory( audioPtr, dataLen );
\r
5838 // Unlock the buffer
\r
5839 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5840 if ( FAILED( result ) ) {
\r
5842 buffer->Release();
\r
5843 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
5844 errorText_ = errorStream_.str();
\r
5848 ohandle = (void *) input;
\r
5849 bhandle = (void *) buffer;
\r
5852 // Set various stream parameters
\r
5853 DsHandle *handle = 0;
\r
5854 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
5855 stream_.nUserChannels[mode] = channels;
\r
5856 stream_.bufferSize = *bufferSize;
\r
5857 stream_.channelOffset[mode] = firstChannel;
\r
5858 stream_.deviceInterleaved[mode] = true;
\r
5859 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
5860 else stream_.userInterleaved = true;
\r
5862 // Set flag for buffer conversion
\r
5863 stream_.doConvertBuffer[mode] = false;
\r
5864 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
5865 stream_.doConvertBuffer[mode] = true;
\r
5866 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
5867 stream_.doConvertBuffer[mode] = true;
\r
5868 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5869 stream_.nUserChannels[mode] > 1 )
\r
5870 stream_.doConvertBuffer[mode] = true;
\r
5872 // Allocate necessary internal buffers
\r
5873 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5874 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5875 if ( stream_.userBuffer[mode] == NULL ) {
\r
5876 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
5880 if ( stream_.doConvertBuffer[mode] ) {
\r
5882 bool makeBuffer = true;
\r
5883 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5884 if ( mode == INPUT ) {
\r
5885 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5886 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5887 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
5891 if ( makeBuffer ) {
\r
5892 bufferBytes *= *bufferSize;
\r
5893 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5894 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5895 if ( stream_.deviceBuffer == NULL ) {
\r
5896 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
5902 // Allocate our DsHandle structures for the stream.
\r
5903 if ( stream_.apiHandle == 0 ) {
\r
5905 handle = new DsHandle;
\r
5907 catch ( std::bad_alloc& ) {
\r
5908 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
5912 // Create a manual-reset event.
\r
5913 handle->condition = CreateEvent( NULL, // no security
\r
5914 TRUE, // manual-reset
\r
5915 FALSE, // non-signaled initially
\r
5916 NULL ); // unnamed
\r
5917 stream_.apiHandle = (void *) handle;
\r
5920 handle = (DsHandle *) stream_.apiHandle;
\r
5921 handle->id[mode] = ohandle;
\r
5922 handle->buffer[mode] = bhandle;
\r
5923 handle->dsBufferSize[mode] = dsBufferSize;
\r
5924 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
5926 stream_.device[mode] = device;
\r
5927 stream_.state = STREAM_STOPPED;
\r
5928 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
5929 // We had already set up an output stream.
\r
5930 stream_.mode = DUPLEX;
\r
5932 stream_.mode = mode;
\r
5933 stream_.nBuffers = nBuffers;
\r
5934 stream_.sampleRate = sampleRate;
\r
5936 // Setup the buffer conversion information structure.
\r
5937 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5939 // Setup the callback thread.
\r
5940 if ( stream_.callbackInfo.isRunning == false ) {
\r
5941 unsigned threadId;
\r
5942 stream_.callbackInfo.isRunning = true;
\r
5943 stream_.callbackInfo.object = (void *) this;
\r
5944 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
5945 &stream_.callbackInfo, 0, &threadId );
\r
5946 if ( stream_.callbackInfo.thread == 0 ) {
\r
5947 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
5951 // Boost DS thread priority
\r
5952 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
5958 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
5959 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
5960 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
5961 if ( buffer ) buffer->Release();
\r
5962 object->Release();
\r
5964 if ( handle->buffer[1] ) {
\r
5965 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
5966 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
5967 if ( buffer ) buffer->Release();
\r
5968 object->Release();
\r
5970 CloseHandle( handle->condition );
\r
5972 stream_.apiHandle = 0;
\r
5975 for ( int i=0; i<2; i++ ) {
\r
5976 if ( stream_.userBuffer[i] ) {
\r
5977 free( stream_.userBuffer[i] );
\r
5978 stream_.userBuffer[i] = 0;
\r
5982 if ( stream_.deviceBuffer ) {
\r
5983 free( stream_.deviceBuffer );
\r
5984 stream_.deviceBuffer = 0;
\r
5987 stream_.state = STREAM_CLOSED;
\r
// Close the open stream: stop and join the callback thread, release the
// DirectSound playback/capture objects and buffers, close the condition
// event, free the user and device buffers, and reset the stream state.
// Emits a WARNING (not an error) if no stream is open.
// NOTE(review): extraction is lossy here too (embedded line numbers jump),
// so some braces/statements are missing from this view.
5991 void RtApiDs :: closeStream()

5993   if ( stream_.state == STREAM_CLOSED ) {

5994     errorText_ = "RtApiDs::closeStream(): no open stream to close!";

5995     error( RtAudioError::WARNING );

5999   // Stop the callback thread.

// isRunning = false signals the thread loop to exit; then block until it does.
6000   stream_.callbackInfo.isRunning = false;

6001   WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );

6002   CloseHandle( (HANDLE) stream_.callbackInfo.thread );

6004   DsHandle *handle = (DsHandle *) stream_.apiHandle;

6006     if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

6007       LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

6008       LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6011         buffer->Release();

6013       object->Release();

6015     if ( handle->buffer[1] ) {

6016       LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

6017       LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6020         buffer->Release();

6022       object->Release();

6024     CloseHandle( handle->condition );

6026     stream_.apiHandle = 0;

// Free per-direction user buffers (index 0 = output, 1 = input).
6029   for ( int i=0; i<2; i++ ) {

6030     if ( stream_.userBuffer[i] ) {

6031       free( stream_.userBuffer[i] );

6032       stream_.userBuffer[i] = 0;

6036   if ( stream_.deviceBuffer ) {

6037     free( stream_.deviceBuffer );

6038     stream_.deviceBuffer = 0;

6041   stream_.mode = UNINITIALIZED;

6042   stream_.state = STREAM_CLOSED;
\r
// Start the open stream: raise timer resolution, compute the duplex preroll,
// start the DS playback buffer (looping) and/or capture buffer, reset the
// drain state and condition event, and mark the stream RUNNING.
// Emits a WARNING if the stream is already running; reports SYSTEM_ERROR if
// any DirectSound call failed.
6045 void RtApiDs :: startStream()

6048   if ( stream_.state == STREAM_RUNNING ) {

6049     errorText_ = "RtApiDs::startStream(): the stream is already running!";

6050     error( RtAudioError::WARNING );

6054   DsHandle *handle = (DsHandle *) stream_.apiHandle;

6056   // Increase scheduler frequency on lesser windows (a side-effect of

6057   // increasing timer accuracy). On greater windows (Win2K or later),

6058   // this is already in effect.

6059   timeBeginPeriod( 1 );

6061   buffersRolling = false;

6062   duplexPrerollBytes = 0;

6064   if ( stream_.mode == DUPLEX ) {

6065     // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.

6066     duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );

6069   HRESULT result = 0;

6070   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

6072     LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

// Looping playback: the callback continuously refills the circular buffer.
6073     result = buffer->Play( 0, 0, DSBPLAY_LOOPING );

6074     if ( FAILED( result ) ) {

6075       errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";

6076       errorText_ = errorStream_.str();

6081   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6083     LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6084     result = buffer->Start( DSCBSTART_LOOPING );

6085     if ( FAILED( result ) ) {

6086       errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";

6087       errorText_ = errorStream_.str();

6092   handle->drainCounter = 0;

6093   handle->internalDrain = false;

// Manual-reset event used by stopStream() to wait for drain completion.
6094   ResetEvent( handle->condition );

6095   stream_.state = STREAM_RUNNING;

6098   if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the running stream: for output, optionally wait for the callback to
// drain pending audio (signaled via handle->condition), then stop each DS
// buffer, zero its contents so a restart does not replay stale data, and
// reset the buffer pointers. Restores normal timer resolution and reports
// SYSTEM_ERROR if any DirectSound call failed.
// NOTE(review): lossy extraction — `LPVOID audioPtr; DWORD dataLen;`
// declarations and several closing braces are on dropped lines.
6101 void RtApiDs :: stopStream()

6104   if ( stream_.state == STREAM_STOPPED ) {

6105     errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";

6106     error( RtAudioError::WARNING );

6110   HRESULT result = 0;

6113   DsHandle *handle = (DsHandle *) stream_.apiHandle;

6114   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// drainCounter == 0 means no drain in progress: request one (value 2) and
// block until the callback signals that playback has drained.
6115     if ( handle->drainCounter == 0 ) {

6116       handle->drainCounter = 2;

6117       WaitForSingleObject( handle->condition, INFINITE );  // block until signaled

6120     stream_.state = STREAM_STOPPED;

6122     MUTEX_LOCK( &stream_.mutex );

6124     // Stop the buffer and clear memory

6125     LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6126     result = buffer->Stop();

6127     if ( FAILED( result ) ) {

6128       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";

6129       errorText_ = errorStream_.str();

6133     // Lock the buffer and clear it so that if we start to play again,

6134     // we won't have old data playing.

6135     result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );

6136     if ( FAILED( result ) ) {

6137       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";

6138       errorText_ = errorStream_.str();

6142     // Zero the DS buffer

6143     ZeroMemory( audioPtr, dataLen );

6145     // Unlock the DS buffer

6146     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6147     if ( FAILED( result ) ) {

6148       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";

6149       errorText_ = errorStream_.str();

6153     // If we start playing again, we must begin at beginning of buffer.

6154     handle->bufferPointer[0] = 0;

6157   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6158     LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6162     stream_.state = STREAM_STOPPED;

// In DUPLEX mode the mutex was already taken in the output branch above.
6164     if ( stream_.mode != DUPLEX )

6165       MUTEX_LOCK( &stream_.mutex );

6167     result = buffer->Stop();

6168     if ( FAILED( result ) ) {

6169       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";

6170       errorText_ = errorStream_.str();

6174     // Lock the buffer and clear it so that if we start to play again,

6175     // we won't have old data playing.

6176     result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );

6177     if ( FAILED( result ) ) {

6178       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";

6179       errorText_ = errorStream_.str();

6183     // Zero the DS buffer

6184     ZeroMemory( audioPtr, dataLen );

6186     // Unlock the DS buffer

6187     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6188     if ( FAILED( result ) ) {

6189       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";

6190       errorText_ = errorStream_.str();

6194     // If we start recording again, we must begin at beginning of buffer.

6195     handle->bufferPointer[1] = 0;

6199   timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.

6200   MUTEX_UNLOCK( &stream_.mutex );

6202   if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the running stream: set drainCounter to 2 so the callback skips the
// user-callback drain and the stream stops immediately. Emits a WARNING if
// already stopped. (The delegating `stopStream()` call that follows in the
// original source is on a line dropped from this extraction.)
6205 void RtApiDs :: abortStream()

6208   if ( stream_.state == STREAM_STOPPED ) {

6209     errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";

6210     error( RtAudioError::WARNING );

6214   DsHandle *handle = (DsHandle *) stream_.apiHandle;

6215   handle->drainCounter = 2;
\r
6220 void RtApiDs :: callbackEvent()
\r
6222 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6223 Sleep( 50 ); // sleep 50 milliseconds
\r
6227 if ( stream_.state == STREAM_CLOSED ) {
\r
6228 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6229 error( RtAudioError::WARNING );
\r
6233 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6234 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6236 // Check if we were draining the stream and signal is finished.
\r
6237 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6239 stream_.state = STREAM_STOPPING;
\r
6240 if ( handle->internalDrain == false )
\r
6241 SetEvent( handle->condition );
\r
6247 // Invoke user callback to get fresh output data UNLESS we are
\r
6248 // draining stream.
\r
6249 if ( handle->drainCounter == 0 ) {
\r
6250 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6251 double streamTime = getStreamTime();
\r
6252 RtAudioStreamStatus status = 0;
\r
6253 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6254 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6255 handle->xrun[0] = false;
\r
6257 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6258 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6259 handle->xrun[1] = false;
\r
6261 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6262 stream_.bufferSize, streamTime, status, info->userData );
\r
6263 if ( cbReturnValue == 2 ) {
\r
6264 stream_.state = STREAM_STOPPING;
\r
6265 handle->drainCounter = 2;
\r
6269 else if ( cbReturnValue == 1 ) {
\r
6270 handle->drainCounter = 1;
\r
6271 handle->internalDrain = true;
\r
6276 DWORD currentWritePointer, safeWritePointer;
\r
6277 DWORD currentReadPointer, safeReadPointer;
\r
6278 UINT nextWritePointer;
\r
6280 LPVOID buffer1 = NULL;
\r
6281 LPVOID buffer2 = NULL;
\r
6282 DWORD bufferSize1 = 0;
\r
6283 DWORD bufferSize2 = 0;
\r
6288 MUTEX_LOCK( &stream_.mutex );
\r
6289 if ( stream_.state == STREAM_STOPPED ) {
\r
6290 MUTEX_UNLOCK( &stream_.mutex );
\r
6294 if ( buffersRolling == false ) {
\r
6295 if ( stream_.mode == DUPLEX ) {
\r
6296 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6298 // It takes a while for the devices to get rolling. As a result,
\r
6299 // there's no guarantee that the capture and write device pointers
\r
6300 // will move in lockstep. Wait here for both devices to start
\r
6301 // rolling, and then set our buffer pointers accordingly.
\r
6302 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6303 // bytes later than the write buffer.
\r
6305 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6306 // take place between the two GetCurrentPosition calls... but I'm
\r
6307 // really not sure how to solve the problem. Temporarily boost to
\r
6308 // Realtime priority, maybe; but I'm not sure what priority the
\r
6309 // DirectSound service threads run at. We *should* be roughly
\r
6310 // within a ms or so of correct.
\r
6312 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6313 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6315 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6317 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6318 if ( FAILED( result ) ) {
\r
6319 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6320 errorText_ = errorStream_.str();
\r
6321 MUTEX_UNLOCK( &stream_.mutex );
\r
6322 error( RtAudioError::SYSTEM_ERROR );
\r
6325 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6326 if ( FAILED( result ) ) {
\r
6327 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6328 errorText_ = errorStream_.str();
\r
6329 MUTEX_UNLOCK( &stream_.mutex );
\r
6330 error( RtAudioError::SYSTEM_ERROR );
\r
6334 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6335 if ( FAILED( result ) ) {
\r
6336 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6337 errorText_ = errorStream_.str();
\r
6338 MUTEX_UNLOCK( &stream_.mutex );
\r
6339 error( RtAudioError::SYSTEM_ERROR );
\r
6342 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6343 if ( FAILED( result ) ) {
\r
6344 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6345 errorText_ = errorStream_.str();
\r
6346 MUTEX_UNLOCK( &stream_.mutex );
\r
6347 error( RtAudioError::SYSTEM_ERROR );
\r
6350 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6354 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6356 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6357 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6358 handle->bufferPointer[1] = safeReadPointer;
\r
6360 else if ( stream_.mode == OUTPUT ) {
\r
6362 // Set the proper nextWritePosition after initial startup.
\r
6363 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6364 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6365 if ( FAILED( result ) ) {
\r
6366 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6367 errorText_ = errorStream_.str();
\r
6368 MUTEX_UNLOCK( &stream_.mutex );
\r
6369 error( RtAudioError::SYSTEM_ERROR );
\r
6372 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6373 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6376 buffersRolling = true;
\r
6379 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6381 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6383 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6384 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6385 bufferBytes *= formatBytes( stream_.userFormat );
\r
6386 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6389 // Setup parameters and do buffer conversion if necessary.
\r
6390 if ( stream_.doConvertBuffer[0] ) {
\r
6391 buffer = stream_.deviceBuffer;
\r
6392 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6393 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6394 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6397 buffer = stream_.userBuffer[0];
\r
6398 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6399 bufferBytes *= formatBytes( stream_.userFormat );
\r
6402 // No byte swapping necessary in DirectSound implementation.
\r
6404 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6405 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6407 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6408 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6410 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6411 nextWritePointer = handle->bufferPointer[0];
\r
6413 DWORD endWrite, leadPointer;
\r
6415 // Find out where the read and "safe write" pointers are.
\r
6416 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6417 if ( FAILED( result ) ) {
\r
6418 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6419 errorText_ = errorStream_.str();
\r
6420 error( RtAudioError::SYSTEM_ERROR );
\r
6424 // We will copy our output buffer into the region between
\r
6425 // safeWritePointer and leadPointer. If leadPointer is not
\r
6426 // beyond the next endWrite position, wait until it is.
\r
6427 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6428 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6429 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6430 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6431 endWrite = nextWritePointer + bufferBytes;
\r
6433 // Check whether the entire write region is behind the play pointer.
\r
6434 if ( leadPointer >= endWrite ) break;
\r
6436 // If we are here, then we must wait until the leadPointer advances
\r
6437 // beyond the end of our next write region. We use the
\r
6438 // Sleep() function to suspend operation until that happens.
\r
6439 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6440 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6441 if ( millis < 1.0 ) millis = 1.0;
\r
6442 Sleep( (DWORD) millis );
\r
6445 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6446 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6447 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6448 handle->xrun[0] = true;
\r
6449 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6450 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6451 handle->bufferPointer[0] = nextWritePointer;
\r
6452 endWrite = nextWritePointer + bufferBytes;
\r
6455 // Lock free space in the buffer
\r
6456 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6457 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6458 if ( FAILED( result ) ) {
\r
6459 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6460 errorText_ = errorStream_.str();
\r
6461 MUTEX_UNLOCK( &stream_.mutex );
\r
6462 error( RtAudioError::SYSTEM_ERROR );
\r
6466 // Copy our buffer into the DS buffer
\r
6467 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6468 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6470 // Update our buffer offset and unlock sound buffer
\r
6471 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6472 if ( FAILED( result ) ) {
\r
6473 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6474 errorText_ = errorStream_.str();
\r
6475 MUTEX_UNLOCK( &stream_.mutex );
\r
6476 error( RtAudioError::SYSTEM_ERROR );
\r
6479 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6480 handle->bufferPointer[0] = nextWritePointer;
\r
6483 // Don't bother draining input
\r
6484 if ( handle->drainCounter ) {
\r
6485 handle->drainCounter++;
\r
6489 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6491 // Setup parameters.
\r
6492 if ( stream_.doConvertBuffer[1] ) {
\r
6493 buffer = stream_.deviceBuffer;
\r
6494 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6495 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6498 buffer = stream_.userBuffer[1];
\r
6499 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6500 bufferBytes *= formatBytes( stream_.userFormat );
\r
6503 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6504 long nextReadPointer = handle->bufferPointer[1];
\r
6505 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6507 // Find out where the write and "safe read" pointers are.
\r
6508 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6509 if ( FAILED( result ) ) {
\r
6510 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6511 errorText_ = errorStream_.str();
\r
6512 MUTEX_UNLOCK( &stream_.mutex );
\r
6513 error( RtAudioError::SYSTEM_ERROR );
\r
6517 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6518 DWORD endRead = nextReadPointer + bufferBytes;
\r
6520 // Handling depends on whether we are INPUT or DUPLEX.
\r
6521 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6522 // then a wait here will drag the write pointers into the forbidden zone.
\r
6524 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6525 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6526 // practical way to sync up the read and write pointers reliably, given the
\r
6527 // the very complex relationship between phase and increment of the read and write
\r
6530 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6531 // provide a pre-roll period of 0.5 seconds in which we return
\r
6532 // zeros from the read buffer while the pointers sync up.
\r
6534 if ( stream_.mode == DUPLEX ) {
\r
6535 if ( safeReadPointer < endRead ) {
\r
6536 if ( duplexPrerollBytes <= 0 ) {
\r
6537 // Pre-roll time over. Be more agressive.
\r
6538 int adjustment = endRead-safeReadPointer;
\r
6540 handle->xrun[1] = true;
\r
6542 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6543 // and perform fine adjustments later.
\r
6544 // - small adjustments: back off by twice as much.
\r
6545 if ( adjustment >= 2*bufferBytes )
\r
6546 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6548 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6550 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6554 // In pre=roll time. Just do it.
\r
6555 nextReadPointer = safeReadPointer - bufferBytes;
\r
6556 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6558 endRead = nextReadPointer + bufferBytes;
\r
6561 else { // mode == INPUT
\r
6562 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6563 // See comments for playback.
\r
6564 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6565 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6566 if ( millis < 1.0 ) millis = 1.0;
\r
6567 Sleep( (DWORD) millis );
\r
6569 // Wake up and find out where we are now.
\r
6570 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6571 if ( FAILED( result ) ) {
\r
6572 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6573 errorText_ = errorStream_.str();
\r
6574 MUTEX_UNLOCK( &stream_.mutex );
\r
6575 error( RtAudioError::SYSTEM_ERROR );
\r
6579 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6583 // Lock free space in the buffer
\r
6584 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6585 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6586 if ( FAILED( result ) ) {
\r
6587 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6588 errorText_ = errorStream_.str();
\r
6589 MUTEX_UNLOCK( &stream_.mutex );
\r
6590 error( RtAudioError::SYSTEM_ERROR );
\r
6594 if ( duplexPrerollBytes <= 0 ) {
\r
6595 // Copy our buffer into the DS buffer
\r
6596 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6597 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6600 memset( buffer, 0, bufferSize1 );
\r
6601 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6602 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6605 // Update our buffer offset and unlock sound buffer
\r
6606 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6607 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6608 if ( FAILED( result ) ) {
\r
6609 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6610 errorText_ = errorStream_.str();
\r
6611 MUTEX_UNLOCK( &stream_.mutex );
\r
6612 error( RtAudioError::SYSTEM_ERROR );
\r
6615 handle->bufferPointer[1] = nextReadPointer;
\r
6617 // No byte swapping necessary in DirectSound implementation.
\r
6619 // If necessary, convert 8-bit data from unsigned to signed.
\r
6620 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6621 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6623 // Do buffer conversion if necessary.
\r
6624 if ( stream_.doConvertBuffer[1] )
\r
6625 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6629 MUTEX_UNLOCK( &stream_.mutex );
\r
6630 RtApi::tickStreamTime();
\r
6633 // Definitions for utility functions and callbacks
\r
6634 // specific to the DirectSound implementation.
\r
6636 static unsigned __stdcall callbackHandler( void *ptr )
\r
6638 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6639 RtApiDs *object = (RtApiDs *) info->object;
\r
6640 bool* isRunning = &info->isRunning;
\r
6642 while ( *isRunning == true ) {
\r
6643 object->callbackEvent();
\r
6646 _endthreadex( 0 );
\r
6650 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6651 LPCTSTR description,
\r
6652 LPCTSTR /*module*/,
\r
6653 LPVOID lpContext )
\r
6655 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6656 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6659 bool validDevice = false;
\r
6660 if ( probeInfo.isInput == true ) {
\r
6662 LPDIRECTSOUNDCAPTURE object;
\r
6664 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6665 if ( hr != DS_OK ) return TRUE;
\r
6667 caps.dwSize = sizeof(caps);
\r
6668 hr = object->GetCaps( &caps );
\r
6669 if ( hr == DS_OK ) {
\r
6670 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6671 validDevice = true;
\r
6673 object->Release();
\r
6677 LPDIRECTSOUND object;
\r
6678 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6679 if ( hr != DS_OK ) return TRUE;
\r
6681 caps.dwSize = sizeof(caps);
\r
6682 hr = object->GetCaps( &caps );
\r
6683 if ( hr == DS_OK ) {
\r
6684 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6685 validDevice = true;
\r
6687 object->Release();
\r
6690 // If good device, then save its name and guid.
\r
6691 std::string name = convertCharPointerToStdString( description );
\r
6692 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6693 if ( lpguid == NULL )
\r
6694 name = "Default Device";
\r
6695 if ( validDevice ) {
\r
6696 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6697 if ( dsDevices[i].name == name ) {
\r
6698 dsDevices[i].found = true;
\r
6699 if ( probeInfo.isInput ) {
\r
6700 dsDevices[i].id[1] = lpguid;
\r
6701 dsDevices[i].validId[1] = true;
\r
6704 dsDevices[i].id[0] = lpguid;
\r
6705 dsDevices[i].validId[0] = true;
\r
6712 device.name = name;
\r
6713 device.found = true;
\r
6714 if ( probeInfo.isInput ) {
\r
6715 device.id[1] = lpguid;
\r
6716 device.validId[1] = true;
\r
6719 device.id[0] = lpguid;
\r
6720 device.validId[0] = true;
\r
6722 dsDevices.push_back( device );
\r
6728 static const char* getErrorString( int code )
\r
6732 case DSERR_ALLOCATED:
\r
6733 return "Already allocated";
\r
6735 case DSERR_CONTROLUNAVAIL:
\r
6736 return "Control unavailable";
\r
6738 case DSERR_INVALIDPARAM:
\r
6739 return "Invalid parameter";
\r
6741 case DSERR_INVALIDCALL:
\r
6742 return "Invalid call";
\r
6744 case DSERR_GENERIC:
\r
6745 return "Generic error";
\r
6747 case DSERR_PRIOLEVELNEEDED:
\r
6748 return "Priority level needed";
\r
6750 case DSERR_OUTOFMEMORY:
\r
6751 return "Out of memory";
\r
6753 case DSERR_BADFORMAT:
\r
6754 return "The sample rate or the channel format is not supported";
\r
6756 case DSERR_UNSUPPORTED:
\r
6757 return "Not supported";
\r
6759 case DSERR_NODRIVER:
\r
6760 return "No driver";
\r
6762 case DSERR_ALREADYINITIALIZED:
\r
6763 return "Already initialized";
\r
6765 case DSERR_NOAGGREGATION:
\r
6766 return "No aggregation";
\r
6768 case DSERR_BUFFERLOST:
\r
6769 return "Buffer lost";
\r
6771 case DSERR_OTHERAPPHASPRIO:
\r
6772 return "Another application already has priority";
\r
6774 case DSERR_UNINITIALIZED:
\r
6775 return "Uninitialized";
\r
6778 return "DirectSound unknown error";
\r
6781 //******************** End of __WINDOWS_DS__ *********************//
\r
6785 #if defined(__LINUX_ALSA__)
\r
6787 #include <alsa/asoundlib.h>
\r
6788 #include <unistd.h>
\r
6790 // A structure to hold various information related to the ALSA API
\r
6791 // implementation.
\r
6792 struct AlsaHandle {
\r
6793 snd_pcm_t *handles[2];
\r
6794 bool synchronized;
\r
6796 pthread_cond_t runnable_cv;
\r
6800 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
// Forward declaration of the thread routine that services the ALSA callback loop.
static void *alsaCallbackHandler( void * ptr );
\r
6805 RtApiAlsa :: RtApiAlsa()
\r
6807 // Nothing to do here.
\r
6810 RtApiAlsa :: ~RtApiAlsa()
\r
6812 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6815 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6817 unsigned nDevices = 0;
\r
6818 int result, subdevice, card;
\r
6820 snd_ctl_t *handle;
\r
6822 // Count cards and devices
\r
6824 snd_card_next( &card );
\r
6825 while ( card >= 0 ) {
\r
6826 sprintf( name, "hw:%d", card );
\r
6827 result = snd_ctl_open( &handle, name, 0 );
\r
6828 if ( result < 0 ) {
\r
6829 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6830 errorText_ = errorStream_.str();
\r
6831 error( RtAudioError::WARNING );
\r
6836 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6837 if ( result < 0 ) {
\r
6838 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6839 errorText_ = errorStream_.str();
\r
6840 error( RtAudioError::WARNING );
\r
6843 if ( subdevice < 0 )
\r
6848 snd_ctl_close( handle );
\r
6849 snd_card_next( &card );
\r
6852 result = snd_ctl_open( &handle, "default", 0 );
\r
6853 if (result == 0) {
\r
6855 snd_ctl_close( handle );
\r
6861 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6863 RtAudio::DeviceInfo info;
\r
6864 info.probed = false;
\r
6866 unsigned nDevices = 0;
\r
6867 int result, subdevice, card;
\r
6869 snd_ctl_t *chandle;
\r
6871 // Count cards and devices
\r
6874 snd_card_next( &card );
\r
6875 while ( card >= 0 ) {
\r
6876 sprintf( name, "hw:%d", card );
\r
6877 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6878 if ( result < 0 ) {
\r
6879 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6880 errorText_ = errorStream_.str();
\r
6881 error( RtAudioError::WARNING );
\r
6886 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6887 if ( result < 0 ) {
\r
6888 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6889 errorText_ = errorStream_.str();
\r
6890 error( RtAudioError::WARNING );
\r
6893 if ( subdevice < 0 ) break;
\r
6894 if ( nDevices == device ) {
\r
6895 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6901 snd_ctl_close( chandle );
\r
6902 snd_card_next( &card );
\r
6905 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6906 if ( result == 0 ) {
\r
6907 if ( nDevices == device ) {
\r
6908 strcpy( name, "default" );
\r
6914 if ( nDevices == 0 ) {
\r
6915 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6916 error( RtAudioError::INVALID_USE );
\r
6920 if ( device >= nDevices ) {
\r
6921 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6922 error( RtAudioError::INVALID_USE );
\r
6928 // If a stream is already open, we cannot probe the stream devices.
\r
6929 // Thus, use the saved results.
\r
6930 if ( stream_.state != STREAM_CLOSED &&
\r
6931 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6932 snd_ctl_close( chandle );
\r
6933 if ( device >= devices_.size() ) {
\r
6934 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6935 error( RtAudioError::WARNING );
\r
6938 return devices_[ device ];
\r
6941 int openMode = SND_PCM_ASYNC;
\r
6942 snd_pcm_stream_t stream;
\r
6943 snd_pcm_info_t *pcminfo;
\r
6944 snd_pcm_info_alloca( &pcminfo );
\r
6945 snd_pcm_t *phandle;
\r
6946 snd_pcm_hw_params_t *params;
\r
6947 snd_pcm_hw_params_alloca( ¶ms );
\r
6949 // First try for playback unless default device (which has subdev -1)
\r
6950 stream = SND_PCM_STREAM_PLAYBACK;
\r
6951 snd_pcm_info_set_stream( pcminfo, stream );
\r
6952 if ( subdevice != -1 ) {
\r
6953 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6954 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6956 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6957 if ( result < 0 ) {
\r
6958 // Device probably doesn't support playback.
\r
6959 goto captureProbe;
\r
6963 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6964 if ( result < 0 ) {
\r
6965 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6966 errorText_ = errorStream_.str();
\r
6967 error( RtAudioError::WARNING );
\r
6968 goto captureProbe;
\r
6971 // The device is open ... fill the parameter structure.
\r
6972 result = snd_pcm_hw_params_any( phandle, params );
\r
6973 if ( result < 0 ) {
\r
6974 snd_pcm_close( phandle );
\r
6975 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6976 errorText_ = errorStream_.str();
\r
6977 error( RtAudioError::WARNING );
\r
6978 goto captureProbe;
\r
6981 // Get output channel information.
\r
6982 unsigned int value;
\r
6983 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6984 if ( result < 0 ) {
\r
6985 snd_pcm_close( phandle );
\r
6986 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
6987 errorText_ = errorStream_.str();
\r
6988 error( RtAudioError::WARNING );
\r
6989 goto captureProbe;
\r
6991 info.outputChannels = value;
\r
6992 snd_pcm_close( phandle );
\r
6995 stream = SND_PCM_STREAM_CAPTURE;
\r
6996 snd_pcm_info_set_stream( pcminfo, stream );
\r
6998 // Now try for capture unless default device (with subdev = -1)
\r
6999 if ( subdevice != -1 ) {
\r
7000 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
7001 snd_ctl_close( chandle );
\r
7002 if ( result < 0 ) {
\r
7003 // Device probably doesn't support capture.
\r
7004 if ( info.outputChannels == 0 ) return info;
\r
7005 goto probeParameters;
\r
7009 snd_ctl_close( chandle );
\r
7011 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7012 if ( result < 0 ) {
\r
7013 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7014 errorText_ = errorStream_.str();
\r
7015 error( RtAudioError::WARNING );
\r
7016 if ( info.outputChannels == 0 ) return info;
\r
7017 goto probeParameters;
\r
7020 // The device is open ... fill the parameter structure.
\r
7021 result = snd_pcm_hw_params_any( phandle, params );
\r
7022 if ( result < 0 ) {
\r
7023 snd_pcm_close( phandle );
\r
7024 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7025 errorText_ = errorStream_.str();
\r
7026 error( RtAudioError::WARNING );
\r
7027 if ( info.outputChannels == 0 ) return info;
\r
7028 goto probeParameters;
\r
7031 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7032 if ( result < 0 ) {
\r
7033 snd_pcm_close( phandle );
\r
7034 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
7035 errorText_ = errorStream_.str();
\r
7036 error( RtAudioError::WARNING );
\r
7037 if ( info.outputChannels == 0 ) return info;
\r
7038 goto probeParameters;
\r
7040 info.inputChannels = value;
\r
7041 snd_pcm_close( phandle );
\r
7043 // If device opens for both playback and capture, we determine the channels.
\r
7044 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
7045 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
7047 // ALSA doesn't provide default devices so we'll use the first available one.
\r
7048 if ( device == 0 && info.outputChannels > 0 )
\r
7049 info.isDefaultOutput = true;
\r
7050 if ( device == 0 && info.inputChannels > 0 )
\r
7051 info.isDefaultInput = true;
\r
7054 // At this point, we just need to figure out the supported data
\r
7055 // formats and sample rates. We'll proceed by opening the device in
\r
7056 // the direction with the maximum number of channels, or playback if
\r
7057 // they are equal. This might limit our sample rate options, but so
\r
7060 if ( info.outputChannels >= info.inputChannels )
\r
7061 stream = SND_PCM_STREAM_PLAYBACK;
\r
7063 stream = SND_PCM_STREAM_CAPTURE;
\r
7064 snd_pcm_info_set_stream( pcminfo, stream );
\r
7066 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7067 if ( result < 0 ) {
\r
7068 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7069 errorText_ = errorStream_.str();
\r
7070 error( RtAudioError::WARNING );
\r
7074 // The device is open ... fill the parameter structure.
\r
7075 result = snd_pcm_hw_params_any( phandle, params );
\r
7076 if ( result < 0 ) {
\r
7077 snd_pcm_close( phandle );
\r
7078 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7079 errorText_ = errorStream_.str();
\r
7080 error( RtAudioError::WARNING );
\r
7084 // Test our discrete set of sample rate values.
\r
7085 info.sampleRates.clear();
\r
7086 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7087 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
\r
7088 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7090 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
7091 info.preferredSampleRate = SAMPLE_RATES[i];
\r
7094 if ( info.sampleRates.size() == 0 ) {
\r
7095 snd_pcm_close( phandle );
\r
7096 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7097 errorText_ = errorStream_.str();
\r
7098 error( RtAudioError::WARNING );
\r
7102 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7103 snd_pcm_format_t format;
\r
7104 info.nativeFormats = 0;
\r
7105 format = SND_PCM_FORMAT_S8;
\r
7106 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7107 info.nativeFormats |= RTAUDIO_SINT8;
\r
7108 format = SND_PCM_FORMAT_S16;
\r
7109 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7110 info.nativeFormats |= RTAUDIO_SINT16;
\r
7111 format = SND_PCM_FORMAT_S24;
\r
7112 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7113 info.nativeFormats |= RTAUDIO_SINT24;
\r
7114 format = SND_PCM_FORMAT_S32;
\r
7115 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7116 info.nativeFormats |= RTAUDIO_SINT32;
\r
7117 format = SND_PCM_FORMAT_FLOAT;
\r
7118 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7119 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7120 format = SND_PCM_FORMAT_FLOAT64;
\r
7121 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7122 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7124 // Check that we have at least one supported format
\r
7125 if ( info.nativeFormats == 0 ) {
\r
7126 snd_pcm_close( phandle );
\r
7127 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7128 errorText_ = errorStream_.str();
\r
7129 error( RtAudioError::WARNING );
\r
7133 // Get the device name
\r
7135 result = snd_card_get_name( card, &cardname );
\r
7136 if ( result >= 0 ) {
\r
7137 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7142 // That's all ... close the device and return
\r
7143 snd_pcm_close( phandle );
\r
7144 info.probed = true;
\r
7148 void RtApiAlsa :: saveDeviceInfo( void )
\r
7152 unsigned int nDevices = getDeviceCount();
\r
7153 devices_.resize( nDevices );
\r
7154 for ( unsigned int i=0; i<nDevices; i++ )
\r
7155 devices_[i] = getDeviceInfo( i );
\r
7158 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7159 unsigned int firstChannel, unsigned int sampleRate,
\r
7160 RtAudioFormat format, unsigned int *bufferSize,
\r
7161 RtAudio::StreamOptions *options )
\r
7164 #if defined(__RTAUDIO_DEBUG__)
\r
7165 snd_output_t *out;
\r
7166 snd_output_stdio_attach(&out, stderr, 0);
\r
7169 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7171 unsigned nDevices = 0;
\r
7172 int result, subdevice, card;
\r
7174 snd_ctl_t *chandle;
\r
7176 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7177 snprintf(name, sizeof(name), "%s", "default");
\r
7179 // Count cards and devices
\r
7181 snd_card_next( &card );
\r
7182 while ( card >= 0 ) {
\r
7183 sprintf( name, "hw:%d", card );
\r
7184 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7185 if ( result < 0 ) {
\r
7186 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7187 errorText_ = errorStream_.str();
\r
7192 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7193 if ( result < 0 ) break;
\r
7194 if ( subdevice < 0 ) break;
\r
7195 if ( nDevices == device ) {
\r
7196 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7197 snd_ctl_close( chandle );
\r
7202 snd_ctl_close( chandle );
\r
7203 snd_card_next( &card );
\r
7206 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7207 if ( result == 0 ) {
\r
7208 if ( nDevices == device ) {
\r
7209 strcpy( name, "default" );
\r
7215 if ( nDevices == 0 ) {
\r
7216 // This should not happen because a check is made before this function is called.
\r
7217 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7221 if ( device >= nDevices ) {
\r
7222 // This should not happen because a check is made before this function is called.
\r
7223 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7230 // The getDeviceInfo() function will not work for a device that is
\r
7231 // already open. Thus, we'll probe the system before opening a
\r
7232 // stream and save the results for use by getDeviceInfo().
\r
7233 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7234 this->saveDeviceInfo();
\r
7236 snd_pcm_stream_t stream;
\r
7237 if ( mode == OUTPUT )
\r
7238 stream = SND_PCM_STREAM_PLAYBACK;
\r
7240 stream = SND_PCM_STREAM_CAPTURE;
\r
7242 snd_pcm_t *phandle;
\r
7243 int openMode = SND_PCM_ASYNC;
\r
7244 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7245 if ( result < 0 ) {
\r
7246 if ( mode == OUTPUT )
\r
7247 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7249 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7250 errorText_ = errorStream_.str();
\r
7254 // Fill the parameter structure.
\r
7255 snd_pcm_hw_params_t *hw_params;
\r
7256 snd_pcm_hw_params_alloca( &hw_params );
\r
7257 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7258 if ( result < 0 ) {
\r
7259 snd_pcm_close( phandle );
\r
7260 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7261 errorText_ = errorStream_.str();
\r
7265 #if defined(__RTAUDIO_DEBUG__)
\r
7266 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7267 snd_pcm_hw_params_dump( hw_params, out );
\r
7270 // Set access ... check user preference.
\r
7271 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7272 stream_.userInterleaved = false;
\r
7273 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7274 if ( result < 0 ) {
\r
7275 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7276 stream_.deviceInterleaved[mode] = true;
\r
7279 stream_.deviceInterleaved[mode] = false;
\r
7282 stream_.userInterleaved = true;
\r
7283 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7284 if ( result < 0 ) {
\r
7285 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7286 stream_.deviceInterleaved[mode] = false;
\r
7289 stream_.deviceInterleaved[mode] = true;
\r
7292 if ( result < 0 ) {
\r
7293 snd_pcm_close( phandle );
\r
7294 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7295 errorText_ = errorStream_.str();
\r
7299 // Determine how to set the device format.
\r
7300 stream_.userFormat = format;
\r
7301 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7303 if ( format == RTAUDIO_SINT8 )
\r
7304 deviceFormat = SND_PCM_FORMAT_S8;
\r
7305 else if ( format == RTAUDIO_SINT16 )
\r
7306 deviceFormat = SND_PCM_FORMAT_S16;
\r
7307 else if ( format == RTAUDIO_SINT24 )
\r
7308 deviceFormat = SND_PCM_FORMAT_S24;
\r
7309 else if ( format == RTAUDIO_SINT32 )
\r
7310 deviceFormat = SND_PCM_FORMAT_S32;
\r
7311 else if ( format == RTAUDIO_FLOAT32 )
\r
7312 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7313 else if ( format == RTAUDIO_FLOAT64 )
\r
7314 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7316 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7317 stream_.deviceFormat[mode] = format;
\r
7321 // The user requested format is not natively supported by the device.
\r
7322 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7323 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7324 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7328 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7329 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7330 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7334 deviceFormat = SND_PCM_FORMAT_S32;
\r
7335 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7336 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7340 deviceFormat = SND_PCM_FORMAT_S24;
\r
7341 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7342 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7346 deviceFormat = SND_PCM_FORMAT_S16;
\r
7347 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7348 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7352 deviceFormat = SND_PCM_FORMAT_S8;
\r
7353 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7354 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7358 // If we get here, no supported format was found.
\r
7359 snd_pcm_close( phandle );
\r
7360 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7361 errorText_ = errorStream_.str();
\r
7365 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7366 if ( result < 0 ) {
\r
7367 snd_pcm_close( phandle );
\r
7368 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7369 errorText_ = errorStream_.str();
\r
7373 // Determine whether byte-swaping is necessary.
\r
7374 stream_.doByteSwap[mode] = false;
\r
7375 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7376 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7377 if ( result == 0 )
\r
7378 stream_.doByteSwap[mode] = true;
\r
7379 else if (result < 0) {
\r
7380 snd_pcm_close( phandle );
\r
7381 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7382 errorText_ = errorStream_.str();
\r
7387 // Set the sample rate.
\r
7388 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7389 if ( result < 0 ) {
\r
7390 snd_pcm_close( phandle );
\r
7391 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7392 errorText_ = errorStream_.str();
\r
7396 // Determine the number of channels for this device. We support a possible
\r
7397 // minimum device channel number > than the value requested by the user.
\r
7398 stream_.nUserChannels[mode] = channels;
\r
7399 unsigned int value;
\r
7400 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7401 unsigned int deviceChannels = value;
\r
7402 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7403 snd_pcm_close( phandle );
\r
7404 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7405 errorText_ = errorStream_.str();
\r
7409 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7410 if ( result < 0 ) {
\r
7411 snd_pcm_close( phandle );
\r
7412 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7413 errorText_ = errorStream_.str();
\r
7416 deviceChannels = value;
\r
7417 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7418 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7420 // Set the device channels.
\r
7421 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7422 if ( result < 0 ) {
\r
7423 snd_pcm_close( phandle );
\r
7424 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7425 errorText_ = errorStream_.str();
\r
7429 // Set the buffer (or period) size.
\r
7431 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7432 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7433 if ( result < 0 ) {
\r
7434 snd_pcm_close( phandle );
\r
7435 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7436 errorText_ = errorStream_.str();
\r
7439 *bufferSize = periodSize;
\r
7441 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7442 unsigned int periods = 0;
\r
7443 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7444 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7445 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7446 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7447 if ( result < 0 ) {
\r
7448 snd_pcm_close( phandle );
\r
7449 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7450 errorText_ = errorStream_.str();
\r
7454 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7455 // MUST be the same in both directions!
\r
7456 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7457 snd_pcm_close( phandle );
\r
7458 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7459 errorText_ = errorStream_.str();
\r
7463 stream_.bufferSize = *bufferSize;
\r
7465 // Install the hardware configuration
\r
7466 result = snd_pcm_hw_params( phandle, hw_params );
\r
7467 if ( result < 0 ) {
\r
7468 snd_pcm_close( phandle );
\r
7469 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7470 errorText_ = errorStream_.str();
\r
7474 #if defined(__RTAUDIO_DEBUG__)
\r
7475 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7476 snd_pcm_hw_params_dump( hw_params, out );
\r
7479 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7480 snd_pcm_sw_params_t *sw_params = NULL;
\r
7481 snd_pcm_sw_params_alloca( &sw_params );
\r
7482 snd_pcm_sw_params_current( phandle, sw_params );
\r
7483 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7484 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7485 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7487 // The following two settings were suggested by Theo Veenker
\r
7488 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7489 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7491 // here are two options for a fix
\r
7492 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7493 snd_pcm_uframes_t val;
\r
7494 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7495 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7497 result = snd_pcm_sw_params( phandle, sw_params );
\r
7498 if ( result < 0 ) {
\r
7499 snd_pcm_close( phandle );
\r
7500 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7501 errorText_ = errorStream_.str();
\r
7505 #if defined(__RTAUDIO_DEBUG__)
\r
7506 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7507 snd_pcm_sw_params_dump( sw_params, out );
\r
7510 // Set flags for buffer conversion
\r
7511 stream_.doConvertBuffer[mode] = false;
\r
7512 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7513 stream_.doConvertBuffer[mode] = true;
\r
7514 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7515 stream_.doConvertBuffer[mode] = true;
\r
7516 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7517 stream_.nUserChannels[mode] > 1 )
\r
7518 stream_.doConvertBuffer[mode] = true;
\r
7520 // Allocate the ApiHandle if necessary and then save.
\r
7521 AlsaHandle *apiInfo = 0;
\r
7522 if ( stream_.apiHandle == 0 ) {
\r
7524 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7526 catch ( std::bad_alloc& ) {
\r
7527 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7531 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7532 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7536 stream_.apiHandle = (void *) apiInfo;
\r
7537 apiInfo->handles[0] = 0;
\r
7538 apiInfo->handles[1] = 0;
\r
7541 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7543 apiInfo->handles[mode] = phandle;
\r
7546 // Allocate necessary internal buffers.
\r
7547 unsigned long bufferBytes;
\r
7548 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7549 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7550 if ( stream_.userBuffer[mode] == NULL ) {
\r
7551 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7555 if ( stream_.doConvertBuffer[mode] ) {
\r
7557 bool makeBuffer = true;
\r
7558 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7559 if ( mode == INPUT ) {
\r
7560 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7561 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7562 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7566 if ( makeBuffer ) {
\r
7567 bufferBytes *= *bufferSize;
\r
7568 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7569 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7570 if ( stream_.deviceBuffer == NULL ) {
\r
7571 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7577 stream_.sampleRate = sampleRate;
\r
7578 stream_.nBuffers = periods;
\r
7579 stream_.device[mode] = device;
\r
7580 stream_.state = STREAM_STOPPED;
\r
7582 // Setup the buffer conversion information structure.
\r
7583 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7585 // Setup thread if necessary.
\r
7586 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7587 // We had already set up an output stream.
\r
7588 stream_.mode = DUPLEX;
\r
7589 // Link the streams if possible.
\r
7590 apiInfo->synchronized = false;
\r
7591 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7592 apiInfo->synchronized = true;
\r
7594 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7595 error( RtAudioError::WARNING );
\r
7599 stream_.mode = mode;
\r
7601 // Setup callback thread.
\r
7602 stream_.callbackInfo.object = (void *) this;
\r
7604 // Set the thread attributes for joinable and realtime scheduling
\r
7605 // priority (optional). The higher priority will only take affect
\r
7606 // if the program is run as root or suid. Note, under Linux
\r
7607 // processes with CAP_SYS_NICE privilege, a user can change
\r
7608 // scheduling policy and priority (thus need not be root). See
\r
7609 // POSIX "capabilities".
\r
7610 pthread_attr_t attr;
\r
7611 pthread_attr_init( &attr );
\r
7612 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7614 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7615 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7616 // We previously attempted to increase the audio callback priority
\r
7617 // to SCHED_RR here via the attributes. However, while no errors
\r
7618 // were reported in doing so, it did not work. So, now this is
\r
7619 // done in the alsaCallbackHandler function.
\r
7620 stream_.callbackInfo.doRealtime = true;
\r
7621 int priority = options->priority;
\r
7622 int min = sched_get_priority_min( SCHED_RR );
\r
7623 int max = sched_get_priority_max( SCHED_RR );
\r
7624 if ( priority < min ) priority = min;
\r
7625 else if ( priority > max ) priority = max;
\r
7626 stream_.callbackInfo.priority = priority;
\r
7630 stream_.callbackInfo.isRunning = true;
\r
7631 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7632 pthread_attr_destroy( &attr );
\r
7634 stream_.callbackInfo.isRunning = false;
\r
7635 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7644 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7645 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7646 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7648 stream_.apiHandle = 0;
\r
7651 if ( phandle) snd_pcm_close( phandle );
\r
7653 for ( int i=0; i<2; i++ ) {
\r
7654 if ( stream_.userBuffer[i] ) {
\r
7655 free( stream_.userBuffer[i] );
\r
7656 stream_.userBuffer[i] = 0;
\r
7660 if ( stream_.deviceBuffer ) {
\r
7661 free( stream_.deviceBuffer );
\r
7662 stream_.deviceBuffer = 0;
\r
7665 stream_.state = STREAM_CLOSED;
\r
7669 void RtApiAlsa :: closeStream()
\r
7671 if ( stream_.state == STREAM_CLOSED ) {
\r
7672 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7673 error( RtAudioError::WARNING );
\r
7677 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7678 stream_.callbackInfo.isRunning = false;
\r
7679 MUTEX_LOCK( &stream_.mutex );
\r
7680 if ( stream_.state == STREAM_STOPPED ) {
\r
7681 apiInfo->runnable = true;
\r
7682 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7684 MUTEX_UNLOCK( &stream_.mutex );
\r
7685 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7687 if ( stream_.state == STREAM_RUNNING ) {
\r
7688 stream_.state = STREAM_STOPPED;
\r
7689 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7690 snd_pcm_drop( apiInfo->handles[0] );
\r
7691 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7692 snd_pcm_drop( apiInfo->handles[1] );
\r
7696 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7697 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7698 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7700 stream_.apiHandle = 0;
\r
7703 for ( int i=0; i<2; i++ ) {
\r
7704 if ( stream_.userBuffer[i] ) {
\r
7705 free( stream_.userBuffer[i] );
\r
7706 stream_.userBuffer[i] = 0;
\r
7710 if ( stream_.deviceBuffer ) {
\r
7711 free( stream_.deviceBuffer );
\r
7712 stream_.deviceBuffer = 0;
\r
7715 stream_.mode = UNINITIALIZED;
\r
7716 stream_.state = STREAM_CLOSED;
\r
7719 void RtApiAlsa :: startStream()
\r
7721 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7724 if ( stream_.state == STREAM_RUNNING ) {
\r
7725 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7726 error( RtAudioError::WARNING );
\r
7730 MUTEX_LOCK( &stream_.mutex );
\r
7733 snd_pcm_state_t state;
\r
7734 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7735 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7736 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7737 state = snd_pcm_state( handle[0] );
\r
7738 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7739 result = snd_pcm_prepare( handle[0] );
\r
7740 if ( result < 0 ) {
\r
7741 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7742 errorText_ = errorStream_.str();
\r
7748 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7749 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7750 state = snd_pcm_state( handle[1] );
\r
7751 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7752 result = snd_pcm_prepare( handle[1] );
\r
7753 if ( result < 0 ) {
\r
7754 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7755 errorText_ = errorStream_.str();
\r
7761 stream_.state = STREAM_RUNNING;
\r
7764 apiInfo->runnable = true;
\r
7765 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7766 MUTEX_UNLOCK( &stream_.mutex );
\r
7768 if ( result >= 0 ) return;
\r
7769 error( RtAudioError::SYSTEM_ERROR );
\r
7772 void RtApiAlsa :: stopStream()
\r
7775 if ( stream_.state == STREAM_STOPPED ) {
\r
7776 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7777 error( RtAudioError::WARNING );
\r
7781 stream_.state = STREAM_STOPPED;
\r
7782 MUTEX_LOCK( &stream_.mutex );
\r
7785 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7786 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7787 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7788 if ( apiInfo->synchronized )
\r
7789 result = snd_pcm_drop( handle[0] );
\r
7791 result = snd_pcm_drain( handle[0] );
\r
7792 if ( result < 0 ) {
\r
7793 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7794 errorText_ = errorStream_.str();
\r
7799 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7800 result = snd_pcm_drop( handle[1] );
\r
7801 if ( result < 0 ) {
\r
7802 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7803 errorText_ = errorStream_.str();
\r
7809 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7810 MUTEX_UNLOCK( &stream_.mutex );
\r
7812 if ( result >= 0 ) return;
\r
7813 error( RtAudioError::SYSTEM_ERROR );
\r
7816 void RtApiAlsa :: abortStream()
\r
7819 if ( stream_.state == STREAM_STOPPED ) {
\r
7820 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7821 error( RtAudioError::WARNING );
\r
7825 stream_.state = STREAM_STOPPED;
\r
7826 MUTEX_LOCK( &stream_.mutex );
\r
7829 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7830 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7831 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7832 result = snd_pcm_drop( handle[0] );
\r
7833 if ( result < 0 ) {
\r
7834 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7835 errorText_ = errorStream_.str();
\r
7840 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7841 result = snd_pcm_drop( handle[1] );
\r
7842 if ( result < 0 ) {
\r
7843 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7844 errorText_ = errorStream_.str();
\r
7850 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7851 MUTEX_UNLOCK( &stream_.mutex );
\r
7853 if ( result >= 0 ) return;
\r
7854 error( RtAudioError::SYSTEM_ERROR );
\r
7857 void RtApiAlsa :: callbackEvent()
\r
7859 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7860 if ( stream_.state == STREAM_STOPPED ) {
\r
7861 MUTEX_LOCK( &stream_.mutex );
\r
7862 while ( !apiInfo->runnable )
\r
7863 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7865 if ( stream_.state != STREAM_RUNNING ) {
\r
7866 MUTEX_UNLOCK( &stream_.mutex );
\r
7869 MUTEX_UNLOCK( &stream_.mutex );
\r
7872 if ( stream_.state == STREAM_CLOSED ) {
\r
7873 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7874 error( RtAudioError::WARNING );
\r
7878 int doStopStream = 0;
\r
7879 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7880 double streamTime = getStreamTime();
\r
7881 RtAudioStreamStatus status = 0;
\r
7882 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7883 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7884 apiInfo->xrun[0] = false;
\r
7886 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7887 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7888 apiInfo->xrun[1] = false;
\r
7890 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7891 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7893 if ( doStopStream == 2 ) {
\r
7898 MUTEX_LOCK( &stream_.mutex );
\r
7900 // The state might change while waiting on a mutex.
\r
7901 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7906 snd_pcm_t **handle;
\r
7907 snd_pcm_sframes_t frames;
\r
7908 RtAudioFormat format;
\r
7909 handle = (snd_pcm_t **) apiInfo->handles;
\r
7911 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7913 // Setup parameters.
\r
7914 if ( stream_.doConvertBuffer[1] ) {
\r
7915 buffer = stream_.deviceBuffer;
\r
7916 channels = stream_.nDeviceChannels[1];
\r
7917 format = stream_.deviceFormat[1];
\r
7920 buffer = stream_.userBuffer[1];
\r
7921 channels = stream_.nUserChannels[1];
\r
7922 format = stream_.userFormat;
\r
7925 // Read samples from device in interleaved/non-interleaved format.
\r
7926 if ( stream_.deviceInterleaved[1] )
\r
7927 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
7929 void *bufs[channels];
\r
7930 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7931 for ( int i=0; i<channels; i++ )
\r
7932 bufs[i] = (void *) (buffer + (i * offset));
\r
7933 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
7936 if ( result < (int) stream_.bufferSize ) {
\r
7937 // Either an error or overrun occured.
\r
7938 if ( result == -EPIPE ) {
\r
7939 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7940 if ( state == SND_PCM_STATE_XRUN ) {
\r
7941 apiInfo->xrun[1] = true;
\r
7942 result = snd_pcm_prepare( handle[1] );
\r
7943 if ( result < 0 ) {
\r
7944 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7945 errorText_ = errorStream_.str();
\r
7949 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7950 errorText_ = errorStream_.str();
\r
7954 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7955 errorText_ = errorStream_.str();
\r
7957 error( RtAudioError::WARNING );
\r
7961 // Do byte swapping if necessary.
\r
7962 if ( stream_.doByteSwap[1] )
\r
7963 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
7965 // Do buffer conversion if necessary.
\r
7966 if ( stream_.doConvertBuffer[1] )
\r
7967 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7969 // Check stream latency
\r
7970 result = snd_pcm_delay( handle[1], &frames );
\r
7971 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
7976 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7978 // Setup parameters and do buffer conversion if necessary.
\r
7979 if ( stream_.doConvertBuffer[0] ) {
\r
7980 buffer = stream_.deviceBuffer;
\r
7981 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7982 channels = stream_.nDeviceChannels[0];
\r
7983 format = stream_.deviceFormat[0];
\r
7986 buffer = stream_.userBuffer[0];
\r
7987 channels = stream_.nUserChannels[0];
\r
7988 format = stream_.userFormat;
\r
7991 // Do byte swapping if necessary.
\r
7992 if ( stream_.doByteSwap[0] )
\r
7993 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
7995 // Write samples to device in interleaved/non-interleaved format.
\r
7996 if ( stream_.deviceInterleaved[0] )
\r
7997 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
7999 void *bufs[channels];
\r
8000 size_t offset = stream_.bufferSize * formatBytes( format );
\r
8001 for ( int i=0; i<channels; i++ )
\r
8002 bufs[i] = (void *) (buffer + (i * offset));
\r
8003 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
8006 if ( result < (int) stream_.bufferSize ) {
\r
8007 // Either an error or underrun occured.
\r
8008 if ( result == -EPIPE ) {
\r
8009 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
8010 if ( state == SND_PCM_STATE_XRUN ) {
\r
8011 apiInfo->xrun[0] = true;
\r
8012 result = snd_pcm_prepare( handle[0] );
\r
8013 if ( result < 0 ) {
\r
8014 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
8015 errorText_ = errorStream_.str();
\r
8019 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8020 errorText_ = errorStream_.str();
\r
8024 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
8025 errorText_ = errorStream_.str();
\r
8027 error( RtAudioError::WARNING );
\r
8031 // Check stream latency
\r
8032 result = snd_pcm_delay( handle[0], &frames );
\r
8033 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
8037 MUTEX_UNLOCK( &stream_.mutex );
\r
8039 RtApi::tickStreamTime();
\r
8040 if ( doStopStream == 1 ) this->stopStream();
\r
8043 static void *alsaCallbackHandler( void *ptr )
\r
8045 CallbackInfo *info = (CallbackInfo *) ptr;
\r
8046 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
8047 bool *isRunning = &info->isRunning;
\r
8049 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
8050 if ( &info->doRealtime ) {
\r
8051 pthread_t tID = pthread_self(); // ID of this thread
\r
8052 sched_param prio = { info->priority }; // scheduling priority of thread
\r
8053 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
8057 while ( *isRunning == true ) {
\r
8058 pthread_testcancel();
\r
8059 object->callbackEvent();
\r
8062 pthread_exit( NULL );
\r
8065 //******************** End of __LINUX_ALSA__ *********************//
\r
8068 #if defined(__LINUX_PULSE__)
\r
8070 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8071 // and Tristan Matthews.
\r
8073 #include <pulse/error.h>
\r
8074 #include <pulse/simple.h>
\r
8077 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
\r
8078 44100, 48000, 96000, 0};
\r
8080 struct rtaudio_pa_format_mapping_t {
\r
8081 RtAudioFormat rtaudio_format;
\r
8082 pa_sample_format_t pa_format;
\r
8085 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8086 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8087 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8088 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8089 {0, PA_SAMPLE_INVALID}};
\r
8091 struct PulseAudioHandle {
\r
8092 pa_simple *s_play;
\r
8095 pthread_cond_t runnable_cv;
\r
8097 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8100 RtApiPulse::~RtApiPulse()
\r
8102 if ( stream_.state != STREAM_CLOSED )
\r
8106 unsigned int RtApiPulse::getDeviceCount( void )
\r
8111 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8113 RtAudio::DeviceInfo info;
\r
8114 info.probed = true;
\r
8115 info.name = "PulseAudio";
\r
8116 info.outputChannels = 2;
\r
8117 info.inputChannels = 2;
\r
8118 info.duplexChannels = 2;
\r
8119 info.isDefaultOutput = true;
\r
8120 info.isDefaultInput = true;
\r
8122 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8123 info.sampleRates.push_back( *sr );
\r
8125 info.preferredSampleRate = 48000;
\r
8126 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8131 static void *pulseaudio_callback( void * user )
\r
8133 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8134 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8135 volatile bool *isRunning = &cbi->isRunning;
\r
8137 while ( *isRunning ) {
\r
8138 pthread_testcancel();
\r
8139 context->callbackEvent();
\r
8142 pthread_exit( NULL );
\r
8145 void RtApiPulse::closeStream( void )
\r
8147 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8149 stream_.callbackInfo.isRunning = false;
\r
8151 MUTEX_LOCK( &stream_.mutex );
\r
8152 if ( stream_.state == STREAM_STOPPED ) {
\r
8153 pah->runnable = true;
\r
8154 pthread_cond_signal( &pah->runnable_cv );
\r
8156 MUTEX_UNLOCK( &stream_.mutex );
\r
8158 pthread_join( pah->thread, 0 );
\r
8159 if ( pah->s_play ) {
\r
8160 pa_simple_flush( pah->s_play, NULL );
\r
8161 pa_simple_free( pah->s_play );
\r
8164 pa_simple_free( pah->s_rec );
\r
8166 pthread_cond_destroy( &pah->runnable_cv );
\r
8168 stream_.apiHandle = 0;
\r
8171 if ( stream_.userBuffer[0] ) {
\r
8172 free( stream_.userBuffer[0] );
\r
8173 stream_.userBuffer[0] = 0;
\r
8175 if ( stream_.userBuffer[1] ) {
\r
8176 free( stream_.userBuffer[1] );
\r
8177 stream_.userBuffer[1] = 0;
\r
8180 stream_.state = STREAM_CLOSED;
\r
8181 stream_.mode = UNINITIALIZED;
\r
8184 void RtApiPulse::callbackEvent( void )
\r
8186 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8188 if ( stream_.state == STREAM_STOPPED ) {
\r
8189 MUTEX_LOCK( &stream_.mutex );
\r
8190 while ( !pah->runnable )
\r
8191 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8193 if ( stream_.state != STREAM_RUNNING ) {
\r
8194 MUTEX_UNLOCK( &stream_.mutex );
\r
8197 MUTEX_UNLOCK( &stream_.mutex );
\r
8200 if ( stream_.state == STREAM_CLOSED ) {
\r
8201 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8202 "this shouldn't happen!";
\r
8203 error( RtAudioError::WARNING );
\r
8207 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8208 double streamTime = getStreamTime();
\r
8209 RtAudioStreamStatus status = 0;
\r
8210 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8211 stream_.bufferSize, streamTime, status,
\r
8212 stream_.callbackInfo.userData );
\r
8214 if ( doStopStream == 2 ) {
\r
8219 MUTEX_LOCK( &stream_.mutex );
\r
8220 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8221 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8223 if ( stream_.state != STREAM_RUNNING )
\r
8228 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8229 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8230 convertBuffer( stream_.deviceBuffer,
\r
8231 stream_.userBuffer[OUTPUT],
\r
8232 stream_.convertInfo[OUTPUT] );
\r
8233 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8234 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8236 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8237 formatBytes( stream_.userFormat );
\r
8239 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8240 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8241 pa_strerror( pa_error ) << ".";
\r
8242 errorText_ = errorStream_.str();
\r
8243 error( RtAudioError::WARNING );
\r
8247 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8248 if ( stream_.doConvertBuffer[INPUT] )
\r
8249 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8250 formatBytes( stream_.deviceFormat[INPUT] );
\r
8252 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8253 formatBytes( stream_.userFormat );
\r
8255 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8256 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8257 pa_strerror( pa_error ) << ".";
\r
8258 errorText_ = errorStream_.str();
\r
8259 error( RtAudioError::WARNING );
\r
8261 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8262 convertBuffer( stream_.userBuffer[INPUT],
\r
8263 stream_.deviceBuffer,
\r
8264 stream_.convertInfo[INPUT] );
\r
8269 MUTEX_UNLOCK( &stream_.mutex );
\r
8270 RtApi::tickStreamTime();
\r
8272 if ( doStopStream == 1 )
\r
8276 void RtApiPulse::startStream( void )
\r
8278 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8280 if ( stream_.state == STREAM_CLOSED ) {
\r
8281 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8282 error( RtAudioError::INVALID_USE );
\r
8285 if ( stream_.state == STREAM_RUNNING ) {
\r
8286 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8287 error( RtAudioError::WARNING );
\r
8291 MUTEX_LOCK( &stream_.mutex );
\r
8293 stream_.state = STREAM_RUNNING;
\r
8295 pah->runnable = true;
\r
8296 pthread_cond_signal( &pah->runnable_cv );
\r
8297 MUTEX_UNLOCK( &stream_.mutex );
\r
8300 void RtApiPulse::stopStream( void )
\r
8302 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8304 if ( stream_.state == STREAM_CLOSED ) {
\r
8305 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8306 error( RtAudioError::INVALID_USE );
\r
8309 if ( stream_.state == STREAM_STOPPED ) {
\r
8310 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8311 error( RtAudioError::WARNING );
\r
8315 stream_.state = STREAM_STOPPED;
\r
8316 MUTEX_LOCK( &stream_.mutex );
\r
8318 if ( pah && pah->s_play ) {
\r
8320 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8321 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8322 pa_strerror( pa_error ) << ".";
\r
8323 errorText_ = errorStream_.str();
\r
8324 MUTEX_UNLOCK( &stream_.mutex );
\r
8325 error( RtAudioError::SYSTEM_ERROR );
\r
8330 stream_.state = STREAM_STOPPED;
\r
8331 MUTEX_UNLOCK( &stream_.mutex );
\r
8334 void RtApiPulse::abortStream( void )
\r
8336 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8338 if ( stream_.state == STREAM_CLOSED ) {
\r
8339 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8340 error( RtAudioError::INVALID_USE );
\r
8343 if ( stream_.state == STREAM_STOPPED ) {
\r
8344 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8345 error( RtAudioError::WARNING );
\r
8349 stream_.state = STREAM_STOPPED;
\r
8350 MUTEX_LOCK( &stream_.mutex );
\r
8352 if ( pah && pah->s_play ) {
\r
8354 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8355 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8356 pa_strerror( pa_error ) << ".";
\r
8357 errorText_ = errorStream_.str();
\r
8358 MUTEX_UNLOCK( &stream_.mutex );
\r
8359 error( RtAudioError::SYSTEM_ERROR );
\r
8364 stream_.state = STREAM_STOPPED;
\r
8365 MUTEX_UNLOCK( &stream_.mutex );
\r
8368 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8369 unsigned int channels, unsigned int firstChannel,
\r
8370 unsigned int sampleRate, RtAudioFormat format,
\r
8371 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8373 PulseAudioHandle *pah = 0;
\r
8374 unsigned long bufferBytes = 0;
\r
8375 pa_sample_spec ss;
\r
8377 if ( device != 0 ) return false;
\r
8378 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8379 if ( channels != 1 && channels != 2 ) {
\r
8380 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8383 ss.channels = channels;
\r
8385 if ( firstChannel != 0 ) return false;
\r
8387 bool sr_found = false;
\r
8388 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8389 if ( sampleRate == *sr ) {
\r
8391 stream_.sampleRate = sampleRate;
\r
8392 ss.rate = sampleRate;
\r
8396 if ( !sr_found ) {
\r
8397 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8401 bool sf_found = 0;
\r
8402 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8403 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8404 if ( format == sf->rtaudio_format ) {
\r
8406 stream_.userFormat = sf->rtaudio_format;
\r
8407 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8408 ss.format = sf->pa_format;
\r
8412 if ( !sf_found ) { // Use internal data format conversion.
\r
8413 stream_.userFormat = format;
\r
8414 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8415 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8418 // Set other stream parameters.
\r
8419 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8420 else stream_.userInterleaved = true;
\r
8421 stream_.deviceInterleaved[mode] = true;
\r
8422 stream_.nBuffers = 1;
\r
8423 stream_.doByteSwap[mode] = false;
\r
8424 stream_.nUserChannels[mode] = channels;
\r
8425 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8426 stream_.channelOffset[mode] = 0;
\r
8427 std::string streamName = "RtAudio";
\r
8429 // Set flags for buffer conversion.
\r
8430 stream_.doConvertBuffer[mode] = false;
\r
8431 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8432 stream_.doConvertBuffer[mode] = true;
\r
8433 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8434 stream_.doConvertBuffer[mode] = true;
\r
8436 // Allocate necessary internal buffers.
\r
8437 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8438 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8439 if ( stream_.userBuffer[mode] == NULL ) {
\r
8440 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8443 stream_.bufferSize = *bufferSize;
\r
8445 if ( stream_.doConvertBuffer[mode] ) {
\r
8447 bool makeBuffer = true;
\r
8448 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8449 if ( mode == INPUT ) {
\r
8450 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8451 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8452 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8456 if ( makeBuffer ) {
\r
8457 bufferBytes *= *bufferSize;
\r
8458 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8459 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8460 if ( stream_.deviceBuffer == NULL ) {
\r
8461 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8467 stream_.device[mode] = device;
\r
8469 // Setup the buffer conversion information structure.
\r
8470 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8472 if ( !stream_.apiHandle ) {
\r
8473 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8475 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8479 stream_.apiHandle = pah;
\r
8480 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8481 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8485 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8488 if ( options && !options->streamName.empty() ) streamName = options->streamName;
\r
8491 pa_buffer_attr buffer_attr;
\r
8492 buffer_attr.fragsize = bufferBytes;
\r
8493 buffer_attr.maxlength = -1;
\r
8495 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8496 if ( !pah->s_rec ) {
\r
8497 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8502 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8503 if ( !pah->s_play ) {
\r
8504 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8512 if ( stream_.mode == UNINITIALIZED )
\r
8513 stream_.mode = mode;
\r
8514 else if ( stream_.mode == mode )
\r
8517 stream_.mode = DUPLEX;
\r
8519 if ( !stream_.callbackInfo.isRunning ) {
\r
8520 stream_.callbackInfo.object = this;
\r
8521 stream_.callbackInfo.isRunning = true;
\r
8522 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8523 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8528 stream_.state = STREAM_STOPPED;
\r
8532 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8533 pthread_cond_destroy( &pah->runnable_cv );
\r
8535 stream_.apiHandle = 0;
\r
8538 for ( int i=0; i<2; i++ ) {
\r
8539 if ( stream_.userBuffer[i] ) {
\r
8540 free( stream_.userBuffer[i] );
\r
8541 stream_.userBuffer[i] = 0;
\r
8545 if ( stream_.deviceBuffer ) {
\r
8546 free( stream_.deviceBuffer );
\r
8547 stream_.deviceBuffer = 0;
\r
8553 //******************** End of __LINUX_PULSE__ *********************//
\r
8556 #if defined(__LINUX_OSS__)
\r
8558 #include <unistd.h>
\r
8559 #include <sys/ioctl.h>
\r
8560 #include <unistd.h>
\r
8561 #include <fcntl.h>
\r
8562 #include <sys/soundcard.h>
\r
8563 #include <errno.h>
\r
8566 static void *ossCallbackHandler(void * ptr);
\r
8568 // A structure to hold various information related to the OSS API
\r
8569 // implementation.
\r
struct OssHandle {
  int id[2];               // open file descriptors (playback / capture)
  bool xrun[2];            // per-direction over/underrun flags
  bool triggered;          // set once the duplex trigger has been issued
  pthread_cond_t runnable; // signalled when the callback thread may proceed

  OssHandle()
    : triggered( false )
  {
    id[0] = id[1] = 0;
    xrun[0] = xrun[1] = false;
  }
};
\r
8580 RtApiOss :: RtApiOss()
\r
8582 // Nothing to do here.
\r
8585 RtApiOss :: ~RtApiOss()
\r
8587 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8590 unsigned int RtApiOss :: getDeviceCount( void )
\r
8592 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8593 if ( mixerfd == -1 ) {
\r
8594 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8595 error( RtAudioError::WARNING );
\r
8599 oss_sysinfo sysinfo;
\r
8600 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8602 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8603 error( RtAudioError::WARNING );
\r
8608 return sysinfo.numaudios;
\r
8611 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8613 RtAudio::DeviceInfo info;
\r
8614 info.probed = false;
\r
8616 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8617 if ( mixerfd == -1 ) {
\r
8618 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8619 error( RtAudioError::WARNING );
\r
8623 oss_sysinfo sysinfo;
\r
8624 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8625 if ( result == -1 ) {
\r
8627 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8628 error( RtAudioError::WARNING );
\r
8632 unsigned nDevices = sysinfo.numaudios;
\r
8633 if ( nDevices == 0 ) {
\r
8635 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8636 error( RtAudioError::INVALID_USE );
\r
8640 if ( device >= nDevices ) {
\r
8642 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8643 error( RtAudioError::INVALID_USE );
\r
8647 oss_audioinfo ainfo;
\r
8648 ainfo.dev = device;
\r
8649 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8651 if ( result == -1 ) {
\r
8652 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8653 errorText_ = errorStream_.str();
\r
8654 error( RtAudioError::WARNING );
\r
8659 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8660 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8661 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8662 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8663 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8666 // Probe data formats ... do for input
\r
8667 unsigned long mask = ainfo.iformats;
\r
8668 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8669 info.nativeFormats |= RTAUDIO_SINT16;
\r
8670 if ( mask & AFMT_S8 )
\r
8671 info.nativeFormats |= RTAUDIO_SINT8;
\r
8672 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8673 info.nativeFormats |= RTAUDIO_SINT32;
\r
8674 if ( mask & AFMT_FLOAT )
\r
8675 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8676 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8677 info.nativeFormats |= RTAUDIO_SINT24;
\r
8679 // Check that we have at least one supported format
\r
8680 if ( info.nativeFormats == 0 ) {
\r
8681 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8682 errorText_ = errorStream_.str();
\r
8683 error( RtAudioError::WARNING );
\r
8687 // Probe the supported sample rates.
\r
8688 info.sampleRates.clear();
\r
8689 if ( ainfo.nrates ) {
\r
8690 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8691 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8692 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8693 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8695 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8696 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8704 // Check min and max rate values;
\r
8705 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8706 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
\r
8707 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8709 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8710 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8715 if ( info.sampleRates.size() == 0 ) {
\r
8716 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8717 errorText_ = errorStream_.str();
\r
8718 error( RtAudioError::WARNING );
\r
8721 info.probed = true;
\r
8722 info.name = ainfo.name;
\r
8729 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8730 unsigned int firstChannel, unsigned int sampleRate,
\r
8731 RtAudioFormat format, unsigned int *bufferSize,
\r
8732 RtAudio::StreamOptions *options )
\r
8734 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8735 if ( mixerfd == -1 ) {
\r
8736 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8740 oss_sysinfo sysinfo;
\r
8741 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8742 if ( result == -1 ) {
\r
8744 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8748 unsigned nDevices = sysinfo.numaudios;
\r
8749 if ( nDevices == 0 ) {
\r
8750 // This should not happen because a check is made before this function is called.
\r
8752 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8756 if ( device >= nDevices ) {
\r
8757 // This should not happen because a check is made before this function is called.
\r
8759 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8763 oss_audioinfo ainfo;
\r
8764 ainfo.dev = device;
\r
8765 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8767 if ( result == -1 ) {
\r
8768 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8769 errorText_ = errorStream_.str();
\r
8773 // Check if device supports input or output
\r
8774 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8775 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8776 if ( mode == OUTPUT )
\r
8777 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8779 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8780 errorText_ = errorStream_.str();
\r
8785 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8786 if ( mode == OUTPUT )
\r
8787 flags |= O_WRONLY;
\r
8788 else { // mode == INPUT
\r
8789 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8790 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8791 close( handle->id[0] );
\r
8792 handle->id[0] = 0;
\r
8793 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8794 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8795 errorText_ = errorStream_.str();
\r
8798 // Check that the number previously set channels is the same.
\r
8799 if ( stream_.nUserChannels[0] != channels ) {
\r
8800 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8801 errorText_ = errorStream_.str();
\r
8807 flags |= O_RDONLY;
\r
8810 // Set exclusive access if specified.
\r
8811 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8813 // Try to open the device.
\r
8815 fd = open( ainfo.devnode, flags, 0 );
\r
8817 if ( errno == EBUSY )
\r
8818 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8820 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8821 errorText_ = errorStream_.str();
\r
8825 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8827 if ( flags | O_RDWR ) {
\r
8828 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8829 if ( result == -1) {
\r
8830 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8831 errorText_ = errorStream_.str();
\r
8837 // Check the device channel support.
\r
8838 stream_.nUserChannels[mode] = channels;
\r
8839 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8841 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8842 errorText_ = errorStream_.str();
\r
8846 // Set the number of channels.
\r
8847 int deviceChannels = channels + firstChannel;
\r
8848 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8849 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8851 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8852 errorText_ = errorStream_.str();
\r
8855 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8857 // Get the data format mask
\r
8859 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8860 if ( result == -1 ) {
\r
8862 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8863 errorText_ = errorStream_.str();
\r
8867 // Determine how to set the device format.
\r
8868 stream_.userFormat = format;
\r
8869 int deviceFormat = -1;
\r
8870 stream_.doByteSwap[mode] = false;
\r
8871 if ( format == RTAUDIO_SINT8 ) {
\r
8872 if ( mask & AFMT_S8 ) {
\r
8873 deviceFormat = AFMT_S8;
\r
8874 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8877 else if ( format == RTAUDIO_SINT16 ) {
\r
8878 if ( mask & AFMT_S16_NE ) {
\r
8879 deviceFormat = AFMT_S16_NE;
\r
8880 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8882 else if ( mask & AFMT_S16_OE ) {
\r
8883 deviceFormat = AFMT_S16_OE;
\r
8884 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8885 stream_.doByteSwap[mode] = true;
\r
8888 else if ( format == RTAUDIO_SINT24 ) {
\r
8889 if ( mask & AFMT_S24_NE ) {
\r
8890 deviceFormat = AFMT_S24_NE;
\r
8891 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8893 else if ( mask & AFMT_S24_OE ) {
\r
8894 deviceFormat = AFMT_S24_OE;
\r
8895 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8896 stream_.doByteSwap[mode] = true;
\r
8899 else if ( format == RTAUDIO_SINT32 ) {
\r
8900 if ( mask & AFMT_S32_NE ) {
\r
8901 deviceFormat = AFMT_S32_NE;
\r
8902 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8904 else if ( mask & AFMT_S32_OE ) {
\r
8905 deviceFormat = AFMT_S32_OE;
\r
8906 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8907 stream_.doByteSwap[mode] = true;
\r
8911 if ( deviceFormat == -1 ) {
\r
8912 // The user requested format is not natively supported by the device.
\r
8913 if ( mask & AFMT_S16_NE ) {
\r
8914 deviceFormat = AFMT_S16_NE;
\r
8915 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8917 else if ( mask & AFMT_S32_NE ) {
\r
8918 deviceFormat = AFMT_S32_NE;
\r
8919 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8921 else if ( mask & AFMT_S24_NE ) {
\r
8922 deviceFormat = AFMT_S24_NE;
\r
8923 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8925 else if ( mask & AFMT_S16_OE ) {
\r
8926 deviceFormat = AFMT_S16_OE;
\r
8927 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8928 stream_.doByteSwap[mode] = true;
\r
8930 else if ( mask & AFMT_S32_OE ) {
\r
8931 deviceFormat = AFMT_S32_OE;
\r
8932 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8933 stream_.doByteSwap[mode] = true;
\r
8935 else if ( mask & AFMT_S24_OE ) {
\r
8936 deviceFormat = AFMT_S24_OE;
\r
8937 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8938 stream_.doByteSwap[mode] = true;
\r
8940 else if ( mask & AFMT_S8) {
\r
8941 deviceFormat = AFMT_S8;
\r
8942 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8946 if ( stream_.deviceFormat[mode] == 0 ) {
\r
8947 // This really shouldn't happen ...
\r
8949 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8950 errorText_ = errorStream_.str();
\r
8954 // Set the data format.
\r
8955 int temp = deviceFormat;
\r
8956 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
8957 if ( result == -1 || deviceFormat != temp ) {
\r
8959 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
8960 errorText_ = errorStream_.str();
\r
8964 // Attempt to set the buffer size. According to OSS, the minimum
\r
8965 // number of buffers is two. The supposed minimum buffer size is 16
\r
8966 // bytes, so that will be our lower bound. The argument to this
\r
8967 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
8968 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
8969 // We'll check the actual value used near the end of the setup
\r
8971 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
8972 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
8974 if ( options ) buffers = options->numberOfBuffers;
\r
8975 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
8976 if ( buffers < 2 ) buffers = 3;
\r
8977 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
8978 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
8979 if ( result == -1 ) {
\r
8981 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
8982 errorText_ = errorStream_.str();
\r
8985 stream_.nBuffers = buffers;
\r
8987 // Save buffer size (in sample frames).
\r
8988 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
8989 stream_.bufferSize = *bufferSize;
\r
8991 // Set the sample rate.
\r
8992 int srate = sampleRate;
\r
8993 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
8994 if ( result == -1 ) {
\r
8996 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
8997 errorText_ = errorStream_.str();
\r
9001 // Verify the sample rate setup worked.
\r
9002 if ( abs( srate - sampleRate ) > 100 ) {
\r
9004 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
9005 errorText_ = errorStream_.str();
\r
9008 stream_.sampleRate = sampleRate;
\r
9010 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
9011 // We're doing duplex setup here.
\r
9012 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
9013 stream_.nDeviceChannels[0] = deviceChannels;
\r
9016 // Set interleaving parameters.
\r
9017 stream_.userInterleaved = true;
\r
9018 stream_.deviceInterleaved[mode] = true;
\r
9019 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
9020 stream_.userInterleaved = false;
\r
9022 // Set flags for buffer conversion
\r
9023 stream_.doConvertBuffer[mode] = false;
\r
9024 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
9025 stream_.doConvertBuffer[mode] = true;
\r
9026 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
9027 stream_.doConvertBuffer[mode] = true;
\r
9028 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
9029 stream_.nUserChannels[mode] > 1 )
\r
9030 stream_.doConvertBuffer[mode] = true;
\r
9032 // Allocate the stream handles if necessary and then save.
\r
9033 if ( stream_.apiHandle == 0 ) {
\r
9035 handle = new OssHandle;
\r
9037 catch ( std::bad_alloc& ) {
\r
9038 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
9042 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
9043 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
9047 stream_.apiHandle = (void *) handle;
\r
9050 handle = (OssHandle *) stream_.apiHandle;
\r
9052 handle->id[mode] = fd;
\r
9054 // Allocate necessary internal buffers.
\r
9055 unsigned long bufferBytes;
\r
9056 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
9057 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
9058 if ( stream_.userBuffer[mode] == NULL ) {
\r
9059 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
9063 if ( stream_.doConvertBuffer[mode] ) {
\r
9065 bool makeBuffer = true;
\r
9066 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
9067 if ( mode == INPUT ) {
\r
9068 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
9069 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
9070 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9074 if ( makeBuffer ) {
\r
9075 bufferBytes *= *bufferSize;
\r
9076 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9077 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9078 if ( stream_.deviceBuffer == NULL ) {
\r
9079 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9085 stream_.device[mode] = device;
\r
9086 stream_.state = STREAM_STOPPED;
\r
9088 // Setup the buffer conversion information structure.
\r
9089 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9091 // Setup thread if necessary.
\r
9092 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9093 // We had already set up an output stream.
\r
9094 stream_.mode = DUPLEX;
\r
9095 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9098 stream_.mode = mode;
\r
9100 // Setup callback thread.
\r
9101 stream_.callbackInfo.object = (void *) this;
\r
9103 // Set the thread attributes for joinable and realtime scheduling
\r
9104 // priority. The higher priority will only take affect if the
\r
9105 // program is run as root or suid.
\r
9106 pthread_attr_t attr;
\r
9107 pthread_attr_init( &attr );
\r
9108 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9109 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9110 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9111 struct sched_param param;
\r
9112 int priority = options->priority;
\r
9113 int min = sched_get_priority_min( SCHED_RR );
\r
9114 int max = sched_get_priority_max( SCHED_RR );
\r
9115 if ( priority < min ) priority = min;
\r
9116 else if ( priority > max ) priority = max;
\r
9117 param.sched_priority = priority;
\r
9118 pthread_attr_setschedparam( &attr, ¶m );
\r
9119 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9122 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9124 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9127 stream_.callbackInfo.isRunning = true;
\r
9128 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9129 pthread_attr_destroy( &attr );
\r
9131 stream_.callbackInfo.isRunning = false;
\r
9132 errorText_ = "RtApiOss::error creating callback thread!";
\r
9141 pthread_cond_destroy( &handle->runnable );
\r
9142 if ( handle->id[0] ) close( handle->id[0] );
\r
9143 if ( handle->id[1] ) close( handle->id[1] );
\r
9145 stream_.apiHandle = 0;
\r
9148 for ( int i=0; i<2; i++ ) {
\r
9149 if ( stream_.userBuffer[i] ) {
\r
9150 free( stream_.userBuffer[i] );
\r
9151 stream_.userBuffer[i] = 0;
\r
9155 if ( stream_.deviceBuffer ) {
\r
9156 free( stream_.deviceBuffer );
\r
9157 stream_.deviceBuffer = 0;
\r
9163 void RtApiOss :: closeStream()
\r
9165 if ( stream_.state == STREAM_CLOSED ) {
\r
9166 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9167 error( RtAudioError::WARNING );
\r
9171 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9172 stream_.callbackInfo.isRunning = false;
\r
9173 MUTEX_LOCK( &stream_.mutex );
\r
9174 if ( stream_.state == STREAM_STOPPED )
\r
9175 pthread_cond_signal( &handle->runnable );
\r
9176 MUTEX_UNLOCK( &stream_.mutex );
\r
9177 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9179 if ( stream_.state == STREAM_RUNNING ) {
\r
9180 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9181 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9183 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9184 stream_.state = STREAM_STOPPED;
\r
9188 pthread_cond_destroy( &handle->runnable );
\r
9189 if ( handle->id[0] ) close( handle->id[0] );
\r
9190 if ( handle->id[1] ) close( handle->id[1] );
\r
9192 stream_.apiHandle = 0;
\r
9195 for ( int i=0; i<2; i++ ) {
\r
9196 if ( stream_.userBuffer[i] ) {
\r
9197 free( stream_.userBuffer[i] );
\r
9198 stream_.userBuffer[i] = 0;
\r
9202 if ( stream_.deviceBuffer ) {
\r
9203 free( stream_.deviceBuffer );
\r
9204 stream_.deviceBuffer = 0;
\r
9207 stream_.mode = UNINITIALIZED;
\r
9208 stream_.state = STREAM_CLOSED;
\r
// Start (or resume) a stopped stream.  OSS needs no explicit start ioctl;
// marking the stream RUNNING and waking the callback thread is sufficient,
// since the device begins playing as soon as it is fed samples.
void RtApiOss :: startStream()
{
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiOss::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  stream_.state = STREAM_RUNNING;

  // No need to do anything else here ... OSS automatically starts
  // when fed samples.

  MUTEX_UNLOCK( &stream_.mutex );

  // Wake the callback thread, which waits on this condition while stopped.
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  pthread_cond_signal( &handle->runnable );
}
\r
9233 void RtApiOss :: stopStream()
\r
9236 if ( stream_.state == STREAM_STOPPED ) {
\r
9237 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9238 error( RtAudioError::WARNING );
\r
9242 MUTEX_LOCK( &stream_.mutex );
\r
9244 // The state might change while waiting on a mutex.
\r
9245 if ( stream_.state == STREAM_STOPPED ) {
\r
9246 MUTEX_UNLOCK( &stream_.mutex );
\r
9251 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9252 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9254 // Flush the output with zeros a few times.
\r
9257 RtAudioFormat format;
\r
9259 if ( stream_.doConvertBuffer[0] ) {
\r
9260 buffer = stream_.deviceBuffer;
\r
9261 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9262 format = stream_.deviceFormat[0];
\r
9265 buffer = stream_.userBuffer[0];
\r
9266 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9267 format = stream_.userFormat;
\r
9270 memset( buffer, 0, samples * formatBytes(format) );
\r
9271 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9272 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9273 if ( result == -1 ) {
\r
9274 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9275 error( RtAudioError::WARNING );
\r
9279 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9280 if ( result == -1 ) {
\r
9281 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9282 errorText_ = errorStream_.str();
\r
9285 handle->triggered = false;
\r
9288 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9289 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9290 if ( result == -1 ) {
\r
9291 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9292 errorText_ = errorStream_.str();
\r
9298 stream_.state = STREAM_STOPPED;
\r
9299 MUTEX_UNLOCK( &stream_.mutex );
\r
9301 if ( result != -1 ) return;
\r
9302 error( RtAudioError::SYSTEM_ERROR );
\r
9305 void RtApiOss :: abortStream()
\r
9308 if ( stream_.state == STREAM_STOPPED ) {
\r
9309 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9310 error( RtAudioError::WARNING );
\r
9314 MUTEX_LOCK( &stream_.mutex );
\r
9316 // The state might change while waiting on a mutex.
\r
9317 if ( stream_.state == STREAM_STOPPED ) {
\r
9318 MUTEX_UNLOCK( &stream_.mutex );
\r
9323 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9324 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9325 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9326 if ( result == -1 ) {
\r
9327 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9328 errorText_ = errorStream_.str();
\r
9331 handle->triggered = false;
\r
9334 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9335 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9336 if ( result == -1 ) {
\r
9337 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9338 errorText_ = errorStream_.str();
\r
9344 stream_.state = STREAM_STOPPED;
\r
9345 MUTEX_UNLOCK( &stream_.mutex );
\r
9347 if ( result != -1 ) return;
\r
9348 error( RtAudioError::SYSTEM_ERROR );
\r
// Per-buffer worker driven by the callback thread: sleeps while the stream
// is stopped, invokes the user callback with underflow/overflow status, then
// performs the actual device write/read with conversion and byte swapping
// as configured.
void RtApiOss :: callbackEvent()
{
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_LOCK( &stream_.mutex );
    // Block until startStream()/closeStream() signals 'runnable'.
    pthread_cond_wait( &handle->runnable, &stream_.mutex );
    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Invoke user callback to get fresh output data.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  // xrun[0]/xrun[1] are one-shot flags set by failed write/read calls below.
  if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    handle->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    handle->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
  // Callback return value 2 requests an immediate abort.
  if ( doStopStream == 2 ) {
    this->abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int samples;
  RtAudioFormat format;

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer( buffer, samples, format );

    if ( stream_.mode == DUPLEX && handle->triggered == false ) {
      int trig = 0;
      // First duplex pass: disable triggers, preload one output buffer,
      // then enable input and output simultaneously so they start in sync.
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      handle->triggered = true;
    }
    else
      // Write samples to device.
      result = write( handle->id[0], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an underrun, though there isn't a
      // specific means for determining that.
      handle->xrun[0] = true;
      errorText_ = "RtApiOss::callbackEvent: audio write error.";
      error( RtAudioError::WARNING );
      // Continue on to input section.
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      samples = stream_.bufferSize * stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device.
    result = read( handle->id[1], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an overrun, though there isn't a
      // specific means for determining that.
      handle->xrun[1] = true;
      errorText_ = "RtApiOss::callbackEvent: audio read error.";
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, samples, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  RtApi::tickStreamTime();
  // Callback return value 1 requests a drained (graceful) stop.
  if ( doStopStream == 1 ) this->stopStream();
}
\r
// Thread entry point for the OSS callback thread.  Loops calling
// callbackEvent() until closeStream() clears isRunning, then exits.
static void *ossCallbackHandler( void *ptr )
{
  CallbackInfo *info = (CallbackInfo *) ptr;
  RtApiOss *object = (RtApiOss *) info->object;
  bool *isRunning = &info->isRunning;

  while ( *isRunning == true ) {
    pthread_testcancel();
    object->callbackEvent();
  }

  pthread_exit( NULL );
}
\r
9497 //******************** End of __LINUX_OSS__ *********************//
\r
9501 // *************************************************** //
\r
9503 // Protected common (OS-independent) RtAudio methods.
\r
9505 // *************************************************** //
\r
9507 // This method can be modified to control the behavior of error
\r
9508 // message printing.
\r
// This method can be modified to control the behavior of error
// message printing.
//
// Dispatch an error of the given severity: deliver it to the registered
// error callback if one exists, otherwise print warnings to stderr and
// throw RtAudioError for anything more severe.
void RtApi :: error( RtAudioError::Type type )
{
  errorStream_.str(""); // clear the ostringstream

  RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
  if ( errorCallback ) {
    // abortStream() can generate new error messages. Ignore them. Just keep original one.

    if ( firstErrorOccurred_ )
      return;

    firstErrorOccurred_ = true;
    // Copy the message now: abortStream() below may overwrite errorText_.
    const std::string errorMessage = errorText_;

    if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
      stream_.callbackInfo.isRunning = false; // exit from the thread
      abortStream();
    }

    errorCallback( type, errorMessage );
    firstErrorOccurred_ = false;
    return;
  }

  if ( type == RtAudioError::WARNING && showWarnings_ == true )
    std::cerr << '\n' << errorText_ << "\n\n";
  else if ( type != RtAudioError::WARNING )
    throw( RtAudioError( errorText_, type ) );
}
\r
// Raise an INVALID_USE error if no stream is currently open.
void RtApi :: verifyStream()
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApi:: a stream is not open!";
    error( RtAudioError::INVALID_USE );
  }
}
\r
// Reset all stream bookkeeping to its pristine (closed) state.  Called
// before probing/opening a stream; does not free any resources.
void RtApi :: clearStreamInfo()
{
  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
  stream_.sampleRate = 0;
  stream_.bufferSize = 0;
  stream_.nBuffers = 0;
  stream_.userFormat = 0;
  stream_.userInterleaved = true;
  stream_.streamTime = 0.0;
  stream_.apiHandle = 0;
  stream_.deviceBuffer = 0;
  stream_.callbackInfo.callback = 0;
  stream_.callbackInfo.userData = 0;
  stream_.callbackInfo.isRunning = false;
  stream_.callbackInfo.errorCallback = 0;
  // Per-direction state: index 0 = output, 1 = input.
  for ( int i=0; i<2; i++ ) {
    stream_.device[i] = 11111;  // sentinel "no device" value
    stream_.doConvertBuffer[i] = false;
    stream_.deviceInterleaved[i] = true;
    stream_.doByteSwap[i] = false;
    stream_.nUserChannels[i] = 0;
    stream_.nDeviceChannels[i] = 0;
    stream_.channelOffset[i] = 0;
    stream_.deviceFormat[i] = 0;
    stream_.latency[i] = 0;
    stream_.userBuffer[i] = 0;
    stream_.convertInfo[i].channels = 0;
    stream_.convertInfo[i].inJump = 0;
    stream_.convertInfo[i].outJump = 0;
    stream_.convertInfo[i].inFormat = 0;
    stream_.convertInfo[i].outFormat = 0;
    stream_.convertInfo[i].inOffset.clear();
    stream_.convertInfo[i].outOffset.clear();
  }
}
\r
// Return the size in bytes of one sample of the given format, or 0
// (after issuing a warning) for an unrecognized format.
unsigned int RtApi :: formatBytes( RtAudioFormat format )
{
  if ( format == RTAUDIO_SINT16 )
    return 2;
  else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
    return 4;
  else if ( format == RTAUDIO_FLOAT64 )
    return 8;
  else if ( format == RTAUDIO_SINT24 )
    return 3;
  else if ( format == RTAUDIO_SINT8 )
    return 1;

  errorText_ = "RtApi::formatBytes: undefined format.";
  error( RtAudioError::WARNING );

  return 0;
}
\r
// Populate stream_.convertInfo[mode] with the jumps and per-channel
// offsets needed by convertBuffer(): direction-dependent formats/channel
// counts, interleave/deinterleave offsets, and the firstChannel offset.
void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
{
  if ( mode == INPUT ) { // convert device to user buffer
    stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
    stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
    stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
    stream_.convertInfo[mode].outFormat = stream_.userFormat;
  }
  else { // convert user to device buffer
    stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
    stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
    stream_.convertInfo[mode].inFormat = stream_.userFormat;
    stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
  }

  // Convert only as many channels as both sides have.
  if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
  else
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;

  // Set up the interleave/deinterleave offsets.
  if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
    if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
         ( mode == INPUT && stream_.userInterleaved ) ) {
      // Non-interleaved source -> interleaved destination.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k );
        stream_.convertInfo[mode].inJump = 1;
      }
    }
    else {
      // Interleaved source -> non-interleaved destination.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }
  else { // no (de)interleaving
    if ( stream_.userInterleaved ) {
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k );
      }
    }
    else {
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].inJump = 1;
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }

  // Add channel offset.
  if ( firstChannel > 0 ) {
    if ( stream_.deviceInterleaved[mode] ) {
      // Interleaved device data: offset is in samples.
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += firstChannel;
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += firstChannel;
      }
    }
    else {
      // Non-interleaved device data: offset is in whole channel buffers.
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
      }
    }
  }
}
\r
9683 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9685 // This function does format conversion, input/output channel compensation, and
\r
9686 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9687 // the lower three bytes of a 32-bit integer.
\r
9689 // Clear our device buffer when in/out duplex device channels are different
\r
9690 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9691 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9692 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9695 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9697 Float64 *out = (Float64 *)outBuffer;
\r
9699 if (info.inFormat == RTAUDIO_SINT8) {
\r
9700 signed char *in = (signed char *)inBuffer;
\r
9701 scale = 1.0 / 127.5;
\r
9702 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9703 for (j=0; j<info.channels; j++) {
\r
9704 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9705 out[info.outOffset[j]] += 0.5;
\r
9706 out[info.outOffset[j]] *= scale;
\r
9708 in += info.inJump;
\r
9709 out += info.outJump;
\r
9712 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9713 Int16 *in = (Int16 *)inBuffer;
\r
9714 scale = 1.0 / 32767.5;
\r
9715 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9716 for (j=0; j<info.channels; j++) {
\r
9717 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9718 out[info.outOffset[j]] += 0.5;
\r
9719 out[info.outOffset[j]] *= scale;
\r
9721 in += info.inJump;
\r
9722 out += info.outJump;
\r
9725 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9726 Int24 *in = (Int24 *)inBuffer;
\r
9727 scale = 1.0 / 8388607.5;
\r
9728 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9729 for (j=0; j<info.channels; j++) {
\r
9730 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9731 out[info.outOffset[j]] += 0.5;
\r
9732 out[info.outOffset[j]] *= scale;
\r
9734 in += info.inJump;
\r
9735 out += info.outJump;
\r
9738 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9739 Int32 *in = (Int32 *)inBuffer;
\r
9740 scale = 1.0 / 2147483647.5;
\r
9741 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9742 for (j=0; j<info.channels; j++) {
\r
9743 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9744 out[info.outOffset[j]] += 0.5;
\r
9745 out[info.outOffset[j]] *= scale;
\r
9747 in += info.inJump;
\r
9748 out += info.outJump;
\r
9751 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9752 Float32 *in = (Float32 *)inBuffer;
\r
9753 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9754 for (j=0; j<info.channels; j++) {
\r
9755 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9757 in += info.inJump;
\r
9758 out += info.outJump;
\r
9761 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9762 // Channel compensation and/or (de)interleaving only.
\r
9763 Float64 *in = (Float64 *)inBuffer;
\r
9764 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9765 for (j=0; j<info.channels; j++) {
\r
9766 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9768 in += info.inJump;
\r
9769 out += info.outJump;
\r
9773 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9775 Float32 *out = (Float32 *)outBuffer;
\r
9777 if (info.inFormat == RTAUDIO_SINT8) {
\r
9778 signed char *in = (signed char *)inBuffer;
\r
9779 scale = (Float32) ( 1.0 / 127.5 );
\r
9780 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9781 for (j=0; j<info.channels; j++) {
\r
9782 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9783 out[info.outOffset[j]] += 0.5;
\r
9784 out[info.outOffset[j]] *= scale;
\r
9786 in += info.inJump;
\r
9787 out += info.outJump;
\r
9790 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9791 Int16 *in = (Int16 *)inBuffer;
\r
9792 scale = (Float32) ( 1.0 / 32767.5 );
\r
9793 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9794 for (j=0; j<info.channels; j++) {
\r
9795 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9796 out[info.outOffset[j]] += 0.5;
\r
9797 out[info.outOffset[j]] *= scale;
\r
9799 in += info.inJump;
\r
9800 out += info.outJump;
\r
9803 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9804 Int24 *in = (Int24 *)inBuffer;
\r
9805 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9806 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9807 for (j=0; j<info.channels; j++) {
\r
9808 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9809 out[info.outOffset[j]] += 0.5;
\r
9810 out[info.outOffset[j]] *= scale;
\r
9812 in += info.inJump;
\r
9813 out += info.outJump;
\r
9816 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9817 Int32 *in = (Int32 *)inBuffer;
\r
9818 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9819 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9820 for (j=0; j<info.channels; j++) {
\r
9821 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9822 out[info.outOffset[j]] += 0.5;
\r
9823 out[info.outOffset[j]] *= scale;
\r
9825 in += info.inJump;
\r
9826 out += info.outJump;
\r
9829 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9830 // Channel compensation and/or (de)interleaving only.
\r
9831 Float32 *in = (Float32 *)inBuffer;
\r
9832 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9833 for (j=0; j<info.channels; j++) {
\r
9834 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9836 in += info.inJump;
\r
9837 out += info.outJump;
\r
9840 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9841 Float64 *in = (Float64 *)inBuffer;
\r
9842 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9843 for (j=0; j<info.channels; j++) {
\r
9844 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9846 in += info.inJump;
\r
9847 out += info.outJump;
\r
9851 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9852 Int32 *out = (Int32 *)outBuffer;
\r
9853 if (info.inFormat == RTAUDIO_SINT8) {
\r
9854 signed char *in = (signed char *)inBuffer;
\r
9855 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9856 for (j=0; j<info.channels; j++) {
\r
9857 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9858 out[info.outOffset[j]] <<= 24;
\r
9860 in += info.inJump;
\r
9861 out += info.outJump;
\r
9864 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9865 Int16 *in = (Int16 *)inBuffer;
\r
9866 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9867 for (j=0; j<info.channels; j++) {
\r
9868 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9869 out[info.outOffset[j]] <<= 16;
\r
9871 in += info.inJump;
\r
9872 out += info.outJump;
\r
9875 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9876 Int24 *in = (Int24 *)inBuffer;
\r
9877 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9878 for (j=0; j<info.channels; j++) {
\r
9879 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9880 out[info.outOffset[j]] <<= 8;
\r
9882 in += info.inJump;
\r
9883 out += info.outJump;
\r
9886 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9887 // Channel compensation and/or (de)interleaving only.
\r
9888 Int32 *in = (Int32 *)inBuffer;
\r
9889 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9890 for (j=0; j<info.channels; j++) {
\r
9891 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9893 in += info.inJump;
\r
9894 out += info.outJump;
\r
9897 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9898 Float32 *in = (Float32 *)inBuffer;
\r
9899 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9900 for (j=0; j<info.channels; j++) {
\r
9901 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9903 in += info.inJump;
\r
9904 out += info.outJump;
\r
9907 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9908 Float64 *in = (Float64 *)inBuffer;
\r
9909 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9910 for (j=0; j<info.channels; j++) {
\r
9911 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9913 in += info.inJump;
\r
9914 out += info.outJump;
\r
9918 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9919 Int24 *out = (Int24 *)outBuffer;
\r
9920 if (info.inFormat == RTAUDIO_SINT8) {
\r
9921 signed char *in = (signed char *)inBuffer;
\r
9922 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9923 for (j=0; j<info.channels; j++) {
\r
9924 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9925 //out[info.outOffset[j]] <<= 16;
\r
9927 in += info.inJump;
\r
9928 out += info.outJump;
\r
9931 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9932 Int16 *in = (Int16 *)inBuffer;
\r
9933 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9934 for (j=0; j<info.channels; j++) {
\r
9935 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9936 //out[info.outOffset[j]] <<= 8;
\r
9938 in += info.inJump;
\r
9939 out += info.outJump;
\r
9942 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9943 // Channel compensation and/or (de)interleaving only.
\r
9944 Int24 *in = (Int24 *)inBuffer;
\r
9945 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9946 for (j=0; j<info.channels; j++) {
\r
9947 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9949 in += info.inJump;
\r
9950 out += info.outJump;
\r
9953 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9954 Int32 *in = (Int32 *)inBuffer;
\r
9955 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9956 for (j=0; j<info.channels; j++) {
\r
9957 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9958 //out[info.outOffset[j]] >>= 8;
\r
9960 in += info.inJump;
\r
9961 out += info.outJump;
\r
9964 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9965 Float32 *in = (Float32 *)inBuffer;
\r
9966 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9967 for (j=0; j<info.channels; j++) {
\r
9968 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9970 in += info.inJump;
\r
9971 out += info.outJump;
\r
9974 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9975 Float64 *in = (Float64 *)inBuffer;
\r
9976 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9977 for (j=0; j<info.channels; j++) {
\r
9978 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9980 in += info.inJump;
\r
9981 out += info.outJump;
\r
9985 else if (info.outFormat == RTAUDIO_SINT16) {
\r
9986 Int16 *out = (Int16 *)outBuffer;
\r
9987 if (info.inFormat == RTAUDIO_SINT8) {
\r
9988 signed char *in = (signed char *)inBuffer;
\r
9989 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9990 for (j=0; j<info.channels; j++) {
\r
9991 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
9992 out[info.outOffset[j]] <<= 8;
\r
9994 in += info.inJump;
\r
9995 out += info.outJump;
\r
9998 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9999 // Channel compensation and/or (de)interleaving only.
\r
10000 Int16 *in = (Int16 *)inBuffer;
\r
10001 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10002 for (j=0; j<info.channels; j++) {
\r
10003 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10005 in += info.inJump;
\r
10006 out += info.outJump;
\r
10009 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10010 Int24 *in = (Int24 *)inBuffer;
\r
10011 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10012 for (j=0; j<info.channels; j++) {
\r
10013 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
10015 in += info.inJump;
\r
10016 out += info.outJump;
\r
10019 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10020 Int32 *in = (Int32 *)inBuffer;
\r
10021 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10022 for (j=0; j<info.channels; j++) {
\r
10023 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
10025 in += info.inJump;
\r
10026 out += info.outJump;
\r
10029 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10030 Float32 *in = (Float32 *)inBuffer;
\r
10031 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10032 for (j=0; j<info.channels; j++) {
\r
10033 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10035 in += info.inJump;
\r
10036 out += info.outJump;
\r
10039 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10040 Float64 *in = (Float64 *)inBuffer;
\r
10041 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10042 for (j=0; j<info.channels; j++) {
\r
10043 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10045 in += info.inJump;
\r
10046 out += info.outJump;
\r
10050 else if (info.outFormat == RTAUDIO_SINT8) {
\r
10051 signed char *out = (signed char *)outBuffer;
\r
10052 if (info.inFormat == RTAUDIO_SINT8) {
\r
10053 // Channel compensation and/or (de)interleaving only.
\r
10054 signed char *in = (signed char *)inBuffer;
\r
10055 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10056 for (j=0; j<info.channels; j++) {
\r
10057 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10059 in += info.inJump;
\r
10060 out += info.outJump;
\r
10063 if (info.inFormat == RTAUDIO_SINT16) {
\r
10064 Int16 *in = (Int16 *)inBuffer;
\r
10065 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10066 for (j=0; j<info.channels; j++) {
\r
10067 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10069 in += info.inJump;
\r
10070 out += info.outJump;
\r
10073 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10074 Int24 *in = (Int24 *)inBuffer;
\r
10075 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10076 for (j=0; j<info.channels; j++) {
\r
10077 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10079 in += info.inJump;
\r
10080 out += info.outJump;
\r
10083 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10084 Int32 *in = (Int32 *)inBuffer;
\r
10085 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10086 for (j=0; j<info.channels; j++) {
\r
10087 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10089 in += info.inJump;
\r
10090 out += info.outJump;
\r
10093 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10094 Float32 *in = (Float32 *)inBuffer;
\r
10095 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10096 for (j=0; j<info.channels; j++) {
\r
10097 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10099 in += info.inJump;
\r
10100 out += info.outJump;
\r
10103 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10104 Float64 *in = (Float64 *)inBuffer;
\r
10105 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10106 for (j=0; j<info.channels; j++) {
\r
10107 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10109 in += info.inJump;
\r
10110 out += info.outJump;
\r
10116 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10117 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10118 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10120 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10122 register char val;
\r
10123 register char *ptr;
\r
10126 if ( format == RTAUDIO_SINT16 ) {
\r
10127 for ( unsigned int i=0; i<samples; i++ ) {
\r
10128 // Swap 1st and 2nd bytes.
\r
10130 *(ptr) = *(ptr+1);
\r
10133 // Increment 2 bytes.
\r
10137 else if ( format == RTAUDIO_SINT32 ||
\r
10138 format == RTAUDIO_FLOAT32 ) {
\r
10139 for ( unsigned int i=0; i<samples; i++ ) {
\r
10140 // Swap 1st and 4th bytes.
\r
10142 *(ptr) = *(ptr+3);
\r
10145 // Swap 2nd and 3rd bytes.
\r
10148 *(ptr) = *(ptr+1);
\r
10151 // Increment 3 more bytes.
\r
10155 else if ( format == RTAUDIO_SINT24 ) {
\r
10156 for ( unsigned int i=0; i<samples; i++ ) {
\r
10157 // Swap 1st and 3rd bytes.
\r
10159 *(ptr) = *(ptr+2);
\r
10162 // Increment 2 more bytes.
\r
10166 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10167 for ( unsigned int i=0; i<samples; i++ ) {
\r
10168 // Swap 1st and 8th bytes
\r
10170 *(ptr) = *(ptr+7);
\r
10173 // Swap 2nd and 7th bytes
\r
10176 *(ptr) = *(ptr+5);
\r
10179 // Swap 3rd and 6th bytes
\r
10182 *(ptr) = *(ptr+3);
\r
10185 // Swap 4th and 5th bytes
\r
10188 *(ptr) = *(ptr+1);
\r
10191 // Increment 5 more bytes.
\r
10197 // Indentation settings for Vim and Emacs
\r
10199 // Local Variables:
\r
10200 // c-basic-offset: 2
\r
10201 // indent-tabs-mode: nil
\r
10204 // vim: et sts=2 sw=2
\r