/************************************************************************/
/*! \class RtAudio
    \brief Realtime audio i/o C++ classes.

    RtAudio provides a common API (Application Programming Interface)
    for realtime audio input/output across Linux (native ALSA, Jack,
    and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
    (DirectSound, ASIO and WASAPI) operating systems.

    RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/

    RtAudio: realtime audio i/o C++ classes
    Copyright (c) 2001-2016 Gary P. Scavone

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation files
    (the "Software"), to deal in the Software without restriction,
    including without limitation the rights to use, copy, modify, merge,
    publish, distribute, sublicense, and/or sell copies of the Software,
    and to permit persons to whom the Software is furnished to do so,
    subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    Any person wishing to distribute modifications to the Software is
    asked to send the modifications to the original developer so that
    they can be incorporated into the canonical version.  This is,
    however, not a binding provision of this license.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
    ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
    CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/************************************************************************/
// RtAudio: Version 4.1.2

#include "RtAudio.h"
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
#include <algorithm>
50 // Static variable definitions.
\r
51 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
52 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
53 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
54 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  // Windows builds use a CRITICAL_SECTION for the per-stream mutex.
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  // Convert a narrow device-name string to a std::string (no-op copy).
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Convert a wide (UTF-16) device-name string to a UTF-8 std::string.
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    // First call sizes the buffer, including the terminating null
    // (hence length-1 when sizing the std::string below).
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    if ( length <= 0 ) return std::string(); // conversion failed; avoid length-1 underflow
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  // No supported backend compiled in: mutexes become no-ops.
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
89 // *************************************************** //
\r
91 // RtAudio definitions.
\r
93 // *************************************************** //
\r
95 std::string RtAudio :: getVersion( void ) throw()
\r
97 return RTAUDIO_VERSION;
\r
100 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
104 // The order here will control the order of RtAudio's API search in
\r
105 // the constructor.
\r
106 #if defined(__UNIX_JACK__)
\r
107 apis.push_back( UNIX_JACK );
\r
109 #if defined(__LINUX_ALSA__)
\r
110 apis.push_back( LINUX_ALSA );
\r
112 #if defined(__LINUX_PULSE__)
\r
113 apis.push_back( LINUX_PULSE );
\r
115 #if defined(__LINUX_OSS__)
\r
116 apis.push_back( LINUX_OSS );
\r
118 #if defined(__WINDOWS_ASIO__)
\r
119 apis.push_back( WINDOWS_ASIO );
\r
121 #if defined(__WINDOWS_WASAPI__)
\r
122 apis.push_back( WINDOWS_WASAPI );
\r
124 #if defined(__WINDOWS_DS__)
\r
125 apis.push_back( WINDOWS_DS );
\r
127 #if defined(__MACOSX_CORE__)
\r
128 apis.push_back( MACOSX_CORE );
\r
130 #if defined(__RTAUDIO_DUMMY__)
\r
131 apis.push_back( RTAUDIO_DUMMY );
\r
135 void RtAudio :: openRtApi( RtAudio::Api api )
\r
141 #if defined(__UNIX_JACK__)
\r
142 if ( api == UNIX_JACK )
\r
143 rtapi_ = new RtApiJack();
\r
145 #if defined(__LINUX_ALSA__)
\r
146 if ( api == LINUX_ALSA )
\r
147 rtapi_ = new RtApiAlsa();
\r
149 #if defined(__LINUX_PULSE__)
\r
150 if ( api == LINUX_PULSE )
\r
151 rtapi_ = new RtApiPulse();
\r
153 #if defined(__LINUX_OSS__)
\r
154 if ( api == LINUX_OSS )
\r
155 rtapi_ = new RtApiOss();
\r
157 #if defined(__WINDOWS_ASIO__)
\r
158 if ( api == WINDOWS_ASIO )
\r
159 rtapi_ = new RtApiAsio();
\r
161 #if defined(__WINDOWS_WASAPI__)
\r
162 if ( api == WINDOWS_WASAPI )
\r
163 rtapi_ = new RtApiWasapi();
\r
165 #if defined(__WINDOWS_DS__)
\r
166 if ( api == WINDOWS_DS )
\r
167 rtapi_ = new RtApiDs();
\r
169 #if defined(__MACOSX_CORE__)
\r
170 if ( api == MACOSX_CORE )
\r
171 rtapi_ = new RtApiCore();
\r
173 #if defined(__RTAUDIO_DUMMY__)
\r
174 if ( api == RTAUDIO_DUMMY )
\r
175 rtapi_ = new RtApiDummy();
\r
179 RtAudio :: RtAudio( RtAudio::Api api )
\r
183 if ( api != UNSPECIFIED ) {
\r
184 // Attempt to open the specified API.
\r
186 if ( rtapi_ ) return;
\r
188 // No compiled support for specified API value. Issue a debug
\r
189 // warning and continue as if no API was specified.
\r
190 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
193 // Iterate through the compiled APIs and return as soon as we find
\r
194 // one with at least one device or we reach the end of the list.
\r
195 std::vector< RtAudio::Api > apis;
\r
196 getCompiledApi( apis );
\r
197 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
198 openRtApi( apis[i] );
\r
199 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
\r
202 if ( rtapi_ ) return;
\r
204 // It should not be possible to get here because the preprocessor
\r
205 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
206 // API-specific definitions are passed to the compiler. But just in
\r
207 // case something weird happens, we'll thow an error.
\r
208 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
209 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
212 RtAudio :: ~RtAudio() throw()
\r
218 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
219 RtAudio::StreamParameters *inputParameters,
\r
220 RtAudioFormat format, unsigned int sampleRate,
\r
221 unsigned int *bufferFrames,
\r
222 RtAudioCallback callback, void *userData,
\r
223 RtAudio::StreamOptions *options,
\r
224 RtAudioErrorCallback errorCallback )
\r
226 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
227 sampleRate, bufferFrames, callback,
\r
228 userData, options, errorCallback );
\r
231 // *************************************************** //
\r
233 // Public RtApi definitions (see end of file for
\r
234 // private or protected utility functions).
\r
236 // *************************************************** //
\r
240 stream_.state = STREAM_CLOSED;
\r
241 stream_.mode = UNINITIALIZED;
\r
242 stream_.apiHandle = 0;
\r
243 stream_.userBuffer[0] = 0;
\r
244 stream_.userBuffer[1] = 0;
\r
245 MUTEX_INITIALIZE( &stream_.mutex );
\r
246 showWarnings_ = true;
\r
247 firstErrorOccurred_ = false;
\r
252 MUTEX_DESTROY( &stream_.mutex );
\r
255 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
256 RtAudio::StreamParameters *iParams,
\r
257 RtAudioFormat format, unsigned int sampleRate,
\r
258 unsigned int *bufferFrames,
\r
259 RtAudioCallback callback, void *userData,
\r
260 RtAudio::StreamOptions *options,
\r
261 RtAudioErrorCallback errorCallback )
\r
263 if ( stream_.state != STREAM_CLOSED ) {
\r
264 errorText_ = "RtApi::openStream: a stream is already open!";
\r
265 error( RtAudioError::INVALID_USE );
\r
269 // Clear stream information potentially left from a previously open stream.
\r
272 if ( oParams && oParams->nChannels < 1 ) {
\r
273 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
274 error( RtAudioError::INVALID_USE );
\r
278 if ( iParams && iParams->nChannels < 1 ) {
\r
279 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
280 error( RtAudioError::INVALID_USE );
\r
284 if ( oParams == NULL && iParams == NULL ) {
\r
285 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
286 error( RtAudioError::INVALID_USE );
\r
290 if ( formatBytes(format) == 0 ) {
\r
291 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
292 error( RtAudioError::INVALID_USE );
\r
296 unsigned int nDevices = getDeviceCount();
\r
297 unsigned int oChannels = 0;
\r
299 oChannels = oParams->nChannels;
\r
300 if ( oParams->deviceId >= nDevices ) {
\r
301 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
302 error( RtAudioError::INVALID_USE );
\r
307 unsigned int iChannels = 0;
\r
309 iChannels = iParams->nChannels;
\r
310 if ( iParams->deviceId >= nDevices ) {
\r
311 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
312 error( RtAudioError::INVALID_USE );
\r
319 if ( oChannels > 0 ) {
\r
321 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
322 sampleRate, format, bufferFrames, options );
\r
323 if ( result == false ) {
\r
324 error( RtAudioError::SYSTEM_ERROR );
\r
329 if ( iChannels > 0 ) {
\r
331 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
332 sampleRate, format, bufferFrames, options );
\r
333 if ( result == false ) {
\r
334 if ( oChannels > 0 ) closeStream();
\r
335 error( RtAudioError::SYSTEM_ERROR );
\r
340 stream_.callbackInfo.callback = (void *) callback;
\r
341 stream_.callbackInfo.userData = userData;
\r
342 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
344 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
345 stream_.state = STREAM_STOPPED;
\r
348 unsigned int RtApi :: getDefaultInputDevice( void )
\r
350 // Should be implemented in subclasses if possible.
\r
354 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
356 // Should be implemented in subclasses if possible.
\r
360 void RtApi :: closeStream( void )
\r
362 // MUST be implemented in subclasses!
\r
366 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
367 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
368 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
369 RtAudio::StreamOptions * /*options*/ )
\r
371 // MUST be implemented in subclasses!
\r
375 void RtApi :: tickStreamTime( void )
\r
377 // Subclasses that do not provide their own implementation of
\r
378 // getStreamTime should call this function once per buffer I/O to
\r
379 // provide basic stream time support.
\r
381 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
383 #if defined( HAVE_GETTIMEOFDAY )
\r
384 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
388 long RtApi :: getStreamLatency( void )
\r
392 long totalLatency = 0;
\r
393 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
394 totalLatency = stream_.latency[0];
\r
395 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
396 totalLatency += stream_.latency[1];
\r
398 return totalLatency;
\r
401 double RtApi :: getStreamTime( void )
\r
405 #if defined( HAVE_GETTIMEOFDAY )
\r
406 // Return a very accurate estimate of the stream time by
\r
407 // adding in the elapsed time since the last tick.
\r
408 struct timeval then;
\r
409 struct timeval now;
\r
411 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
412 return stream_.streamTime;
\r
414 gettimeofday( &now, NULL );
\r
415 then = stream_.lastTickTimestamp;
\r
416 return stream_.streamTime +
\r
417 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
418 (then.tv_sec + 0.000001 * then.tv_usec));
\r
420 return stream_.streamTime;
\r
424 void RtApi :: setStreamTime( double time )
\r
429 stream_.streamTime = time;
\r
432 unsigned int RtApi :: getStreamSampleRate( void )
\r
436 return stream_.sampleRate;
\r
440 // *************************************************** //
\r
442 // OS/API-specific methods.
\r
444 // *************************************************** //
\r
446 #if defined(__MACOSX_CORE__)
\r
448 // The OS X CoreAudio API is designed to use a separate callback
\r
449 // procedure for each of its audio devices. A single RtAudio duplex
\r
450 // stream using two different devices is supported here, though it
\r
451 // cannot be guaranteed to always behave correctly because we cannot
\r
452 // synchronize these two callbacks.
\r
454 // A property listener is installed for over/underrun information.
\r
455 // However, no functionality is currently provided to allow property
\r
456 // listeners to trigger user handlers because it is unclear what could
\r
457 // be done if a critical stream parameter (buffer size, sample rate,
\r
458 // device disconnect) notification arrived. The listeners entail
\r
459 // quite a bit of extra code and most likely, a user program wouldn't
\r
460 // be prepared for the result anyway. However, we do provide a flag
\r
461 // to the client callback function to inform of an over/underrun.
\r
463 // A structure to hold various information related to the CoreAudio API
\r
465 struct CoreHandle {
\r
466 AudioDeviceID id[2]; // device ids
\r
467 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
468 AudioDeviceIOProcID procId[2];
\r
470 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
471 UInt32 nStreams[2]; // number of streams to use
\r
473 char *deviceBuffer;
\r
474 pthread_cond_t condition;
\r
475 int drainCounter; // Tracks callback counts when draining
\r
476 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
479 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
482 RtApiCore:: RtApiCore()
\r
484 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
485 // This is a largely undocumented but absolutely necessary
\r
486 // requirement starting with OS-X 10.6. If not called, queries and
\r
487 // updates to various audio device properties are not handled
\r
489 CFRunLoopRef theRunLoop = NULL;
\r
490 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
491 kAudioObjectPropertyScopeGlobal,
\r
492 kAudioObjectPropertyElementMaster };
\r
493 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
494 if ( result != noErr ) {
\r
495 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
496 error( RtAudioError::WARNING );
\r
501 RtApiCore :: ~RtApiCore()
\r
503 // The subclass destructor gets called before the base class
\r
504 // destructor, so close an existing stream before deallocating
\r
505 // apiDeviceId memory.
\r
506 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
509 unsigned int RtApiCore :: getDeviceCount( void )
\r
511 // Find out how many audio devices there are, if any.
\r
513 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
514 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
515 if ( result != noErr ) {
\r
516 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
517 error( RtAudioError::WARNING );
\r
521 return dataSize / sizeof( AudioDeviceID );
\r
524 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
526 unsigned int nDevices = getDeviceCount();
\r
527 if ( nDevices <= 1 ) return 0;
\r
530 UInt32 dataSize = sizeof( AudioDeviceID );
\r
531 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
532 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
533 if ( result != noErr ) {
\r
534 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
535 error( RtAudioError::WARNING );
\r
539 dataSize *= nDevices;
\r
540 AudioDeviceID deviceList[ nDevices ];
\r
541 property.mSelector = kAudioHardwarePropertyDevices;
\r
542 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
543 if ( result != noErr ) {
\r
544 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
545 error( RtAudioError::WARNING );
\r
549 for ( unsigned int i=0; i<nDevices; i++ )
\r
550 if ( id == deviceList[i] ) return i;
\r
552 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
553 error( RtAudioError::WARNING );
\r
557 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
559 unsigned int nDevices = getDeviceCount();
\r
560 if ( nDevices <= 1 ) return 0;
\r
563 UInt32 dataSize = sizeof( AudioDeviceID );
\r
564 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
565 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
566 if ( result != noErr ) {
\r
567 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
568 error( RtAudioError::WARNING );
\r
572 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
573 AudioDeviceID deviceList[ nDevices ];
\r
574 property.mSelector = kAudioHardwarePropertyDevices;
\r
575 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
576 if ( result != noErr ) {
\r
577 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
578 error( RtAudioError::WARNING );
\r
582 for ( unsigned int i=0; i<nDevices; i++ )
\r
583 if ( id == deviceList[i] ) return i;
\r
585 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
586 error( RtAudioError::WARNING );
\r
590 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
592 RtAudio::DeviceInfo info;
\r
593 info.probed = false;
\r
596 unsigned int nDevices = getDeviceCount();
\r
597 if ( nDevices == 0 ) {
\r
598 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
599 error( RtAudioError::INVALID_USE );
\r
603 if ( device >= nDevices ) {
\r
604 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
605 error( RtAudioError::INVALID_USE );
\r
609 AudioDeviceID deviceList[ nDevices ];
\r
610 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
611 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
612 kAudioObjectPropertyScopeGlobal,
\r
613 kAudioObjectPropertyElementMaster };
\r
614 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
615 0, NULL, &dataSize, (void *) &deviceList );
\r
616 if ( result != noErr ) {
\r
617 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
618 error( RtAudioError::WARNING );
\r
622 AudioDeviceID id = deviceList[ device ];
\r
624 // Get the device name.
\r
626 CFStringRef cfname;
\r
627 dataSize = sizeof( CFStringRef );
\r
628 property.mSelector = kAudioObjectPropertyManufacturer;
\r
629 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
630 if ( result != noErr ) {
\r
631 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
632 errorText_ = errorStream_.str();
\r
633 error( RtAudioError::WARNING );
\r
637 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
638 int length = CFStringGetLength(cfname);
\r
639 char *mname = (char *)malloc(length * 3 + 1);
\r
640 #if defined( UNICODE ) || defined( _UNICODE )
\r
641 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
643 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
645 info.name.append( (const char *)mname, strlen(mname) );
\r
646 info.name.append( ": " );
\r
647 CFRelease( cfname );
\r
650 property.mSelector = kAudioObjectPropertyName;
\r
651 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
652 if ( result != noErr ) {
\r
653 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
654 errorText_ = errorStream_.str();
\r
655 error( RtAudioError::WARNING );
\r
659 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
660 length = CFStringGetLength(cfname);
\r
661 char *name = (char *)malloc(length * 3 + 1);
\r
662 #if defined( UNICODE ) || defined( _UNICODE )
\r
663 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
665 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
667 info.name.append( (const char *)name, strlen(name) );
\r
668 CFRelease( cfname );
\r
671 // Get the output stream "configuration".
\r
672 AudioBufferList *bufferList = nil;
\r
673 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
674 property.mScope = kAudioDevicePropertyScopeOutput;
\r
675 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
677 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
678 if ( result != noErr || dataSize == 0 ) {
\r
679 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
680 errorText_ = errorStream_.str();
\r
681 error( RtAudioError::WARNING );
\r
685 // Allocate the AudioBufferList.
\r
686 bufferList = (AudioBufferList *) malloc( dataSize );
\r
687 if ( bufferList == NULL ) {
\r
688 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
689 error( RtAudioError::WARNING );
\r
693 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
694 if ( result != noErr || dataSize == 0 ) {
\r
695 free( bufferList );
\r
696 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
697 errorText_ = errorStream_.str();
\r
698 error( RtAudioError::WARNING );
\r
702 // Get output channel information.
\r
703 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
704 for ( i=0; i<nStreams; i++ )
\r
705 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
706 free( bufferList );
\r
708 // Get the input stream "configuration".
\r
709 property.mScope = kAudioDevicePropertyScopeInput;
\r
710 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
711 if ( result != noErr || dataSize == 0 ) {
\r
712 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
713 errorText_ = errorStream_.str();
\r
714 error( RtAudioError::WARNING );
\r
718 // Allocate the AudioBufferList.
\r
719 bufferList = (AudioBufferList *) malloc( dataSize );
\r
720 if ( bufferList == NULL ) {
\r
721 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
722 error( RtAudioError::WARNING );
\r
726 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
727 if (result != noErr || dataSize == 0) {
\r
728 free( bufferList );
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 // Get input channel information.
\r
736 nStreams = bufferList->mNumberBuffers;
\r
737 for ( i=0; i<nStreams; i++ )
\r
738 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
739 free( bufferList );
\r
741 // If device opens for both playback and capture, we determine the channels.
\r
742 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
743 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
745 // Probe the device sample rates.
\r
746 bool isInput = false;
\r
747 if ( info.outputChannels == 0 ) isInput = true;
\r
749 // Determine the supported sample rates.
\r
750 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
751 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
752 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
753 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
754 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
755 errorText_ = errorStream_.str();
\r
756 error( RtAudioError::WARNING );
\r
760 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
761 AudioValueRange rangeList[ nRanges ];
\r
762 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
763 if ( result != kAudioHardwareNoError ) {
\r
764 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
765 errorText_ = errorStream_.str();
\r
766 error( RtAudioError::WARNING );
\r
770 // The sample rate reporting mechanism is a bit of a mystery. It
\r
771 // seems that it can either return individual rates or a range of
\r
772 // rates. I assume that if the min / max range values are the same,
\r
773 // then that represents a single supported rate and if the min / max
\r
774 // range values are different, the device supports an arbitrary
\r
775 // range of values (though there might be multiple ranges, so we'll
\r
776 // use the most conservative range).
\r
777 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
778 bool haveValueRange = false;
\r
779 info.sampleRates.clear();
\r
780 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
781 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
\r
782 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
\r
783 info.sampleRates.push_back( tmpSr );
\r
785 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
\r
786 info.preferredSampleRate = tmpSr;
\r
789 haveValueRange = true;
\r
790 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
791 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
795 if ( haveValueRange ) {
\r
796 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
797 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
\r
798 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
800 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
801 info.preferredSampleRate = SAMPLE_RATES[k];
\r
806 // Sort and remove any redundant values
\r
807 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
808 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
810 if ( info.sampleRates.size() == 0 ) {
\r
811 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
812 errorText_ = errorStream_.str();
\r
813 error( RtAudioError::WARNING );
\r
817 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
818 // Thus, any other "physical" formats supported by the device are of
\r
819 // no interest to the client.
\r
820 info.nativeFormats = RTAUDIO_FLOAT32;
\r
822 if ( info.outputChannels > 0 )
\r
823 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
824 if ( info.inputChannels > 0 )
\r
825 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
827 info.probed = true;
\r
831 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
832 const AudioTimeStamp* /*inNow*/,
\r
833 const AudioBufferList* inInputData,
\r
834 const AudioTimeStamp* /*inInputTime*/,
\r
835 AudioBufferList* outOutputData,
\r
836 const AudioTimeStamp* /*inOutputTime*/,
\r
837 void* infoPointer )
\r
839 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
841 RtApiCore *object = (RtApiCore *) info->object;
\r
842 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
843 return kAudioHardwareUnspecifiedError;
\r
845 return kAudioHardwareNoError;
\r
848 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
850 const AudioObjectPropertyAddress properties[],
\r
851 void* handlePointer )
\r
853 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
854 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
855 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
856 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
857 handle->xrun[1] = true;
\r
859 handle->xrun[0] = true;
\r
863 return kAudioHardwareNoError;
\r
866 static OSStatus rateListener( AudioObjectID inDevice,
\r
867 UInt32 /*nAddresses*/,
\r
868 const AudioObjectPropertyAddress /*properties*/[],
\r
869 void* ratePointer )
\r
871 Float64 *rate = (Float64 *) ratePointer;
\r
872 UInt32 dataSize = sizeof( Float64 );
\r
873 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
874 kAudioObjectPropertyScopeGlobal,
\r
875 kAudioObjectPropertyElementMaster };
\r
876 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
877 return kAudioHardwareNoError;
\r
// Open and configure one direction (OUTPUT or INPUT) of a stream on a
// CoreAudio device: validates the device index, finds stream(s) covering
// the requested channels, sets buffer size, sample rate and stream format,
// allocates user/device buffers, and registers the IOProc and xrun listener.
// On failure, errorText_ is set before returning.
// NOTE(review): this chunk appears to have lost lines in extraction
// (closing braces, FAILURE returns, an error-cleanup label, etc.); the
// comments below describe only the logic that is visible here.
880 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
881 unsigned int firstChannel, unsigned int sampleRate,
882 RtAudioFormat format, unsigned int *bufferSize,
883 RtAudio::StreamOptions *options )
886 unsigned int nDevices = getDeviceCount();
887 if ( nDevices == 0 ) {
888 // This should not happen because a check is made before this function is called.
889 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
893 if ( device >= nDevices ) {
894 // This should not happen because a check is made before this function is called.
895 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// Map the RtAudio device index onto a CoreAudio AudioDeviceID.
// NOTE(review): a variable-length array is a GCC/Clang extension, not
// standard C++.
899 AudioDeviceID deviceList[ nDevices ];
900 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
901 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
902 kAudioObjectPropertyScopeGlobal,
903 kAudioObjectPropertyElementMaster };
904 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
905 0, NULL, &dataSize, (void *) &deviceList );
906 if ( result != noErr ) {
907 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
911 AudioDeviceID id = deviceList[ device ];
913 // Setup for stream mode.
// NOTE(review): isInput is never set true in this copy — the assignment in
// the INPUT branch looks to have been dropped; confirm against upstream.
914 bool isInput = false;
915 if ( mode == INPUT ) {
917 property.mScope = kAudioDevicePropertyScopeInput;
920 property.mScope = kAudioDevicePropertyScopeOutput;
922 // Get the stream "configuration".
923 AudioBufferList *bufferList = nil;
925 property.mSelector = kAudioDevicePropertyStreamConfiguration;
926 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
927 if ( result != noErr || dataSize == 0 ) {
928 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
929 errorText_ = errorStream_.str();
933 // Allocate the AudioBufferList.
934 bufferList = (AudioBufferList *) malloc( dataSize );
935 if ( bufferList == NULL ) {
936 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
940 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
941 if (result != noErr || dataSize == 0) {
942 free( bufferList );
943 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
944 errorText_ = errorStream_.str();
948 // Search for one or more streams that contain the desired number of
949 // channels. CoreAudio devices can have an arbitrary number of
950 // streams and each stream can have an arbitrary number of channels.
951 // For each stream, a single buffer of interleaved samples is
952 // provided. RtAudio prefers the use of one stream of interleaved
953 // data or multiple consecutive single-channel streams. However, we
954 // now support multiple consecutive multi-channel streams of
955 // interleaved data as well.
956 UInt32 iStream, offsetCounter = firstChannel;
957 UInt32 nStreams = bufferList->mNumberBuffers;
958 bool monoMode = false;
959 bool foundStream = false;
961 // First check that the device supports the requested number of
963 UInt32 deviceChannels = 0;
964 for ( iStream=0; iStream<nStreams; iStream++ )
965 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
967 if ( deviceChannels < ( channels + firstChannel ) ) {
968 free( bufferList );
969 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
970 errorText_ = errorStream_.str();
974 // Look for a single stream meeting our needs.
975 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
976 for ( iStream=0; iStream<nStreams; iStream++ ) {
977 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
978 if ( streamChannels >= channels + offsetCounter ) {
979 firstStream = iStream;
980 channelOffset = offsetCounter;
981 foundStream = true;
984 if ( streamChannels > offsetCounter ) break;
985 offsetCounter -= streamChannels;
988 // If we didn't find a single stream above, then we should be able
989 // to meet the channel specification with multiple streams.
990 if ( foundStream == false ) {
992 offsetCounter = firstChannel;
993 for ( iStream=0; iStream<nStreams; iStream++ ) {
994 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
995 if ( streamChannels > offsetCounter ) break;
996 offsetCounter -= streamChannels;
999 firstStream = iStream;
1000 channelOffset = offsetCounter;
1001 Int32 channelCounter = channels + offsetCounter - streamChannels;
1003 if ( streamChannels > 1 ) monoMode = false;
// Consume consecutive streams until the requested channel count is covered.
1004 while ( channelCounter > 0 ) {
1005 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1006 if ( streamChannels > 1 ) monoMode = false;
1007 channelCounter -= streamChannels;
1012 free( bufferList );
1014 // Determine the buffer size.
1015 AudioValueRange bufferRange;
1016 dataSize = sizeof( AudioValueRange );
1017 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1018 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1020 if ( result != noErr ) {
1021 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1022 errorText_ = errorStream_.str();
// Clamp the requested buffer size to the device's supported range.
1026 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1027 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1028 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1030 // Set the buffer size. For multiple streams, I'm assuming we only
1031 // need to make this setting for the master channel.
1032 UInt32 theSize = (UInt32) *bufferSize;
1033 dataSize = sizeof( UInt32 );
1034 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1035 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1037 if ( result != noErr ) {
1038 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1039 errorText_ = errorStream_.str();
1043 // If attempting to setup a duplex stream, the bufferSize parameter
1044 // MUST be the same in both directions!
1045 *bufferSize = theSize;
1046 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1047 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1048 errorText_ = errorStream_.str();
1052 stream_.bufferSize = *bufferSize;
1053 stream_.nBuffers = 1;
1055 // Try to set "hog" mode ... it's not clear to me this is working.
1056 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1058 dataSize = sizeof( hog_pid );
1059 property.mSelector = kAudioDevicePropertyHogMode;
1060 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1061 if ( result != noErr ) {
1062 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1063 errorText_ = errorStream_.str();
// Only claim exclusive access if some other process (or none) holds it.
1067 if ( hog_pid != getpid() ) {
1068 hog_pid = getpid();
1069 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1070 if ( result != noErr ) {
1071 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1072 errorText_ = errorStream_.str();
1078 // Check and if necessary, change the sample rate for the device.
1079 Float64 nominalRate;
1080 dataSize = sizeof( Float64 );
1081 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1082 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1083 if ( result != noErr ) {
1084 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1085 errorText_ = errorStream_.str();
1089 // Only change the sample rate if off by more than 1 Hz.
1090 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1092 // Set a property listener for the sample rate change
1093 Float64 reportedRate = 0.0;
1094 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1095 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1096 if ( result != noErr ) {
1097 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1098 errorText_ = errorStream_.str();
1102 nominalRate = (Float64) sampleRate;
1103 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1104 if ( result != noErr ) {
1105 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1106 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1107 errorText_ = errorStream_.str();
1111 // Now wait until the reported nominal rate is what we just set.
// rateListener() writes the new rate into reportedRate when CoreAudio
// applies the change; wait at most ~5 seconds for that to happen.
// NOTE(review): no sleep call is visible inside this loop in this copy,
// so as written it spins; upstream sleeps ~5 ms per iteration
// (usleep(5000)) to match the 5000-microsecond counter increment.
1112 UInt32 microCounter = 0;
1113 while ( reportedRate != nominalRate ) {
1114 microCounter += 5000;
1115 if ( microCounter > 5000000 ) break;
1119 // Remove the property listener.
1120 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1122 if ( microCounter > 5000000 ) {
1123 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1124 errorText_ = errorStream_.str();
1129 // Now set the stream format for all streams. Also, check the
1130 // physical format of the device and change that if necessary.
1131 AudioStreamBasicDescription description;
1132 dataSize = sizeof( AudioStreamBasicDescription );
1133 property.mSelector = kAudioStreamPropertyVirtualFormat;
1134 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1135 if ( result != noErr ) {
1136 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1137 errorText_ = errorStream_.str();
1141 // Set the sample rate and data format id. However, only make the
1142 // change if the sample rate is not within 1.0 of the desired
1143 // rate and the format is not linear pcm.
1144 bool updateFormat = false;
1145 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1146 description.mSampleRate = (Float64) sampleRate;
1147 updateFormat = true;
1150 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1151 description.mFormatID = kAudioFormatLinearPCM;
1152 updateFormat = true;
1155 if ( updateFormat ) {
1156 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1157 if ( result != noErr ) {
1158 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1159 errorText_ = errorStream_.str();
1164 // Now check the physical format.
1165 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1166 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1167 if ( result != noErr ) {
1168 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1169 errorText_ = errorStream_.str();
1173 //std::cout << "Current physical stream format:" << std::endl;
1174 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1175 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1176 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1177 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1179 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1180 description.mFormatID = kAudioFormatLinearPCM;
1181 //description.mSampleRate = (Float64) sampleRate;
1182 AudioStreamBasicDescription testDescription = description;
1183 UInt32 formatFlags;
1185 // We'll try higher bit rates first and then work our way down.
// NOTE(review): the pairs pushed below are pair<Float32, UInt32> — the
// fractional markers 24.2/24.4 distinguish the two 24-in-4-bytes layouts —
// but the vector element type here is pair<UInt32, UInt32>, which collapses
// both to 24. Behaviorally harmless in this function because .first is only
// read via a (UInt32) cast, yet upstream declares the vector with Float32.
1186 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1187 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1188 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1189 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1190 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1191 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1192 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1193 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1194 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1196 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1198 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1200 bool setPhysicalFormat = false;
1201 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1202 testDescription = description;
1203 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1204 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): '~' here is bitwise NOT, so this subexpression is non-zero
// for every flag value and the packed test is effectively always true —
// every 24-bit candidate takes the 4-bytes-per-sample branch. Upstream
// RtAudio uses logical '!' to select only the unpacked 24-in-4 layouts.
1205 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1206 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1208 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1209 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1210 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1211 if ( result == noErr ) {
1212 setPhysicalFormat = true;
1213 //std::cout << "Updated physical stream format:" << std::endl;
1214 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1215 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1216 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1217 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1222 if ( !setPhysicalFormat ) {
1223 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1224 errorText_ = errorStream_.str();
1227 } // done setting virtual/physical formats.
1229 // Get the stream / device latency.
// NOTE(review): the declaration of 'latency' is not visible in this copy —
// presumably a dropped 'UInt32 latency;' line; confirm against upstream.
1231 dataSize = sizeof( UInt32 );
1232 property.mSelector = kAudioDevicePropertyLatency;
1233 if ( AudioObjectHasProperty( id, &property ) == true ) {
1234 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1235 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1237 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1238 errorText_ = errorStream_.str();
1239 error( RtAudioError::WARNING );
1243 // Byte-swapping: According to AudioHardware.h, the stream data will
1244 // always be presented in native-endian format, so we should never
1245 // need to byte swap.
1246 stream_.doByteSwap[mode] = false;
1248 // From the CoreAudio documentation, PCM data must be supplied as
1250 stream_.userFormat = format;
1251 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1253 if ( streamCount == 1 )
1254 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1255 else // multiple streams
1256 stream_.nDeviceChannels[mode] = channels;
1257 stream_.nUserChannels[mode] = channels;
1258 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1259 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1260 else stream_.userInterleaved = true;
1261 stream_.deviceInterleaved[mode] = true;
1262 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1264 // Set flags for buffer conversion.
1265 stream_.doConvertBuffer[mode] = false;
1266 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1267 stream_.doConvertBuffer[mode] = true;
1268 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1269 stream_.doConvertBuffer[mode] = true;
1270 if ( streamCount == 1 ) {
1271 if ( stream_.nUserChannels[mode] > 1 &&
1272 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1273 stream_.doConvertBuffer[mode] = true;
1275 else if ( monoMode && stream_.userInterleaved )
1276 stream_.doConvertBuffer[mode] = true;
1278 // Allocate our CoreHandle structure for the stream.
1279 CoreHandle *handle = 0;
1280 if ( stream_.apiHandle == 0 ) {
1282 handle = new CoreHandle;
1284 catch ( std::bad_alloc& ) {
1285 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1289 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1290 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1293 stream_.apiHandle = (void *) handle;
1296 handle = (CoreHandle *) stream_.apiHandle;
1297 handle->iStream[mode] = firstStream;
1298 handle->nStreams[mode] = streamCount;
1299 handle->id[mode] = id;
1301 // Allocate necessary internal buffers.
1302 unsigned long bufferBytes;
1303 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1304 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
// NOTE(review): memset runs before the NULL check below, so a failed
// malloc would be dereferenced here; test for NULL first (or use calloc,
// as the commented-out line above did).
1305 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1306 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1307 if ( stream_.userBuffer[mode] == NULL ) {
1308 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1312 // If possible, we will make use of the CoreAudio stream buffers as
1313 // "device buffers". However, we can't do this if using multiple
1315 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1317 bool makeBuffer = true;
1318 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1319 if ( mode == INPUT ) {
// For duplex streams, reuse the output-side device buffer when it is
// already large enough for the input side.
1320 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1321 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1322 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1326 if ( makeBuffer ) {
1327 bufferBytes *= *bufferSize;
1328 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1329 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1330 if ( stream_.deviceBuffer == NULL ) {
1331 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1337 stream_.sampleRate = sampleRate;
1338 stream_.device[mode] = device;
1339 stream_.state = STREAM_STOPPED;
1340 stream_.callbackInfo.object = (void *) this;
1342 // Setup the buffer conversion information structure.
1343 if ( stream_.doConvertBuffer[mode] ) {
1344 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1345 else setConvertInfo( mode, channelOffset );
1348 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1349 // Only one callback procedure per device.
1350 stream_.mode = DUPLEX;
1352 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1353 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1355 // deprecated in favor of AudioDeviceCreateIOProcID()
1356 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1358 if ( result != noErr ) {
1359 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1360 errorText_ = errorStream_.str();
1363 if ( stream_.mode == OUTPUT && mode == INPUT )
1364 stream_.mode = DUPLEX;
1366 stream_.mode = mode;
1369 // Setup the device property listener for over/underload.
1370 property.mSelector = kAudioDeviceProcessorOverload;
1371 property.mScope = kAudioObjectPropertyScopeGlobal;
1372 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// Error-cleanup path: release everything allocated above so a failed open
// leaves stream_ in a closed, consistent state.
1378 pthread_cond_destroy( &handle->condition );
1380 stream_.apiHandle = 0;
1383 for ( int i=0; i<2; i++ ) {
1384 if ( stream_.userBuffer[i] ) {
1385 free( stream_.userBuffer[i] );
1386 stream_.userBuffer[i] = 0;
1390 if ( stream_.deviceBuffer ) {
1391 free( stream_.deviceBuffer );
1392 stream_.deviceBuffer = 0;
1395 stream_.state = STREAM_CLOSED;
\r
// Close the open stream: for each active direction, remove the processor-
// overload (xrun) listener, stop the device if still running, destroy the
// IOProc, then free all internal buffers, destroy the condition variable,
// and reset stream_ to UNINITIALIZED / STREAM_CLOSED.
1399 void RtApiCore :: closeStream( void )
// Guard: closing an already-closed stream is only a warning, not an error.
1401 if ( stream_.state == STREAM_CLOSED ) {
1402 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1403 error( RtAudioError::WARNING );
1407 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output side (handle->id[0]): unhook listener and callback.
1408 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1410 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1411 kAudioObjectPropertyScopeGlobal,
1412 kAudioObjectPropertyElementMaster };
1414 property.mSelector = kAudioDeviceProcessorOverload;
1415 property.mScope = kAudioObjectPropertyScopeGlobal;
1416 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1417 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1418 error( RtAudioError::WARNING );
1421 if ( stream_.state == STREAM_RUNNING )
1422 AudioDeviceStop( handle->id[0], callbackHandler );
1423 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1424 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1426 // deprecated in favor of AudioDeviceDestroyIOProcID()
1427 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input side (handle->id[1]): same teardown, but only when the input runs
// on a different device than the output (otherwise one IOProc serves both).
1431 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1433 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1434 kAudioObjectPropertyScopeGlobal,
1435 kAudioObjectPropertyElementMaster };
1437 property.mSelector = kAudioDeviceProcessorOverload;
1438 property.mScope = kAudioObjectPropertyScopeGlobal;
1439 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1440 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1441 error( RtAudioError::WARNING );
1444 if ( stream_.state == STREAM_RUNNING )
1445 AudioDeviceStop( handle->id[1], callbackHandler );
1446 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1447 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1449 // deprecated in favor of AudioDeviceDestroyIOProcID()
1450 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release the per-direction user buffers.
1454 for ( int i=0; i<2; i++ ) {
1455 if ( stream_.userBuffer[i] ) {
1456 free( stream_.userBuffer[i] );
1457 stream_.userBuffer[i] = 0;
1461 if ( stream_.deviceBuffer ) {
1462 free( stream_.deviceBuffer );
1463 stream_.deviceBuffer = 0;
1466 // Destroy pthread condition variable.
1467 pthread_cond_destroy( &handle->condition );
// NOTE(review): the 'delete handle;' that presumably precedes this reset is
// not visible in this copy — confirm the CoreHandle is freed upstream.
1469 stream_.apiHandle = 0;
1471 stream_.mode = UNINITIALIZED;
1472 stream_.state = STREAM_CLOSED;
\r
// Start the open stream: call AudioDeviceStart() for the output device and,
// when the input runs on a different device, for the input device too; then
// reset the drain state and mark the stream RUNNING. On any CoreAudio
// failure, errorText_ is set and error(SYSTEM_ERROR) is raised at the end.
1475 void RtApiCore :: startStream( void )
// Guard: starting an already-running stream is only a warning.
1478 if ( stream_.state == STREAM_RUNNING ) {
1479 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1480 error( RtAudioError::WARNING );
1484 OSStatus result = noErr;
1485 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1486 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1488 result = AudioDeviceStart( handle->id[0], callbackHandler );
1489 if ( result != noErr ) {
1490 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1491 errorText_ = errorStream_.str();
// Start the input device only if it is distinct from the output device
// (same device duplex is serviced by the single IOProc started above).
1496 if ( stream_.mode == INPUT ||
1497 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1499 result = AudioDeviceStart( handle->id[1], callbackHandler );
1500 if ( result != noErr ) {
1501 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1502 errorText_ = errorStream_.str();
// Reset drain bookkeeping used by stopStream()/callbackEvent().
1507 handle->drainCounter = 0;
1508 handle->internalDrain = false;
1509 stream_.state = STREAM_RUNNING;
// Common exit: silent on success, SYSTEM_ERROR when any start call failed.
1512 if ( result == noErr ) return;
1513 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the open stream, draining pending output first: sets drainCounter to
// 2 and blocks on the condition variable until callbackEvent() signals that
// the output has been zero-filled, then calls AudioDeviceStop() for each
// active direction and marks the stream STOPPED.
1516 void RtApiCore :: stopStream( void )
// Guard: stopping an already-stopped stream is only a warning.
1519 if ( stream_.state == STREAM_STOPPED ) {
1520 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1521 error( RtAudioError::WARNING );
1525 OSStatus result = noErr;
1526 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1527 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means the callback has not begun draining yet; request
// a drain (2) and wait for callbackEvent() to pthread_cond_signal us.
1529 if ( handle->drainCounter == 0 ) {
1530 handle->drainCounter = 2;
1531 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1534 result = AudioDeviceStop( handle->id[0], callbackHandler );
1535 if ( result != noErr ) {
1536 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1537 errorText_ = errorStream_.str();
// Stop the input device only when it differs from the output device.
1542 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1544 result = AudioDeviceStop( handle->id[1], callbackHandler );
1545 if ( result != noErr ) {
1546 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1547 errorText_ = errorStream_.str();
1552 stream_.state = STREAM_STOPPED;
// Common exit: silent on success, SYSTEM_ERROR when any stop call failed.
1555 if ( result == noErr ) return;
1556 error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the open stream: setting drainCounter to 2 makes callbackEvent()
// write zeros to the output instead of invoking the user callback, i.e. the
// stream stops without draining user-supplied audio.
// NOTE(review): the visible chunk ends before the actual stop — presumably
// a stopStream() call follows; confirm against the full source.
1559 void RtApiCore :: abortStream( void )
// Guard: aborting an already-stopped stream is only a warning.
1562 if ( stream_.state == STREAM_STOPPED ) {
1563 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1564 error( RtAudioError::WARNING );
1568 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1569 handle->drainCounter = 2;
\r
1574 // This function will be called by a spawned thread when the user
\r
1575 // callback function signals that the stream should be stopped or
\r
1576 // aborted. It is better to handle it this way because the
\r
1577 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1578 // function is called.
\r
1579 static void *coreStopStream( void *ptr )
\r
1581 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1582 RtApiCore *object = (RtApiCore *) info->object;
\r
1584 object->stopStream();
\r
1585 pthread_exit( NULL );
\r
1588 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1589 const AudioBufferList *inBufferList,
\r
1590 const AudioBufferList *outBufferList )
\r
1592 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1593 if ( stream_.state == STREAM_CLOSED ) {
\r
1594 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1595 error( RtAudioError::WARNING );
\r
1599 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1600 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1602 // Check if we were draining the stream and signal is finished.
\r
1603 if ( handle->drainCounter > 3 ) {
\r
1604 ThreadHandle threadId;
\r
1606 stream_.state = STREAM_STOPPING;
\r
1607 if ( handle->internalDrain == true )
\r
1608 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1609 else // external call to stopStream()
\r
1610 pthread_cond_signal( &handle->condition );
\r
1614 AudioDeviceID outputDevice = handle->id[0];
\r
1616 // Invoke user callback to get fresh output data UNLESS we are
\r
1617 // draining stream or duplex mode AND the input/output devices are
\r
1618 // different AND this function is called for the input device.
\r
1619 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1620 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1621 double streamTime = getStreamTime();
\r
1622 RtAudioStreamStatus status = 0;
\r
1623 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1624 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1625 handle->xrun[0] = false;
\r
1627 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1628 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1629 handle->xrun[1] = false;
\r
1632 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1633 stream_.bufferSize, streamTime, status, info->userData );
\r
1634 if ( cbReturnValue == 2 ) {
\r
1635 stream_.state = STREAM_STOPPING;
\r
1636 handle->drainCounter = 2;
\r
1640 else if ( cbReturnValue == 1 ) {
\r
1641 handle->drainCounter = 1;
\r
1642 handle->internalDrain = true;
\r
1646 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1648 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1650 if ( handle->nStreams[0] == 1 ) {
\r
1651 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1653 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1655 else { // fill multiple streams with zeros
\r
1656 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1657 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1659 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1663 else if ( handle->nStreams[0] == 1 ) {
\r
1664 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1665 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1666 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1668 else { // copy from user buffer
\r
1669 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1670 stream_.userBuffer[0],
\r
1671 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1674 else { // fill multiple streams
\r
1675 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1676 if ( stream_.doConvertBuffer[0] ) {
\r
1677 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1678 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1681 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1682 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1683 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1684 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1685 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1688 else { // fill multiple multi-channel streams with interleaved data
\r
1689 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1690 Float32 *out, *in;
\r
1692 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1693 UInt32 inChannels = stream_.nUserChannels[0];
\r
1694 if ( stream_.doConvertBuffer[0] ) {
\r
1695 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1696 inChannels = stream_.nDeviceChannels[0];
\r
1699 if ( inInterleaved ) inOffset = 1;
\r
1700 else inOffset = stream_.bufferSize;
\r
1702 channelsLeft = inChannels;
\r
1703 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1705 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1706 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1709 // Account for possible channel offset in first stream
\r
1710 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1711 streamChannels -= stream_.channelOffset[0];
\r
1712 outJump = stream_.channelOffset[0];
\r
1716 // Account for possible unfilled channels at end of the last stream
\r
1717 if ( streamChannels > channelsLeft ) {
\r
1718 outJump = streamChannels - channelsLeft;
\r
1719 streamChannels = channelsLeft;
\r
1722 // Determine input buffer offsets and skips
\r
1723 if ( inInterleaved ) {
\r
1724 inJump = inChannels;
\r
1725 in += inChannels - channelsLeft;
\r
1729 in += (inChannels - channelsLeft) * inOffset;
\r
1732 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1733 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1734 *out++ = in[j*inOffset];
\r
1739 channelsLeft -= streamChannels;
\r
1745 // Don't bother draining input
\r
1746 if ( handle->drainCounter ) {
\r
1747 handle->drainCounter++;
\r
1751 AudioDeviceID inputDevice;
\r
1752 inputDevice = handle->id[1];
\r
1753 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1755 if ( handle->nStreams[1] == 1 ) {
\r
1756 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1757 convertBuffer( stream_.userBuffer[1],
\r
1758 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1759 stream_.convertInfo[1] );
\r
1761 else { // copy to user buffer
\r
1762 memcpy( stream_.userBuffer[1],
\r
1763 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1764 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1767 else { // read from multiple streams
\r
1768 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1769 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1771 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1772 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1773 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1774 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1775 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1778 else { // read from multiple multi-channel streams
\r
1779 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1780 Float32 *out, *in;
\r
1782 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1783 UInt32 outChannels = stream_.nUserChannels[1];
\r
1784 if ( stream_.doConvertBuffer[1] ) {
\r
1785 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1786 outChannels = stream_.nDeviceChannels[1];
\r
1789 if ( outInterleaved ) outOffset = 1;
\r
1790 else outOffset = stream_.bufferSize;
\r
1792 channelsLeft = outChannels;
\r
1793 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1795 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1796 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1799 // Account for possible channel offset in first stream
\r
1800 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1801 streamChannels -= stream_.channelOffset[1];
\r
1802 inJump = stream_.channelOffset[1];
\r
1806 // Account for possible unread channels at end of the last stream
\r
1807 if ( streamChannels > channelsLeft ) {
\r
1808 inJump = streamChannels - channelsLeft;
\r
1809 streamChannels = channelsLeft;
\r
1812 // Determine output buffer offsets and skips
\r
1813 if ( outInterleaved ) {
\r
1814 outJump = outChannels;
\r
1815 out += outChannels - channelsLeft;
\r
1819 out += (outChannels - channelsLeft) * outOffset;
\r
1822 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1823 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1824 out[j*outOffset] = *in++;
\r
1829 channelsLeft -= streamChannels;
\r
1833 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1834 convertBuffer( stream_.userBuffer[1],
\r
1835 stream_.deviceBuffer,
\r
1836 stream_.convertInfo[1] );
\r
1842 //MUTEX_UNLOCK( &stream_.mutex );
\r
1844 RtApi::tickStreamTime();
\r
// Translate a CoreAudio OSStatus error code into a printable symbolic name
// for use in RtAudio error messages.
// NOTE(review): this numbered listing is a fragment — the opening brace, the
// switch ( code ) statement, the default: label and the closing braces fall
// on source lines omitted from this excerpt.
1848 const char* RtApiCore :: getErrorCode( OSStatus code )

1852 case kAudioHardwareNotRunningError:

1853 return "kAudioHardwareNotRunningError";

1855 case kAudioHardwareUnspecifiedError:

1856 return "kAudioHardwareUnspecifiedError";

1858 case kAudioHardwareUnknownPropertyError:

1859 return "kAudioHardwareUnknownPropertyError";

1861 case kAudioHardwareBadPropertySizeError:

1862 return "kAudioHardwareBadPropertySizeError";

1864 case kAudioHardwareIllegalOperationError:

1865 return "kAudioHardwareIllegalOperationError";

1867 case kAudioHardwareBadObjectError:

1868 return "kAudioHardwareBadObjectError";

1870 case kAudioHardwareBadDeviceError:

1871 return "kAudioHardwareBadDeviceError";

1873 case kAudioHardwareBadStreamError:

1874 return "kAudioHardwareBadStreamError";

1876 case kAudioHardwareUnsupportedOperationError:

1877 return "kAudioHardwareUnsupportedOperationError";

1879 case kAudioDeviceUnsupportedFormatError:

1880 return "kAudioDeviceUnsupportedFormatError";

1882 case kAudioDevicePermissionsError:

1883 return "kAudioDevicePermissionsError";

1886 return "CoreAudio unknown error"; // presumably under the omitted default: label
\r
1890 //******************** End of __MACOSX_CORE__ *********************//
\r
1893 #if defined(__UNIX_JACK__)
\r
1895 // JACK is a low-latency audio server, originally written for the
\r
1896 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1897 // connect a number of different applications to an audio device, as
\r
1898 // well as allowing them to share audio between themselves.
\r
1900 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1901 // have ports connected to the server. The JACK server is typically
\r
1902 // started in a terminal as follows:
\r
1904 // .jackd -d alsa -d hw:0
\r
1906 // or through an interface program such as qjackctl. Many of the
\r
1907 // parameters normally set for a stream are fixed by the JACK server
\r
1908 // and can be specified when the JACK server is started. In
\r
1911 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1913 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1914 // frames, and number of buffers = 4. Once the server is running, it
\r
1915 // is not possible to override these values. If the values are not
\r
1916 // specified in the command-line, the JACK server uses default values.
\r
1918 // The JACK server does not have to be running when an instance of
\r
1919 // RtApiJack is created, though the function getDeviceCount() will
\r
1920 // report 0 devices found until JACK has been started. When no
\r
1921 // devices are available (i.e., the JACK server is not running), a
\r
1922 // stream cannot be opened.
\r
1924 #include <jack/jack.h>
\r
1925 #include <unistd.h>
\r
1928 // A structure to hold various information related to the Jack API
\r
1929 // implementation.
\r
// Per-stream bookkeeping for the JACK backend: the client connection, the
// registered port arrays and client (device) names for playback [0] and
// capture [1], plus drain/stop coordination state shared with the process
// callback (see callbackEvent / stopStream).
1930 struct JackHandle {

1931 jack_client_t *client;

1932 jack_port_t **ports[2];

1933 std::string deviceName[2];

1935 pthread_cond_t condition; // signaled from the callback when an output drain completes; stopStream waits on it

1936 int drainCounter; // Tracks callback counts when draining

1937 bool internalDrain; // Indicates if stop is initiated from callback or not.

// NOTE(review): the xrun[2] member, the constructor header line and the
// closing brace are on source lines missing from this excerpt (the
// initializer list below references xrun[0]/xrun[1]).
1940 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op JACK error handler: installed in RtApiJack() (non-debug builds) to
// suppress JACK's internal error printing.
1943 static void jackSilentError( const char * ) {};
\r
// Constructor: no stream state to set up; in non-debug builds, silence
// JACK's internal error reporting by installing the no-op handler above.
1945 RtApiJack :: RtApiJack()

1947 // Nothing to do here.

1948 #if !defined(__RTAUDIO_DEBUG__)

1949 // Turn off Jack's internal error reporting.

1950 jack_set_error_function( &jackSilentError );
\r
// Destructor: make sure any open stream is shut down before this API
// object goes away.
1954 RtApiJack :: ~RtApiJack()

1956 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// Count JACK "devices": connect as a temporary client, enumerate all ports,
// and tally the distinct client-name prefixes (text up to and including the
// first ':').  Returns 0 when no JACK server is reachable.
1959 unsigned int RtApiJack :: getDeviceCount( void )

1961 // See if we can become a jack client.

1962 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;

1963 jack_status_t *status = NULL;

1964 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );

1965 if ( client == 0 ) return 0;

1967 const char **ports;

1968 std::string port, previousPort;

1969 unsigned int nChannels = 0, nDevices = 0;

1970 ports = jack_get_ports( client, NULL, NULL, 0 );

1972 // Parse the port names up to the first colon (:).

1973 size_t iColon = 0;

// NOTE(review): fragmentary listing — the null-check on ports, the do {
// loop opener, the nDevices increment and the final return fall on source
// lines omitted from this excerpt.
1975 port = (char *) ports[ nChannels ];

1976 iColon = port.find(":");

1977 if ( iColon != std::string::npos ) {

1978 port = port.substr( 0, iColon + 1 );

1979 if ( port != previousPort ) {

1981 previousPort = port;

1984 } while ( ports[++nChannels] );

1988 jack_client_close( client );
\r
// Fill a DeviceInfo for the JACK "device" (client-name prefix) at the given
// index: name, channel counts (JACK input ports = RtAudio output channels,
// JACK output ports = RtAudio input channels), and the server's single
// supported sample rate.  info.probed is true only on full success.
// NOTE(review): fragmentary listing — braces, early returns and some guard
// lines (e.g. null checks on jack_get_ports results) fall on omitted lines.
1992 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )

1994 RtAudio::DeviceInfo info;

1995 info.probed = false;

1997 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption

1998 jack_status_t *status = NULL;

1999 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );

2000 if ( client == 0 ) {

2001 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";

2002 error( RtAudioError::WARNING );

2006 const char **ports;

2007 std::string port, previousPort;

2008 unsigned int nPorts = 0, nDevices = 0;

2009 ports = jack_get_ports( client, NULL, NULL, 0 );

2011 // Parse the port names up to the first colon (:).

2012 size_t iColon = 0;

2014 port = (char *) ports[ nPorts ];

2015 iColon = port.find(":");

2016 if ( iColon != std::string::npos ) {

2017 port = port.substr( 0, iColon );

2018 if ( port != previousPort ) {

2019 if ( nDevices == device ) info.name = port;

2021 previousPort = port;

2024 } while ( ports[++nPorts] );

2028 if ( device >= nDevices ) {

2029 jack_client_close( client );

2030 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";

2031 error( RtAudioError::INVALID_USE );

2035 // Get the current jack server sample rate.

2036 info.sampleRates.clear();

2038 info.preferredSampleRate = jack_get_sample_rate( client );

2039 info.sampleRates.push_back( info.preferredSampleRate );

2041 // Count the available ports containing the client name as device

2042 // channels. Jack "input ports" equal RtAudio output channels.

2043 unsigned int nChannels = 0;

2044 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );

2046 while ( ports[ nChannels ] ) nChannels++;

2048 info.outputChannels = nChannels;

2051 // Jack "output ports" equal RtAudio input channels.

// NOTE(review): the reset of nChannels to 0 before the capture-side count is
// presumably on an omitted source line — confirm against the full file.
2053 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );

2055 while ( ports[ nChannels ] ) nChannels++;

2057 info.inputChannels = nChannels;

2060 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {

2061 jack_client_close(client);

2062 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";

2063 error( RtAudioError::WARNING );

2067 // If device opens for both playback and capture, we determine the channels.

2068 if ( info.outputChannels > 0 && info.inputChannels > 0 )

2069 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

2071 // Jack always uses 32-bit floats.

2072 info.nativeFormats = RTAUDIO_FLOAT32;

2074 // Jack doesn't provide default devices so we'll use the first available one.

2075 if ( device == 0 && info.outputChannels > 0 )

2076 info.isDefaultOutput = true;

2077 if ( device == 0 && info.inputChannels > 0 )

2078 info.isDefaultInput = true;

2080 jack_client_close(client);

2081 info.probed = true;
\r
// JACK process callback: forwards each block of nframes to
// RtApiJack::callbackEvent().  A non-zero return (when callbackEvent fails)
// tells the JACK server to stop calling this client.
2085 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )

2087 CallbackInfo *info = (CallbackInfo *) infoPointer;

2089 RtApiJack *object = (RtApiJack *) info->object;

2090 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2095 // This function will be called by a spawned thread when the Jack

2096 // server signals that it is shutting down. It is necessary to handle

2097 // it this way because the jackShutdown() function must return before

2098 // the jack_deactivate() function (in closeStream()) will return.

// Thread entry point: closes the stream on behalf of jackShutdown().
2099 static void *jackCloseStream( void *ptr )

2101 CallbackInfo *info = (CallbackInfo *) ptr;

2102 RtApiJack *object = (RtApiJack *) info->object;

2104 object->closeStream();

2106 pthread_exit( NULL );
\r
// JACK shutdown callback: if the stream is still running (i.e. this was not
// triggered by our own deactivate in stopStream), spawn a thread to close
// the stream — see the comment above jackCloseStream for why a separate
// thread is required.
2108 static void jackShutdown( void *infoPointer )

2110 CallbackInfo *info = (CallbackInfo *) infoPointer;

2111 RtApiJack *object = (RtApiJack *) info->object;

2113 // Check current stream state. If stopped, then we'll assume this

2114 // was called as a result of a call to RtApiJack::stopStream (the

2115 // deactivation of a client handle causes this function to be called).

2116 // If not, we'll assume the Jack server is shutting down or some

2117 // other problem occurred and we should close the stream.

2118 if ( object->isStreamRunning() == false ) return;

2120 ThreadHandle threadId;

2121 pthread_create( &threadId, NULL, jackCloseStream, info );

2122 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
// JACK xrun callback: latch an over/underflow flag per direction so the next
// callbackEvent() can report it in the stream status.
// NOTE(review): infoPointer is cast directly to JackHandle*, so the
// registration site must pass the JackHandle pointer itself — not the
// address of a local pointer variable.
2125 static int jackXrun( void *infoPointer )

2127 JackHandle *handle = (JackHandle *) infoPointer;

2129 if ( handle->ports[0] ) handle->xrun[0] = true;

2130 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
// RtApiJack::probeDeviceOpen (beginning): connect to the JACK server — or,
// on the second (INPUT) pass of a duplex open, reuse the client already
// stored in the handle — then resolve the device index to a JACK
// client-name prefix.
// NOTE(review): fragmentary listing — braces, else lines, early returns and
// the do { loop opener fall on omitted source lines throughout this function.
2135 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

2136 unsigned int firstChannel, unsigned int sampleRate,

2137 RtAudioFormat format, unsigned int *bufferSize,

2138 RtAudio::StreamOptions *options )

2140 JackHandle *handle = (JackHandle *) stream_.apiHandle;

2142 // Look for jack server and try to become a client (only do once per stream).

2143 jack_client_t *client = 0;

2144 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {

2145 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;

2146 jack_status_t *status = NULL;

2147 if ( options && !options->streamName.empty() )

2148 client = jack_client_open( options->streamName.c_str(), jackoptions, status );

2150 client = jack_client_open( "RtApiJack", jackoptions, status );

2151 if ( client == 0 ) {

2152 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";

2153 error( RtAudioError::WARNING );

2158 // The handle must have been created on an earlier pass.

2159 client = handle->client;

2162 const char **ports;

2163 std::string port, previousPort, deviceName;

2164 unsigned int nPorts = 0, nDevices = 0;

2165 ports = jack_get_ports( client, NULL, NULL, 0 );

2167 // Parse the port names up to the first colon (:).

2168 size_t iColon = 0;

2170 port = (char *) ports[ nPorts ];

2171 iColon = port.find(":");

2172 if ( iColon != std::string::npos ) {

2173 port = port.substr( 0, iColon );

2174 if ( port != previousPort ) {

2175 if ( nDevices == device ) deviceName = port;

2177 previousPort = port;

2180 } while ( ports[++nPorts] );
\r
// probeDeviceOpen (continued): validate the device index, the requested
// channel count + offset against the client's available ports, and the
// requested sample rate against the (fixed) JACK server rate.
2184 if ( device >= nDevices ) {

2185 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";

2189 // Count the available ports containing the client name as device

2190 // channels. Jack "input ports" equal RtAudio output channels.

2191 unsigned int nChannels = 0;

2192 unsigned long flag = JackPortIsInput;

2193 if ( mode == INPUT ) flag = JackPortIsOutput;

2194 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );

2196 while ( ports[ nChannels ] ) nChannels++;

2200 // Compare the jack ports for specified client to the requested number of channels.

2201 if ( nChannels < (channels + firstChannel) ) {

2202 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";

2203 errorText_ = errorStream_.str();

2207 // Check the jack server sample rate.

2208 unsigned int jackRate = jack_get_sample_rate( client );

2209 if ( sampleRate != jackRate ) {

2210 jack_client_close( client );

2211 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";

2212 errorText_ = errorStream_.str();

2215 stream_.sampleRate = jackRate;
\r
// probeDeviceOpen (continued): query port latency, then record the stream's
// fixed JACK characteristics — 32-bit float samples, non-interleaved device
// buffers, host byte order, server-determined buffer size — and decide
// whether user<->device buffer conversion is needed.
2217 // Get the latency of the JACK port.

2218 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );

2219 if ( ports[ firstChannel ] ) {

2220 // Added by Ge Wang

2221 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);

2222 // the range (usually the min and max are equal)

2223 jack_latency_range_t latrange; latrange.min = latrange.max = 0;

2224 // get the latency range

2225 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );

2226 // be optimistic, use the min!

2227 stream_.latency[mode] = latrange.min;

2228 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );

2232 // The jack server always uses 32-bit floating-point data.

2233 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;

2234 stream_.userFormat = format;

2236 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

2237 else stream_.userInterleaved = true;

2239 // Jack always uses non-interleaved buffers.

2240 stream_.deviceInterleaved[mode] = false;

2242 // Jack always provides host byte-ordered data.

2243 stream_.doByteSwap[mode] = false;

2245 // Get the buffer size. The buffer size and number of buffers

2246 // (periods) is set when the jack server is started.

2247 stream_.bufferSize = (int) jack_get_buffer_size( client );

2248 *bufferSize = stream_.bufferSize;

2250 stream_.nDeviceChannels[mode] = channels;

2251 stream_.nUserChannels[mode] = channels;

2253 // Set flags for buffer conversion.

2254 stream_.doConvertBuffer[mode] = false;

2255 if ( stream_.userFormat != stream_.deviceFormat[mode] )

2256 stream_.doConvertBuffer[mode] = true;

2257 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

2258 stream_.nUserChannels[mode] > 1 )

2259 stream_.doConvertBuffer[mode] = true;
\r
// probeDeviceOpen (continued): allocate the per-stream JackHandle (first
// pass only), the user buffer, an optional conversion ("device") buffer that
// may be shared between duplex directions, and the jack_port_t* array; then
// record device/offset/state and resolve the final stream mode.
2261 // Allocate our JackHandle structure for the stream.

2262 if ( handle == 0 ) {

2264 handle = new JackHandle;

2266 catch ( std::bad_alloc& ) {

2267 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";

2271 if ( pthread_cond_init(&handle->condition, NULL) ) {

2272 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";

2275 stream_.apiHandle = (void *) handle;

2276 handle->client = client;

2278 handle->deviceName[mode] = deviceName;

2280 // Allocate necessary internal buffers.

2281 unsigned long bufferBytes;

2282 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

2283 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

2284 if ( stream_.userBuffer[mode] == NULL ) {

2285 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";

2289 if ( stream_.doConvertBuffer[mode] ) {

2291 bool makeBuffer = true;

2292 if ( mode == OUTPUT )

2293 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

2294 else { // mode == INPUT

2295 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );

2296 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

2297 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);

2298 if ( bufferBytes < bytesOut ) makeBuffer = false; // existing output-side buffer is already big enough

2302 if ( makeBuffer ) {

2303 bufferBytes *= *bufferSize;

2304 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

2305 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

2306 if ( stream_.deviceBuffer == NULL ) {

2307 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";

2313 // Allocate memory for the Jack ports (channels) identifiers.

2314 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );

2315 if ( handle->ports[mode] == NULL ) {

2316 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";

2320 stream_.device[mode] = device;

2321 stream_.channelOffset[mode] = firstChannel;

2322 stream_.state = STREAM_STOPPED;

2323 stream_.callbackInfo.object = (void *) this;

2325 if ( stream_.mode == OUTPUT && mode == INPUT )

2326 // We had already set up the stream for output.

2327 stream_.mode = DUPLEX;

2329 stream_.mode = mode;
\r
// probeDeviceOpen (continued): install the JACK process, xrun and shutdown
// callbacks for this client.
2330 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );

// FIX: was "(void *) &handle" — that passed a JackHandle** (the address of a
// stack-local pointer, dangling once this function returns) to jackXrun,
// which casts its argument straight to JackHandle*.  Pass the handle itself.
2331 jack_set_xrun_callback( handle->client, jackXrun, (void *) handle );

2332 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
// probeDeviceOpen (end): register one JACK port per user channel, set up
// buffer-conversion info (channel offsets are handled at connect time, not
// via buffers), then the shared error-cleanup tail that tears down the
// condition variable, client, port arrays and buffers on failure.
2335 // Register our ports.

2337 if ( mode == OUTPUT ) {

2338 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {

2339 snprintf( label, 64, "outport %d", i );

2340 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,

2341 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );

2345 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {

2346 snprintf( label, 64, "inport %d", i );

2347 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,

2348 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );

2352 // Setup the buffer conversion information structure. We don't use

2353 // buffers to do channel offsets, so we override that parameter

2355 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );

// Error-cleanup tail (reached via goto/label on omitted lines): release
// everything allocated above so a failed probe leaves no leaks.
2361 pthread_cond_destroy( &handle->condition );

2362 jack_client_close( handle->client );

2364 if ( handle->ports[0] ) free( handle->ports[0] );

2365 if ( handle->ports[1] ) free( handle->ports[1] );

2368 stream_.apiHandle = 0;

2371 for ( int i=0; i<2; i++ ) {

2372 if ( stream_.userBuffer[i] ) {

2373 free( stream_.userBuffer[i] );

2374 stream_.userBuffer[i] = 0;

2378 if ( stream_.deviceBuffer ) {

2379 free( stream_.deviceBuffer );

2380 stream_.deviceBuffer = 0;
\r
// Close the stream: deactivate the JACK client if running, close the client
// connection, free the handle's port arrays and condition variable, release
// all stream buffers, and reset the stream to UNINITIALIZED/CLOSED.
2386 void RtApiJack :: closeStream( void )

2388 if ( stream_.state == STREAM_CLOSED ) {

2389 errorText_ = "RtApiJack::closeStream(): no open stream to close!";

2390 error( RtAudioError::WARNING );

2394 JackHandle *handle = (JackHandle *) stream_.apiHandle;

2397 if ( stream_.state == STREAM_RUNNING )

2398 jack_deactivate( handle->client );

2400 jack_client_close( handle->client );

2404 if ( handle->ports[0] ) free( handle->ports[0] );

2405 if ( handle->ports[1] ) free( handle->ports[1] );

2406 pthread_cond_destroy( &handle->condition );

2408 stream_.apiHandle = 0;

2411 for ( int i=0; i<2; i++ ) {

2412 if ( stream_.userBuffer[i] ) {

2413 free( stream_.userBuffer[i] );

2414 stream_.userBuffer[i] = 0;

2418 if ( stream_.deviceBuffer ) {

2419 free( stream_.deviceBuffer );

2420 stream_.deviceBuffer = 0;

2423 stream_.mode = UNINITIALIZED;

2424 stream_.state = STREAM_CLOSED;
\r
// Start the stream: activate the JACK client, then wire our registered
// ports to the device's physical ports — our output ports to the device's
// input ports (and vice versa), starting at the stored channel offset.
// Reports SYSTEM_ERROR if activation or any connection failed.
2427 void RtApiJack :: startStream( void )

2430 if ( stream_.state == STREAM_RUNNING ) {

2431 errorText_ = "RtApiJack::startStream(): the stream is already running!";

2432 error( RtAudioError::WARNING );

2436 JackHandle *handle = (JackHandle *) stream_.apiHandle;

2437 int result = jack_activate( handle->client );

2439 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";

2443 const char **ports;

2445 // Get the list of available ports.

2446 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

2448 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);

2449 if ( ports == NULL) {

2450 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";

2454 // Now make the port connections. Since RtAudio wasn't designed to

2455 // allow the user to select particular channels of a device, we'll

2456 // just open the first "nChannels" ports with offset.

2457 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {

2459 if ( ports[ stream_.channelOffset[0] + i ] )

2460 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );

2463 errorText_ = "RtApiJack::startStream(): error connecting output ports!";

2470 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

2472 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );

2473 if ( ports == NULL) {

2474 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";

2478 // Now make the port connections. See note above.

2479 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {

2481 if ( ports[ stream_.channelOffset[1] + i ] )

2482 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );

2485 errorText_ = "RtApiJack::startStream(): error connecting input ports!";

2492 handle->drainCounter = 0;

2493 handle->internalDrain = false;

2494 stream_.state = STREAM_RUNNING;

2497 if ( result == 0 ) return;

2498 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream gracefully: for output/duplex streams, start a drain
// (drainCounter = 2) and wait on the handle's condition variable until the
// callback signals the drain is complete, then deactivate the JACK client.
2501 void RtApiJack :: stopStream( void )

2504 if ( stream_.state == STREAM_STOPPED ) {

2505 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";

2506 error( RtAudioError::WARNING );

2510 JackHandle *handle = (JackHandle *) stream_.apiHandle;

2511 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

2513 if ( handle->drainCounter == 0 ) {

2514 handle->drainCounter = 2;

2515 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled

2519 jack_deactivate( handle->client );

2520 stream_.state = STREAM_STOPPED;
\r
// Abort the stream: skip the output drain (set drainCounter past the
// "start drain" value) so stopping happens without waiting for pending
// output.  The actual stop is presumably on omitted trailing lines —
// confirm against the full file.
2523 void RtApiJack :: abortStream( void )

2526 if ( stream_.state == STREAM_STOPPED ) {

2527 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";

2528 error( RtAudioError::WARNING );

2532 JackHandle *handle = (JackHandle *) stream_.apiHandle;

2533 handle->drainCounter = 2;
\r
2538 // This function will be called by a spawned thread when the user

2539 // callback function signals that the stream should be stopped or

2540 // aborted. It is necessary to handle it this way because the

2541 // callbackEvent() function must return before the jack_deactivate()

2542 // function will return.

// Thread entry point: stops the stream on behalf of callbackEvent().
2543 static void *jackStopStream( void *ptr )

2545 CallbackInfo *info = (CallbackInfo *) ptr;

2546 RtApiJack *object = (RtApiJack *) info->object;

2548 object->stopStream();

2549 pthread_exit( NULL );
\r
// Per-block JACK processing for this stream, invoked from
// jackCallbackHandler with the server's frame count.  Bails out early when
// stopped/stopping, and warns if called on a closed stream or if the JACK
// buffer size no longer matches the stream's.
2552 bool RtApiJack :: callbackEvent( unsigned long nframes )

2554 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;

2555 if ( stream_.state == STREAM_CLOSED ) {

// FIX: both diagnostics below wrongly named RtApiCore (copy-paste from the
// CoreAudio backend); this is RtApiJack::callbackEvent.
2556 errorText_ = "RtApiJack::callbackEvent(): the stream is closed ... this shouldn't happen!";

2557 error( RtAudioError::WARNING );

2560 if ( stream_.bufferSize != nframes ) {

2561 errorText_ = "RtApiJack::callbackEvent(): the JACK buffer size has changed ... cannot process!";

2562 error( RtAudioError::WARNING );
\r
// callbackEvent (continued): finish any pending drain, run the user
// callback (reporting latched xrun flags), then move audio between the
// per-channel JACK port buffers and the user/device buffers, converting
// format/interleaving when doConvertBuffer is set.
2566 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;

2567 JackHandle *handle = (JackHandle *) stream_.apiHandle;

2569 // Check if we were draining the stream and signal is finished.

2570 if ( handle->drainCounter > 3 ) {

2571 ThreadHandle threadId;

2573 stream_.state = STREAM_STOPPING;

2574 if ( handle->internalDrain == true )

2575 pthread_create( &threadId, NULL, jackStopStream, info ); // callback-initiated stop: do it from a helper thread

2577 pthread_cond_signal( &handle->condition ); // wake a blocked stopStream()

2581 // Invoke user callback first, to get fresh output data.

2582 if ( handle->drainCounter == 0 ) {

2583 RtAudioCallback callback = (RtAudioCallback) info->callback;

2584 double streamTime = getStreamTime();

2585 RtAudioStreamStatus status = 0;

2586 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {

2587 status |= RTAUDIO_OUTPUT_UNDERFLOW;

2588 handle->xrun[0] = false;

2590 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {

2591 status |= RTAUDIO_INPUT_OVERFLOW;

2592 handle->xrun[1] = false;

2594 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],

2595 stream_.bufferSize, streamTime, status, info->userData );

2596 if ( cbReturnValue == 2 ) { // callback requested abort

2597 stream_.state = STREAM_STOPPING;

2598 handle->drainCounter = 2;

2600 pthread_create( &id, NULL, jackStopStream, info );

2603 else if ( cbReturnValue == 1 ) { // callback requested graceful stop: drain output first

2604 handle->drainCounter = 1;

2605 handle->internalDrain = true;

2609 jack_default_audio_sample_t *jackbuffer;

2610 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t ); // bytes per channel per block

2611 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

2613 if ( handle->drainCounter > 1 ) { // write zeros to the output stream

2615 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {

2616 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );

2617 memset( jackbuffer, 0, bufferBytes );

2621 else if ( stream_.doConvertBuffer[0] ) {

2623 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );

2625 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {

2626 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );

2627 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );

2630 else { // no buffer conversion

2631 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {

2632 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );

2633 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );

2638 // Don't bother draining input

2639 if ( handle->drainCounter ) {

2640 handle->drainCounter++;

2644 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

2646 if ( stream_.doConvertBuffer[1] ) {

2647 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {

2648 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );

2649 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );

2651 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

2653 else { // no buffer conversion

2654 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {

2655 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );

2656 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );

2662 RtApi::tickStreamTime();
\r
2665 //******************** End of __UNIX_JACK__ *********************//
\r
2668 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2670 // The ASIO API is designed around a callback scheme, so this
\r
2671 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2672 // Jack. The primary constraint with ASIO is that it only allows
\r
2673 // access to a single driver at a time. Thus, it is not possible to
\r
2674 // have more than one simultaneous RtAudio stream.
\r
2676 // This implementation also requires a number of external ASIO files
\r
2677 // and a few global variables. The ASIO callback scheme does not
\r
2678 // allow for the passing of user data, so we must create a global
\r
2679 // pointer to our callbackInfo structure.
\r
2681 // On unix systems, we make use of a pthread condition variable.
\r
2682 // Since there is no equivalent in Windows, I hacked something based
\r
2683 // on information found in
\r
2684 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2686 #include "asiosys.h"
\r
2688 #include "iasiothiscallresolver.h"
\r
2689 #include "asiodrivers.h"
\r
2692 static AsioDrivers drivers;
\r
2693 static ASIOCallbacks asioCallbacks;
\r
2694 static ASIODriverInfo driverInfo;
\r
2695 static CallbackInfo *asioCallbackInfo;
\r
2696 static bool asioXRun;
\r
2698 struct AsioHandle {
\r
2699 int drainCounter; // Tracks callback counts when draining
\r
2700 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2701 ASIOBufferInfo *bufferInfos;
\r
2705 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2708 // Function declarations (definitions at end of section)
\r
2709 static const char* getAsioErrorString( ASIOError result );
\r
2710 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2711 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2713 RtApiAsio :: RtApiAsio()
\r
2715 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2716 // CoInitialize beforehand, but it must be for appartment threading
\r
2717 // (in which case, CoInitilialize will return S_FALSE here).
\r
2718 coInitialized_ = false;
\r
2719 HRESULT hr = CoInitialize( NULL );
\r
2720 if ( FAILED(hr) ) {
\r
2721 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2722 error( RtAudioError::WARNING );
\r
2724 coInitialized_ = true;
\r
2726 drivers.removeCurrentDriver();
\r
2727 driverInfo.asioVersion = 2;
\r
2729 // See note in DirectSound implementation about GetDesktopWindow().
\r
2730 driverInfo.sysRef = GetForegroundWindow();
\r
2733 RtApiAsio :: ~RtApiAsio()
\r
2735 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2736 if ( coInitialized_ ) CoUninitialize();
\r
2739 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2741 return (unsigned int) drivers.asioGetNumDev();
\r
2744 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2746 RtAudio::DeviceInfo info;
\r
2747 info.probed = false;
\r
2750 unsigned int nDevices = getDeviceCount();
\r
2751 if ( nDevices == 0 ) {
\r
2752 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2753 error( RtAudioError::INVALID_USE );
\r
2757 if ( device >= nDevices ) {
\r
2758 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2759 error( RtAudioError::INVALID_USE );
\r
2763 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2764 if ( stream_.state != STREAM_CLOSED ) {
\r
2765 if ( device >= devices_.size() ) {
\r
2766 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2767 error( RtAudioError::WARNING );
\r
2770 return devices_[ device ];
\r
2773 char driverName[32];
\r
2774 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2775 if ( result != ASE_OK ) {
\r
2776 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2777 errorText_ = errorStream_.str();
\r
2778 error( RtAudioError::WARNING );
\r
2782 info.name = driverName;
\r
2784 if ( !drivers.loadDriver( driverName ) ) {
\r
2785 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2786 errorText_ = errorStream_.str();
\r
2787 error( RtAudioError::WARNING );
\r
2791 result = ASIOInit( &driverInfo );
\r
2792 if ( result != ASE_OK ) {
\r
2793 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2794 errorText_ = errorStream_.str();
\r
2795 error( RtAudioError::WARNING );
\r
2799 // Determine the device channel information.
\r
2800 long inputChannels, outputChannels;
\r
2801 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2802 if ( result != ASE_OK ) {
\r
2803 drivers.removeCurrentDriver();
\r
2804 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2805 errorText_ = errorStream_.str();
\r
2806 error( RtAudioError::WARNING );
\r
2810 info.outputChannels = outputChannels;
\r
2811 info.inputChannels = inputChannels;
\r
2812 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2813 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2815 // Determine the supported sample rates.
\r
2816 info.sampleRates.clear();
\r
2817 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2818 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2819 if ( result == ASE_OK ) {
\r
2820 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2822 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
2823 info.preferredSampleRate = SAMPLE_RATES[i];
\r
2827 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2828 ASIOChannelInfo channelInfo;
\r
2829 channelInfo.channel = 0;
\r
2830 channelInfo.isInput = true;
\r
2831 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2832 result = ASIOGetChannelInfo( &channelInfo );
\r
2833 if ( result != ASE_OK ) {
\r
2834 drivers.removeCurrentDriver();
\r
2835 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2836 errorText_ = errorStream_.str();
\r
2837 error( RtAudioError::WARNING );
\r
2841 info.nativeFormats = 0;
\r
2842 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2843 info.nativeFormats |= RTAUDIO_SINT16;
\r
2844 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2845 info.nativeFormats |= RTAUDIO_SINT32;
\r
2846 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2847 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2848 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2849 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2850 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2851 info.nativeFormats |= RTAUDIO_SINT24;
\r
2853 if ( info.outputChannels > 0 )
\r
2854 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2855 if ( info.inputChannels > 0 )
\r
2856 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2858 info.probed = true;
\r
2859 drivers.removeCurrentDriver();
\r
2863 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2865 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2866 object->callbackEvent( index );
\r
2869 void RtApiAsio :: saveDeviceInfo( void )
\r
2873 unsigned int nDevices = getDeviceCount();
\r
2874 devices_.resize( nDevices );
\r
2875 for ( unsigned int i=0; i<nDevices; i++ )
\r
2876 devices_[i] = getDeviceInfo( i );
\r
2879 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2880 unsigned int firstChannel, unsigned int sampleRate,
\r
2881 RtAudioFormat format, unsigned int *bufferSize,
\r
2882 RtAudio::StreamOptions *options )
\r
2883 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
2885 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
\r
2887 // For ASIO, a duplex stream MUST use the same driver.
\r
2888 if ( isDuplexInput && stream_.device[0] != device ) {
\r
2889 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2893 char driverName[32];
\r
2894 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2895 if ( result != ASE_OK ) {
\r
2896 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2897 errorText_ = errorStream_.str();
\r
2901 // Only load the driver once for duplex stream.
\r
2902 if ( !isDuplexInput ) {
\r
2903 // The getDeviceInfo() function will not work when a stream is open
\r
2904 // because ASIO does not allow multiple devices to run at the same
\r
2905 // time. Thus, we'll probe the system before opening a stream and
\r
2906 // save the results for use by getDeviceInfo().
\r
2907 this->saveDeviceInfo();
\r
2909 if ( !drivers.loadDriver( driverName ) ) {
\r
2910 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2911 errorText_ = errorStream_.str();
\r
2915 result = ASIOInit( &driverInfo );
\r
2916 if ( result != ASE_OK ) {
\r
2917 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2918 errorText_ = errorStream_.str();
\r
2923 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
\r
2924 bool buffersAllocated = false;
\r
2925 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2926 unsigned int nChannels;
\r
2929 // Check the device channel count.
\r
2930 long inputChannels, outputChannels;
\r
2931 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2932 if ( result != ASE_OK ) {
\r
2933 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2934 errorText_ = errorStream_.str();
\r
2938 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2939 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2940 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2941 errorText_ = errorStream_.str();
\r
2944 stream_.nDeviceChannels[mode] = channels;
\r
2945 stream_.nUserChannels[mode] = channels;
\r
2946 stream_.channelOffset[mode] = firstChannel;
\r
2948 // Verify the sample rate is supported.
\r
2949 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2950 if ( result != ASE_OK ) {
\r
2951 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2952 errorText_ = errorStream_.str();
\r
2956 // Get the current sample rate
\r
2957 ASIOSampleRate currentRate;
\r
2958 result = ASIOGetSampleRate( ¤tRate );
\r
2959 if ( result != ASE_OK ) {
\r
2960 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2961 errorText_ = errorStream_.str();
\r
2965 // Set the sample rate only if necessary
\r
2966 if ( currentRate != sampleRate ) {
\r
2967 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2968 if ( result != ASE_OK ) {
\r
2969 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2970 errorText_ = errorStream_.str();
\r
2975 // Determine the driver data type.
\r
2976 ASIOChannelInfo channelInfo;
\r
2977 channelInfo.channel = 0;
\r
2978 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2979 else channelInfo.isInput = true;
\r
2980 result = ASIOGetChannelInfo( &channelInfo );
\r
2981 if ( result != ASE_OK ) {
\r
2982 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2983 errorText_ = errorStream_.str();
\r
2987 // Assuming WINDOWS host is always little-endian.
\r
2988 stream_.doByteSwap[mode] = false;
\r
2989 stream_.userFormat = format;
\r
2990 stream_.deviceFormat[mode] = 0;
\r
2991 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2992 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2993 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2995 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2996 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2997 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2999 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
3000 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
3001 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
3003 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
3004 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
3005 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
3007 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
3008 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
3009 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
3012 if ( stream_.deviceFormat[mode] == 0 ) {
\r
3013 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
3014 errorText_ = errorStream_.str();
\r
3018 // Set the buffer size. For a duplex stream, this will end up
\r
3019 // setting the buffer size based on the input constraints, which
\r
3021 long minSize, maxSize, preferSize, granularity;
\r
3022 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
3023 if ( result != ASE_OK ) {
\r
3024 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
3025 errorText_ = errorStream_.str();
\r
3029 if ( isDuplexInput ) {
\r
3030 // When this is the duplex input (output was opened before), then we have to use the same
\r
3031 // buffersize as the output, because it might use the preferred buffer size, which most
\r
3032 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
\r
3033 // So instead of throwing an error, make them equal. The caller uses the reference
\r
3034 // to the "bufferSize" param as usual to set up processing buffers.
\r
3036 *bufferSize = stream_.bufferSize;
\r
3039 if ( *bufferSize == 0 ) *bufferSize = preferSize;
\r
3040 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3041 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3042 else if ( granularity == -1 ) {
\r
3043 // Make sure bufferSize is a power of two.
\r
3044 int log2_of_min_size = 0;
\r
3045 int log2_of_max_size = 0;
\r
3047 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
3048 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
3049 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
3052 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
3053 int min_delta_num = log2_of_min_size;
\r
3055 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
3056 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
3057 if (current_delta < min_delta) {
\r
3058 min_delta = current_delta;
\r
3059 min_delta_num = i;
\r
3063 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
3064 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3065 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3067 else if ( granularity != 0 ) {
\r
3068 // Set to an even multiple of granularity, rounding up.
\r
3069 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
3074 // we don't use it anymore, see above!
\r
3075 // Just left it here for the case...
\r
3076 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
\r
3077 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3082 stream_.bufferSize = *bufferSize;
\r
3083 stream_.nBuffers = 2;
\r
3085 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3086 else stream_.userInterleaved = true;
\r
3088 // ASIO always uses non-interleaved buffers.
\r
3089 stream_.deviceInterleaved[mode] = false;
\r
3091 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3092 if ( handle == 0 ) {
\r
3094 handle = new AsioHandle;
\r
3096 catch ( std::bad_alloc& ) {
\r
3097 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3100 handle->bufferInfos = 0;
\r
3102 // Create a manual-reset event.
\r
3103 handle->condition = CreateEvent( NULL, // no security
\r
3104 TRUE, // manual-reset
\r
3105 FALSE, // non-signaled initially
\r
3106 NULL ); // unnamed
\r
3107 stream_.apiHandle = (void *) handle;
\r
3110 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3111 // and output separately, we'll have to dispose of previously
\r
3112 // created output buffers for a duplex stream.
\r
3113 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3114 ASIODisposeBuffers();
\r
3115 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3118 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3120 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3121 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3122 if ( handle->bufferInfos == NULL ) {
\r
3123 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3124 errorText_ = errorStream_.str();
\r
3128 ASIOBufferInfo *infos;
\r
3129 infos = handle->bufferInfos;
\r
3130 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3131 infos->isInput = ASIOFalse;
\r
3132 infos->channelNum = i + stream_.channelOffset[0];
\r
3133 infos->buffers[0] = infos->buffers[1] = 0;
\r
3135 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3136 infos->isInput = ASIOTrue;
\r
3137 infos->channelNum = i + stream_.channelOffset[1];
\r
3138 infos->buffers[0] = infos->buffers[1] = 0;
\r
3141 // prepare for callbacks
\r
3142 stream_.sampleRate = sampleRate;
\r
3143 stream_.device[mode] = device;
\r
3144 stream_.mode = isDuplexInput ? DUPLEX : mode;
\r
3146 // store this class instance before registering callbacks, that are going to use it
\r
3147 asioCallbackInfo = &stream_.callbackInfo;
\r
3148 stream_.callbackInfo.object = (void *) this;
\r
3150 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3151 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3152 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3153 asioCallbacks.asioMessage = &asioMessages;
\r
3154 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3155 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3156 if ( result != ASE_OK ) {
\r
3157 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
\r
3158 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
\r
3159 // in that case, let's be naïve and try that instead
\r
3160 *bufferSize = preferSize;
\r
3161 stream_.bufferSize = *bufferSize;
\r
3162 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3165 if ( result != ASE_OK ) {
\r
3166 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3167 errorText_ = errorStream_.str();
\r
3170 buffersAllocated = true;
\r
3171 stream_.state = STREAM_STOPPED;
\r
3173 // Set flags for buffer conversion.
\r
3174 stream_.doConvertBuffer[mode] = false;
\r
3175 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3176 stream_.doConvertBuffer[mode] = true;
\r
3177 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3178 stream_.nUserChannels[mode] > 1 )
\r
3179 stream_.doConvertBuffer[mode] = true;
\r
3181 // Allocate necessary internal buffers
\r
3182 unsigned long bufferBytes;
\r
3183 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3184 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3185 if ( stream_.userBuffer[mode] == NULL ) {
\r
3186 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3190 if ( stream_.doConvertBuffer[mode] ) {
\r
3192 bool makeBuffer = true;
\r
3193 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3194 if ( isDuplexInput && stream_.deviceBuffer ) {
\r
3195 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3196 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3199 if ( makeBuffer ) {
\r
3200 bufferBytes *= *bufferSize;
\r
3201 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3202 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3203 if ( stream_.deviceBuffer == NULL ) {
\r
3204 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3210 // Determine device latencies
\r
3211 long inputLatency, outputLatency;
\r
3212 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3213 if ( result != ASE_OK ) {
\r
3214 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3215 errorText_ = errorStream_.str();
\r
3216 error( RtAudioError::WARNING); // warn but don't fail
\r
3219 stream_.latency[0] = outputLatency;
\r
3220 stream_.latency[1] = inputLatency;
\r
3223 // Setup the buffer conversion information structure. We don't use
\r
3224 // buffers to do channel offsets, so we override that parameter
\r
3226 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3231 if ( !isDuplexInput ) {
\r
3232 // the cleanup for error in the duplex input, is done by RtApi::openStream
\r
3233 // So we clean up for single channel only
\r
3235 if ( buffersAllocated )
\r
3236 ASIODisposeBuffers();
\r
3238 drivers.removeCurrentDriver();
\r
3241 CloseHandle( handle->condition );
\r
3242 if ( handle->bufferInfos )
\r
3243 free( handle->bufferInfos );
\r
3246 stream_.apiHandle = 0;
\r
3250 if ( stream_.userBuffer[mode] ) {
\r
3251 free( stream_.userBuffer[mode] );
\r
3252 stream_.userBuffer[mode] = 0;
\r
3255 if ( stream_.deviceBuffer ) {
\r
3256 free( stream_.deviceBuffer );
\r
3257 stream_.deviceBuffer = 0;
\r
3262 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
3264 void RtApiAsio :: closeStream()
\r
3266 if ( stream_.state == STREAM_CLOSED ) {
\r
3267 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3268 error( RtAudioError::WARNING );
\r
3272 if ( stream_.state == STREAM_RUNNING ) {
\r
3273 stream_.state = STREAM_STOPPED;
\r
3276 ASIODisposeBuffers();
\r
3277 drivers.removeCurrentDriver();
\r
3279 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3281 CloseHandle( handle->condition );
\r
3282 if ( handle->bufferInfos )
\r
3283 free( handle->bufferInfos );
\r
3285 stream_.apiHandle = 0;
\r
3288 for ( int i=0; i<2; i++ ) {
\r
3289 if ( stream_.userBuffer[i] ) {
\r
3290 free( stream_.userBuffer[i] );
\r
3291 stream_.userBuffer[i] = 0;
\r
3295 if ( stream_.deviceBuffer ) {
\r
3296 free( stream_.deviceBuffer );
\r
3297 stream_.deviceBuffer = 0;
\r
3300 stream_.mode = UNINITIALIZED;
\r
3301 stream_.state = STREAM_CLOSED;
\r
// Set when asioMessages()/callbackEvent() spawn a stop-stream thread;
// cleared by startStream() before the stream runs again.
bool stopThreadCalled = false;
\r
3306 void RtApiAsio :: startStream()
\r
3309 if ( stream_.state == STREAM_RUNNING ) {
\r
3310 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3311 error( RtAudioError::WARNING );
\r
3315 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3316 ASIOError result = ASIOStart();
\r
3317 if ( result != ASE_OK ) {
\r
3318 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3319 errorText_ = errorStream_.str();
\r
3323 handle->drainCounter = 0;
\r
3324 handle->internalDrain = false;
\r
3325 ResetEvent( handle->condition );
\r
3326 stream_.state = STREAM_RUNNING;
\r
3330 stopThreadCalled = false;
\r
3332 if ( result == ASE_OK ) return;
\r
3333 error( RtAudioError::SYSTEM_ERROR );
\r
3336 void RtApiAsio :: stopStream()
\r
3339 if ( stream_.state == STREAM_STOPPED ) {
\r
3340 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3341 error( RtAudioError::WARNING );
\r
3345 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3346 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3347 if ( handle->drainCounter == 0 ) {
\r
3348 handle->drainCounter = 2;
\r
3349 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3353 stream_.state = STREAM_STOPPED;
\r
3355 ASIOError result = ASIOStop();
\r
3356 if ( result != ASE_OK ) {
\r
3357 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3358 errorText_ = errorStream_.str();
\r
3361 if ( result == ASE_OK ) return;
\r
3362 error( RtAudioError::SYSTEM_ERROR );
\r
3365 void RtApiAsio :: abortStream()
\r
3368 if ( stream_.state == STREAM_STOPPED ) {
\r
3369 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3370 error( RtAudioError::WARNING );
\r
3374 // The following lines were commented-out because some behavior was
\r
3375 // noted where the device buffers need to be zeroed to avoid
\r
3376 // continuing sound, even when the device buffers are completely
\r
3377 // disposed. So now, calling abort is the same as calling stop.
\r
3378 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3379 // handle->drainCounter = 2;
\r
3383 // This function will be called by a spawned thread when the user
\r
3384 // callback function signals that the stream should be stopped or
\r
3385 // aborted. It is necessary to handle it this way because the
\r
3386 // callbackEvent() function must return before the ASIOStop()
\r
3387 // function will return.
\r
3388 static unsigned __stdcall asioStopStream( void *ptr )
\r
3390 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3391 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3393 object->stopStream();
\r
3394 _endthreadex( 0 );
\r
3398 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3400 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3401 if ( stream_.state == STREAM_CLOSED ) {
\r
3402 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3403 error( RtAudioError::WARNING );
\r
3407 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3408 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3410 // Check if we were draining the stream and signal if finished.
\r
3411 if ( handle->drainCounter > 3 ) {
\r
3413 stream_.state = STREAM_STOPPING;
\r
3414 if ( handle->internalDrain == false )
\r
3415 SetEvent( handle->condition );
\r
3416 else { // spawn a thread to stop the stream
\r
3417 unsigned threadId;
\r
3418 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3419 &stream_.callbackInfo, 0, &threadId );
\r
3424 // Invoke user callback to get fresh output data UNLESS we are
\r
3425 // draining stream.
\r
3426 if ( handle->drainCounter == 0 ) {
\r
3427 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3428 double streamTime = getStreamTime();
\r
3429 RtAudioStreamStatus status = 0;
\r
3430 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3431 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3434 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3435 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3438 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3439 stream_.bufferSize, streamTime, status, info->userData );
\r
3440 if ( cbReturnValue == 2 ) {
\r
3441 stream_.state = STREAM_STOPPING;
\r
3442 handle->drainCounter = 2;
\r
3443 unsigned threadId;
\r
3444 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3445 &stream_.callbackInfo, 0, &threadId );
\r
3448 else if ( cbReturnValue == 1 ) {
\r
3449 handle->drainCounter = 1;
\r
3450 handle->internalDrain = true;
\r
3454 unsigned int nChannels, bufferBytes, i, j;
\r
3455 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3456 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3458 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3460 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3462 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3463 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3464 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3468 else if ( stream_.doConvertBuffer[0] ) {
\r
3470 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3471 if ( stream_.doByteSwap[0] )
\r
3472 byteSwapBuffer( stream_.deviceBuffer,
\r
3473 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3474 stream_.deviceFormat[0] );
\r
3476 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3477 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3478 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3479 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3485 if ( stream_.doByteSwap[0] )
\r
3486 byteSwapBuffer( stream_.userBuffer[0],
\r
3487 stream_.bufferSize * stream_.nUserChannels[0],
\r
3488 stream_.userFormat );
\r
3490 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3491 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3492 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3493 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3499 // Don't bother draining input
\r
3500 if ( handle->drainCounter ) {
\r
3501 handle->drainCounter++;
\r
3505 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3507 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3509 if (stream_.doConvertBuffer[1]) {
\r
3511 // Always interleave ASIO input data.
\r
3512 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3513 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3514 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3515 handle->bufferInfos[i].buffers[bufferIndex],
\r
3519 if ( stream_.doByteSwap[1] )
\r
3520 byteSwapBuffer( stream_.deviceBuffer,
\r
3521 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3522 stream_.deviceFormat[1] );
\r
3523 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3527 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3528 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3529 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3530 handle->bufferInfos[i].buffers[bufferIndex],
\r
3535 if ( stream_.doByteSwap[1] )
\r
3536 byteSwapBuffer( stream_.userBuffer[1],
\r
3537 stream_.bufferSize * stream_.nUserChannels[1],
\r
3538 stream_.userFormat );
\r
3543 // The following call was suggested by Malte Clasen. While the API
\r
3544 // documentation indicates it should not be required, some device
\r
3545 // drivers apparently do not function correctly without it.
\r
3546 ASIOOutputReady();
\r
3548 RtApi::tickStreamTime();
\r
3552 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3554 // The ASIO documentation says that this usually only happens during
\r
3555 // external sync. Audio processing is not stopped by the driver,
\r
3556 // actual sample rate might not have even changed, maybe only the
\r
3557 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3560 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3562 object->stopStream();
\r
3564 catch ( RtAudioError &exception ) {
\r
3565 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3569 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3572 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3576 switch( selector ) {
\r
3577 case kAsioSelectorSupported:
\r
3578 if ( value == kAsioResetRequest
\r
3579 || value == kAsioEngineVersion
\r
3580 || value == kAsioResyncRequest
\r
3581 || value == kAsioLatenciesChanged
\r
3582 // The following three were added for ASIO 2.0, you don't
\r
3583 // necessarily have to support them.
\r
3584 || value == kAsioSupportsTimeInfo
\r
3585 || value == kAsioSupportsTimeCode
\r
3586 || value == kAsioSupportsInputMonitor)
\r
3589 case kAsioResetRequest:
\r
3590 // Defer the task and perform the reset of the driver during the
\r
3591 // next "safe" situation. You cannot reset the driver right now,
\r
3592 // as this code is called from the driver. Reset the driver is
\r
3593 // done by completely destruct is. I.e. ASIOStop(),
\r
3594 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3596 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3599 case kAsioResyncRequest:
\r
3600 // This informs the application that the driver encountered some
\r
3601 // non-fatal data loss. It is used for synchronization purposes
\r
3602 // of different media. Added mainly to work around the Win16Mutex
\r
3603 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3604 // which could lose data because the Mutex was held too long by
\r
3605 // another thread. However a driver can issue it in other
\r
3606 // situations, too.
\r
3607 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3611 case kAsioLatenciesChanged:
\r
3612 // This will inform the host application that the drivers were
\r
3613 // latencies changed. Beware, it this does not mean that the
\r
3614 // buffer sizes have changed! You might need to update internal
\r
3616 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3619 case kAsioEngineVersion:
\r
3620 // Return the supported ASIO version of the host application. If
\r
3621 // a host application does not implement this selector, ASIO 1.0
\r
3622 // is assumed by the driver.
\r
3625 case kAsioSupportsTimeInfo:
\r
3626 // Informs the driver whether the
\r
3627 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3628 // For compatibility with ASIO 1.0 drivers the host application
\r
3629 // should always support the "old" bufferSwitch method, too.
\r
3632 case kAsioSupportsTimeCode:
\r
3633 // Informs the driver whether application is interested in time
\r
3634 // code info. If an application does not need to know about time
\r
3635 // code, the driver has less work to do.
\r
3642 static const char* getAsioErrorString( ASIOError result )
\r
3647 const char*message;
\r
3650 static const Messages m[] =
\r
3652 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3653 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3654 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3655 { ASE_InvalidMode, "Invalid mode." },
\r
3656 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3657 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3658 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3661 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3662 if ( m[i].value == result ) return m[i].message;
\r
3664 return "Unknown error.";
\r
3667 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3671 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3673 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3674 // - Introduces support for the Windows WASAPI API
\r
3675 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3676 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3677 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3682 #include <audioclient.h>
\r
3684 #include <mmdeviceapi.h>
\r
3685 #include <functiondiscoverykeys_devpkey.h>
\r
3687 //=============================================================================
\r
// Release a COM interface pointer and reset it to NULL, guarding against
// a NULL input so the macro is safe to apply unconditionally.
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
\r
3696 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3698 //-----------------------------------------------------------------------------
\r
3700 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3701 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3702 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3703 // provide intermediate storage for read / write synchronization.
\r
3704 class WasapiBuffer
\r
3708 : buffer_( NULL ),
\r
3717 // sets the length of the internal ring buffer
\r
3718 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3721 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3723 bufferSize_ = bufferSize;
\r
3728 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3729 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3731 if ( !buffer || // incoming buffer is NULL
\r
3732 bufferSize == 0 || // incoming buffer has no data
\r
3733 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3738 unsigned int relOutIndex = outIndex_;
\r
3739 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3740 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3741 relOutIndex += bufferSize_;
\r
3744 // "in" index can end on the "out" index but cannot begin at it
\r
3745 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3746 return false; // not enough space between "in" index and "out" index
\r
3749 // copy buffer from external to internal
\r
3750 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3751 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3752 int fromInSize = bufferSize - fromZeroSize;
\r
3756 case RTAUDIO_SINT8:
\r
3757 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3758 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3760 case RTAUDIO_SINT16:
\r
3761 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3762 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3764 case RTAUDIO_SINT24:
\r
3765 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3766 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3768 case RTAUDIO_SINT32:
\r
3769 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3770 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3772 case RTAUDIO_FLOAT32:
\r
3773 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3774 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3776 case RTAUDIO_FLOAT64:
\r
3777 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3778 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3782 // update "in" index
\r
3783 inIndex_ += bufferSize;
\r
3784 inIndex_ %= bufferSize_;
\r
3789 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3790 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3792 if ( !buffer || // incoming buffer is NULL
\r
3793 bufferSize == 0 || // incoming buffer has no data
\r
3794 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3799 unsigned int relInIndex = inIndex_;
\r
3800 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3801 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3802 relInIndex += bufferSize_;
\r
3805 // "out" index can begin at and end on the "in" index
\r
3806 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3807 return false; // not enough space between "out" index and "in" index
\r
3810 // copy buffer from internal to external
\r
3811 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3812 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3813 int fromOutSize = bufferSize - fromZeroSize;
\r
3817 case RTAUDIO_SINT8:
\r
3818 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3819 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3821 case RTAUDIO_SINT16:
\r
3822 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3823 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3825 case RTAUDIO_SINT24:
\r
3826 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3827 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3829 case RTAUDIO_SINT32:
\r
3830 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3831 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3833 case RTAUDIO_FLOAT32:
\r
3834 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3835 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3837 case RTAUDIO_FLOAT64:
\r
3838 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3839 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3843 // update "out" index
\r
3844 outIndex_ += bufferSize;
\r
3845 outIndex_ %= bufferSize_;
\r
3852 unsigned int bufferSize_;
\r
3853 unsigned int inIndex_;
\r
3854 unsigned int outIndex_;
\r
3857 //-----------------------------------------------------------------------------
\r
3859 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3860 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3861 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3862 // This sample rate converter works best with conversions between one rate and its multiple.
\r
3863 void convertBufferWasapi( char* outBuffer,
\r
3864 const char* inBuffer,
\r
3865 const unsigned int& channelCount,
\r
3866 const unsigned int& inSampleRate,
\r
3867 const unsigned int& outSampleRate,
\r
3868 const unsigned int& inSampleCount,
\r
3869 unsigned int& outSampleCount,
\r
3870 const RtAudioFormat& format )
\r
3872 // calculate the new outSampleCount and relative sampleStep
\r
3873 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3874 float sampleRatioInv = ( float ) 1 / sampleRatio;
\r
3875 float sampleStep = 1.0f / sampleRatio;
\r
3876 float inSampleFraction = 0.0f;
\r
3878 outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );
\r
3880 // if inSampleRate is a multiple of outSampleRate (or vice versa) there's no need to interpolate
\r
3881 if ( floor( sampleRatio ) == sampleRatio || floor( sampleRatioInv ) == sampleRatioInv )
\r
3883 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3884 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3886 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3890 case RTAUDIO_SINT8:
\r
3891 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3893 case RTAUDIO_SINT16:
\r
3894 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3896 case RTAUDIO_SINT24:
\r
3897 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3899 case RTAUDIO_SINT32:
\r
3900 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3902 case RTAUDIO_FLOAT32:
\r
3903 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3905 case RTAUDIO_FLOAT64:
\r
3906 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3910 // jump to next in sample
\r
3911 inSampleFraction += sampleStep;
\r
3914 else // else interpolate
\r
3916 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3917 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3919 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3920 float inSampleDec = inSampleFraction - inSample;
\r
3921 unsigned int frameInSample = inSample * channelCount;
\r
3922 unsigned int frameOutSample = outSample * channelCount;
\r
3926 case RTAUDIO_SINT8:
\r
3928 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3930 char fromSample = ( ( char* ) inBuffer )[ frameInSample + channel ];
\r
3931 char toSample = ( ( char* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3932 char sampleDiff = ( char ) ( ( toSample - fromSample ) * inSampleDec );
\r
3933 ( ( char* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3937 case RTAUDIO_SINT16:
\r
3939 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3941 short fromSample = ( ( short* ) inBuffer )[ frameInSample + channel ];
\r
3942 short toSample = ( ( short* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3943 short sampleDiff = ( short ) ( ( toSample - fromSample ) * inSampleDec );
\r
3944 ( ( short* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3948 case RTAUDIO_SINT24:
\r
3950 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3952 int fromSample = ( ( S24* ) inBuffer )[ frameInSample + channel ].asInt();
\r
3953 int toSample = ( ( S24* ) inBuffer )[ frameInSample + channelCount + channel ].asInt();
\r
3954 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
\r
3955 ( ( S24* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3959 case RTAUDIO_SINT32:
\r
3961 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3963 int fromSample = ( ( int* ) inBuffer )[ frameInSample + channel ];
\r
3964 int toSample = ( ( int* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3965 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
\r
3966 ( ( int* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3970 case RTAUDIO_FLOAT32:
\r
3972 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3974 float fromSample = ( ( float* ) inBuffer )[ frameInSample + channel ];
\r
3975 float toSample = ( ( float* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3976 float sampleDiff = ( toSample - fromSample ) * inSampleDec;
\r
3977 ( ( float* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3981 case RTAUDIO_FLOAT64:
\r
3983 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3985 double fromSample = ( ( double* ) inBuffer )[ frameInSample + channel ];
\r
3986 double toSample = ( ( double* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3987 double sampleDiff = ( toSample - fromSample ) * inSampleDec;
\r
3988 ( ( double* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3994 // jump to next in sample
\r
3995 inSampleFraction += sampleStep;
\r
4000 //-----------------------------------------------------------------------------
\r
4002 // A structure to hold various information related to the WASAPI implementation.
\r
4003 struct WasapiHandle
\r
4005 IAudioClient* captureAudioClient;
\r
4006 IAudioClient* renderAudioClient;
\r
4007 IAudioCaptureClient* captureClient;
\r
4008 IAudioRenderClient* renderClient;
\r
4009 HANDLE captureEvent;
\r
4010 HANDLE renderEvent;
\r
4013 : captureAudioClient( NULL ),
\r
4014 renderAudioClient( NULL ),
\r
4015 captureClient( NULL ),
\r
4016 renderClient( NULL ),
\r
4017 captureEvent( NULL ),
\r
4018 renderEvent( NULL ) {}
\r
4021 //=============================================================================
\r
4023 RtApiWasapi::RtApiWasapi()
\r
4024 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
4026 // WASAPI can run either apartment or multi-threaded
\r
4027 HRESULT hr = CoInitialize( NULL );
\r
4028 if ( !FAILED( hr ) )
\r
4029 coInitialized_ = true;
\r
4031 // Instantiate device enumerator
\r
4032 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
4033 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
4034 ( void** ) &deviceEnumerator_ );
\r
4036 if ( FAILED( hr ) ) {
\r
4037 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
4038 error( RtAudioError::DRIVER_ERROR );
\r
4042 //-----------------------------------------------------------------------------
\r
4044 RtApiWasapi::~RtApiWasapi()
\r
4046 if ( stream_.state != STREAM_CLOSED )
\r
4049 SAFE_RELEASE( deviceEnumerator_ );
\r
4051 // If this object previously called CoInitialize()
\r
4052 if ( coInitialized_ )
\r
4056 //=============================================================================
\r
4058 unsigned int RtApiWasapi::getDeviceCount( void )
\r
4060 unsigned int captureDeviceCount = 0;
\r
4061 unsigned int renderDeviceCount = 0;
\r
4063 IMMDeviceCollection* captureDevices = NULL;
\r
4064 IMMDeviceCollection* renderDevices = NULL;
\r
4066 // Count capture devices
\r
4067 errorText_.clear();
\r
4068 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4069 if ( FAILED( hr ) ) {
\r
4070 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
4074 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4075 if ( FAILED( hr ) ) {
\r
4076 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
4080 // Count render devices
\r
4081 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4082 if ( FAILED( hr ) ) {
\r
4083 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
4087 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4088 if ( FAILED( hr ) ) {
\r
4089 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
4094 // release all references
\r
4095 SAFE_RELEASE( captureDevices );
\r
4096 SAFE_RELEASE( renderDevices );
\r
4098 if ( errorText_.empty() )
\r
4099 return captureDeviceCount + renderDeviceCount;
\r
4101 error( RtAudioError::DRIVER_ERROR );
\r
4105 //-----------------------------------------------------------------------------
\r
4107 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
4109 RtAudio::DeviceInfo info;
\r
4110 unsigned int captureDeviceCount = 0;
\r
4111 unsigned int renderDeviceCount = 0;
\r
4112 std::string defaultDeviceName;
\r
4113 bool isCaptureDevice = false;
\r
4115 PROPVARIANT deviceNameProp;
\r
4116 PROPVARIANT defaultDeviceNameProp;
\r
4118 IMMDeviceCollection* captureDevices = NULL;
\r
4119 IMMDeviceCollection* renderDevices = NULL;
\r
4120 IMMDevice* devicePtr = NULL;
\r
4121 IMMDevice* defaultDevicePtr = NULL;
\r
4122 IAudioClient* audioClient = NULL;
\r
4123 IPropertyStore* devicePropStore = NULL;
\r
4124 IPropertyStore* defaultDevicePropStore = NULL;
\r
4126 WAVEFORMATEX* deviceFormat = NULL;
\r
4127 WAVEFORMATEX* closestMatchFormat = NULL;
\r
4130 info.probed = false;
\r
4132 // Count capture devices
\r
4133 errorText_.clear();
\r
4134 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4135 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4136 if ( FAILED( hr ) ) {
\r
4137 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
4141 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4142 if ( FAILED( hr ) ) {
\r
4143 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
4147 // Count render devices
\r
4148 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4149 if ( FAILED( hr ) ) {
\r
4150 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
4154 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4155 if ( FAILED( hr ) ) {
\r
4156 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
4160 // validate device index
\r
4161 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4162 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
4163 errorType = RtAudioError::INVALID_USE;
\r
4167 // determine whether index falls within capture or render devices
\r
4168 if ( device >= renderDeviceCount ) {
\r
4169 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4170 if ( FAILED( hr ) ) {
\r
4171 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4174 isCaptureDevice = true;
\r
4177 hr = renderDevices->Item( device, &devicePtr );
\r
4178 if ( FAILED( hr ) ) {
\r
4179 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4182 isCaptureDevice = false;
\r
4185 // get default device name
\r
4186 if ( isCaptureDevice ) {
\r
4187 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4188 if ( FAILED( hr ) ) {
\r
4189 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4194 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4195 if ( FAILED( hr ) ) {
\r
4196 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4201 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4202 if ( FAILED( hr ) ) {
\r
4203 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4206 PropVariantInit( &defaultDeviceNameProp );
\r
4208 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4209 if ( FAILED( hr ) ) {
\r
4210 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4214 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
\r
4217 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4218 if ( FAILED( hr ) ) {
\r
4219 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4223 PropVariantInit( &deviceNameProp );
\r
4225 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4226 if ( FAILED( hr ) ) {
\r
4227 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4231 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
\r
4234 if ( isCaptureDevice ) {
\r
4235 info.isDefaultInput = info.name == defaultDeviceName;
\r
4236 info.isDefaultOutput = false;
\r
4239 info.isDefaultInput = false;
\r
4240 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4244 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4245 if ( FAILED( hr ) ) {
\r
4246 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4250 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4251 if ( FAILED( hr ) ) {
\r
4252 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4256 if ( isCaptureDevice ) {
\r
4257 info.inputChannels = deviceFormat->nChannels;
\r
4258 info.outputChannels = 0;
\r
4259 info.duplexChannels = 0;
\r
4262 info.inputChannels = 0;
\r
4263 info.outputChannels = deviceFormat->nChannels;
\r
4264 info.duplexChannels = 0;
\r
4268 info.sampleRates.clear();
\r
4270 // allow support for all sample rates as we have a built-in sample rate converter
\r
4271 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4272 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4274 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
\r
4277 info.nativeFormats = 0;
\r
4279 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4280 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4281 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4283 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4284 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4286 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4287 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4290 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4291 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4292 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4294 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4295 info.nativeFormats |= RTAUDIO_SINT8;
\r
4297 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4298 info.nativeFormats |= RTAUDIO_SINT16;
\r
4300 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4301 info.nativeFormats |= RTAUDIO_SINT24;
\r
4303 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4304 info.nativeFormats |= RTAUDIO_SINT32;
\r
4309 info.probed = true;
\r
4312 // release all references
\r
4313 PropVariantClear( &deviceNameProp );
\r
4314 PropVariantClear( &defaultDeviceNameProp );
\r
4316 SAFE_RELEASE( captureDevices );
\r
4317 SAFE_RELEASE( renderDevices );
\r
4318 SAFE_RELEASE( devicePtr );
\r
4319 SAFE_RELEASE( defaultDevicePtr );
\r
4320 SAFE_RELEASE( audioClient );
\r
4321 SAFE_RELEASE( devicePropStore );
\r
4322 SAFE_RELEASE( defaultDevicePropStore );
\r
4324 CoTaskMemFree( deviceFormat );
\r
4325 CoTaskMemFree( closestMatchFormat );
\r
4327 if ( !errorText_.empty() )
\r
4328 error( errorType );
\r
4332 //-----------------------------------------------------------------------------
\r
4334 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4336 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4337 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4345 //-----------------------------------------------------------------------------
\r
4347 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4349 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4350 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4358 //-----------------------------------------------------------------------------
\r
4360 void RtApiWasapi::closeStream( void )
\r
4362 if ( stream_.state == STREAM_CLOSED ) {
\r
4363 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4364 error( RtAudioError::WARNING );
\r
4368 if ( stream_.state != STREAM_STOPPED )
\r
4371 // clean up stream memory
\r
4372 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4373 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4375 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4376 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4378 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4379 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4381 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4382 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4384 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4385 stream_.apiHandle = NULL;
\r
4387 for ( int i = 0; i < 2; i++ ) {
\r
4388 if ( stream_.userBuffer[i] ) {
\r
4389 free( stream_.userBuffer[i] );
\r
4390 stream_.userBuffer[i] = 0;
\r
4394 if ( stream_.deviceBuffer ) {
\r
4395 free( stream_.deviceBuffer );
\r
4396 stream_.deviceBuffer = 0;
\r
4399 // update stream state
\r
4400 stream_.state = STREAM_CLOSED;
\r
4403 //-----------------------------------------------------------------------------
\r
4405 void RtApiWasapi::startStream( void )
\r
4409 if ( stream_.state == STREAM_RUNNING ) {
\r
4410 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4411 error( RtAudioError::WARNING );
\r
4415 // update stream state
\r
4416 stream_.state = STREAM_RUNNING;
\r
4418 // create WASAPI stream thread
\r
4419 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4421 if ( !stream_.callbackInfo.thread ) {
\r
4422 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4423 error( RtAudioError::THREAD_ERROR );
\r
4426 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4427 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4431 //-----------------------------------------------------------------------------
\r
4433 void RtApiWasapi::stopStream( void )
\r
4437 if ( stream_.state == STREAM_STOPPED ) {
\r
4438 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4439 error( RtAudioError::WARNING );
\r
4443 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4444 stream_.state = STREAM_STOPPING;
\r
4446 // wait until stream thread is stopped
\r
4447 while( stream_.state != STREAM_STOPPED ) {
\r
4451 // Wait for the last buffer to play before stopping.
\r
4452 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4454 // stop capture client if applicable
\r
4455 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4456 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4457 if ( FAILED( hr ) ) {
\r
4458 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4459 error( RtAudioError::DRIVER_ERROR );
\r
4464 // stop render client if applicable
\r
4465 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4466 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4467 if ( FAILED( hr ) ) {
\r
4468 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4469 error( RtAudioError::DRIVER_ERROR );
\r
4474 // close thread handle
\r
4475 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4476 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4477 error( RtAudioError::THREAD_ERROR );
\r
4481 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4484 //-----------------------------------------------------------------------------
\r
void RtApiWasapi::abortStream( void )
{
  // Ensure a stream is actually open before proceeding (error() is invoked otherwise).
  verifyStream();

  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
    error( RtAudioError::WARNING );
    return;
  }

  // inform stream thread by setting stream state to STREAM_STOPPING
  stream_.state = STREAM_STOPPING;

  // wait until stream thread is stopped
  // NOTE(review): busy-wait with 1 ms sleeps; stream_.state is written by
  // wasapiThread() without explicit synchronization — confirm visibility
  // guarantees on the target compilers.
  while ( stream_.state != STREAM_STOPPED ) {
    Sleep( 1 );
  }

  // stop capture client if applicable
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
      error( RtAudioError::DRIVER_ERROR );
      return;
    }
  }

  // stop render client if applicable
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
      error( RtAudioError::DRIVER_ERROR );
      return;
    }
  }

  // close thread handle
  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
    errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
    error( RtAudioError::THREAD_ERROR );
    return;
  }

  stream_.callbackInfo.thread = (ThreadHandle) NULL;
}
\r
4534 //-----------------------------------------------------------------------------
\r
// Opens the selected WASAPI endpoint for the requested mode (INPUT/OUTPUT)
// and fills in the shared stream_ structure.  Returns SUCCESS or FAILURE;
// on failure the stream is closed and error() is raised with errorType.
bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int* bufferSize,
                                   RtAudio::StreamOptions* options )
{
  bool methodResult = FAILURE;
  unsigned int captureDeviceCount = 0;
  unsigned int renderDeviceCount = 0;

  IMMDeviceCollection* captureDevices = NULL;
  IMMDeviceCollection* renderDevices = NULL;
  IMMDevice* devicePtr = NULL;
  WAVEFORMATEX* deviceFormat = NULL;
  unsigned int bufferBytes;
  stream_.state = STREAM_STOPPED;

  // create API Handle if not already created
  if ( !stream_.apiHandle )
    stream_.apiHandle = ( void* ) new WasapiHandle();

  // Count capture devices
  errorText_.clear();
  RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
  HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
    goto Exit;
  }

  hr = captureDevices->GetCount( &captureDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
    goto Exit;
  }

  // Count render devices
  hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
    goto Exit;
  }

  hr = renderDevices->GetCount( &renderDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
    goto Exit;
  }

  // validate device index
  if ( device >= captureDeviceCount + renderDeviceCount ) {
    errorType = RtAudioError::INVALID_USE;
    errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
    goto Exit;
  }

  // determine whether index falls within capture or render devices
  // (render devices occupy indices [0, renderDeviceCount); capture follows)
  if ( device >= renderDeviceCount ) {
    if ( mode != INPUT ) {
      errorType = RtAudioError::INVALID_USE;
      errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
      goto Exit;
    }

    // retrieve captureAudioClient from devicePtr
    IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;

    hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
      goto Exit;
    }

    hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
                              NULL, ( void** ) &captureAudioClient );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
      goto Exit;
    }

    hr = captureAudioClient->GetMixFormat( &deviceFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
      goto Exit;
    }

    stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
    captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
  }
  else {
    if ( mode != OUTPUT ) {
      errorType = RtAudioError::INVALID_USE;
      errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
      goto Exit;
    }

    // retrieve renderAudioClient from devicePtr
    IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;

    hr = renderDevices->Item( device, &devicePtr );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
      goto Exit;
    }

    hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
                              NULL, ( void** ) &renderAudioClient );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
      goto Exit;
    }

    hr = renderAudioClient->GetMixFormat( &deviceFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
      goto Exit;
    }

    stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
    renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
  }

  // fill stream data
  if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
       ( stream_.mode == INPUT && mode == OUTPUT ) ) {
    stream_.mode = DUPLEX;
  }
  else {
    stream_.mode = mode;
  }

  stream_.device[mode] = device;
  stream_.doByteSwap[mode] = false;
  stream_.sampleRate = sampleRate;
  stream_.bufferSize = *bufferSize;
  stream_.nBuffers = 1;
  stream_.nUserChannels[mode] = channels;
  stream_.channelOffset[mode] = firstChannel;
  stream_.userFormat = format;
  stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;

  if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
    stream_.userInterleaved = false;
  else
    stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true;

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  // NOTE(review): the second operand below compares the ARRAYS
  // nUserChannels / nDeviceChannels (two distinct member addresses), so the
  // condition is always true and buffer conversion is always enabled.
  // wasapiThread()'s render path currently appears to depend on conversion
  // populating stream_.deviceBuffer, so replacing this with an element-wise
  // comparison must be coordinated with that code — verify before changing.
  if ( stream_.userFormat != stream_.deviceFormat[mode] ||
       stream_.nUserChannels != stream_.nDeviceChannels )
    stream_.doConvertBuffer[mode] = true;
  else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
            stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  if ( stream_.doConvertBuffer[mode] )
    setConvertInfo( mode, 0 );

  // Allocate necessary internal buffers
  bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );

  stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
  if ( !stream_.userBuffer[mode] ) {
    errorType = RtAudioError::MEMORY_ERROR;
    errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
    goto Exit;
  }

  if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
    stream_.callbackInfo.priority = 15;
  else
    stream_.callbackInfo.priority = 0;

  ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
  ///! TODO: RTAUDIO_HOG_DEVICE       // Exclusive mode

  methodResult = SUCCESS;

Exit:
  //clean up
  SAFE_RELEASE( captureDevices );
  SAFE_RELEASE( renderDevices );
  SAFE_RELEASE( devicePtr );
  CoTaskMemFree( deviceFormat );

  // if method failed, close the stream
  if ( methodResult == FAILURE )
    closeStream();

  if ( !errorText_.empty() )
    error( errorType );
  return methodResult;
}
\r
4730 //=============================================================================
\r
4732 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4735 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4740 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4743 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4748 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4751 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4756 //-----------------------------------------------------------------------------
\r
// Dedicated stream thread: initializes the capture/render WASAPI clients on
// first entry, then runs the process loop (pull input -> user callback ->
// push output) until stream_.state becomes STREAM_STOPPING.  All failure
// paths jump to Exit, which releases resources and reports via error().
void RtApiWasapi::wasapiThread()
{
  // as this is a new thread, we must CoInitialize it
  CoInitialize( NULL );

  HRESULT hr;

  IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
  IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
  IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
  IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
  HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
  HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;

  WAVEFORMATEX* captureFormat = NULL;
  WAVEFORMATEX* renderFormat = NULL;
  float captureSrRatio = 0.0f;
  float renderSrRatio = 0.0f;
  WasapiBuffer captureBuffer;
  WasapiBuffer renderBuffer;

  // declare local stream variables
  RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
  BYTE* streamBuffer = NULL;
  unsigned long captureFlags = 0;
  unsigned int bufferFrameCount = 0;
  unsigned int numFramesPadding = 0;
  unsigned int convBufferSize = 0;
  bool callbackPushed = false;
  bool callbackPulled = false;
  bool callbackStopped = false;
  int callbackResult = 0;

  // convBuffer is used to store converted buffers between WASAPI and the user
  char* convBuffer = NULL;
  unsigned int convBuffSize = 0;
  unsigned int deviceBuffSize = 0;

  errorText_.clear();
  RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;

  // Attempt to assign "Pro Audio" characteristic to thread
  // (raises MMCSS scheduling priority; best-effort, failure is ignored)
  HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
  if ( AvrtDll ) {
    DWORD taskIndex = 0;
    TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
    AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
    FreeLibrary( AvrtDll );
  }

  // start capture stream if applicable
  if ( captureAudioClient ) {
    hr = captureAudioClient->GetMixFormat( &captureFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
      goto Exit;
    }

    // ratio of device sample rate to user sample rate (>1 means device is faster)
    captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );

    // initialize capture stream according to desire buffer size
    float desiredBufferSize = stream_.bufferSize * captureSrRatio;
    REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );

    if ( !captureClient ) {
      hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
                                           AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
                                           desiredBufferPeriod,
                                           desiredBufferPeriod,
                                           captureFormat,
                                           NULL );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
        goto Exit;
      }

      hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
                                           ( void** ) &captureClient );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
        goto Exit;
      }

      // configure captureEvent to trigger on every available capture buffer
      captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
      if ( !captureEvent ) {
        errorType = RtAudioError::SYSTEM_ERROR;
        errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
        goto Exit;
      }

      hr = captureAudioClient->SetEventHandle( captureEvent );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
        goto Exit;
      }

      // cache the created objects in the shared API handle for reuse/cleanup
      ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
      ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
    }

    unsigned int inBufferSize = 0;
    hr = captureAudioClient->GetBufferSize( &inBufferSize );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
      goto Exit;
    }

    // scale outBufferSize according to stream->user sample rate ratio
    unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
    inBufferSize *= stream_.nDeviceChannels[INPUT];

    // set captureBuffer size
    captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );

    // reset the capture stream
    hr = captureAudioClient->Reset();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
      goto Exit;
    }

    // start the capture stream
    hr = captureAudioClient->Start();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
      goto Exit;
    }
  }

  // start render stream if applicable
  if ( renderAudioClient ) {
    hr = renderAudioClient->GetMixFormat( &renderFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
      goto Exit;
    }

    renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );

    // initialize render stream according to desire buffer size
    float desiredBufferSize = stream_.bufferSize * renderSrRatio;
    REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );

    if ( !renderClient ) {
      hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
                                          AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
                                          desiredBufferPeriod,
                                          desiredBufferPeriod,
                                          renderFormat,
                                          NULL );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
        goto Exit;
      }

      hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
                                          ( void** ) &renderClient );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
        goto Exit;
      }

      // configure renderEvent to trigger on every available render buffer
      renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
      if ( !renderEvent ) {
        errorType = RtAudioError::SYSTEM_ERROR;
        errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
        goto Exit;
      }

      hr = renderAudioClient->SetEventHandle( renderEvent );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
        goto Exit;
      }

      // cache the created objects in the shared API handle for reuse/cleanup
      ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
      ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
    }

    unsigned int outBufferSize = 0;
    hr = renderAudioClient->GetBufferSize( &outBufferSize );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
      goto Exit;
    }

    // scale inBufferSize according to user->stream sample rate ratio
    unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
    outBufferSize *= stream_.nDeviceChannels[OUTPUT];

    // set renderBuffer size
    renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );

    // reset the render stream
    hr = renderAudioClient->Reset();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
      goto Exit;
    }

    // start the render stream
    hr = renderAudioClient->Start();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
      goto Exit;
    }
  }

  // malloc buffer memory — sizes are the worst case of the (sample-rate
  // scaled) device-side frame counts for the active direction(s)
  if ( stream_.mode == INPUT ) {
    convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
    deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
  }
  else if ( stream_.mode == OUTPUT ) {
    convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
    deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
  }
  else if ( stream_.mode == DUPLEX ) {
    convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
                             ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
    deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
                               stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
  }

  convBuffer = ( char* ) malloc( convBuffSize );
  stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
  if ( !convBuffer || !stream_.deviceBuffer ) {
    errorType = RtAudioError::MEMORY_ERROR;
    errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
    goto Exit;
  }

  // stream process loop
  while ( stream_.state != STREAM_STOPPING ) {
    if ( !callbackPulled ) {
      // Callback Input
      // ==============
      // 1. Pull callback buffer from inputBuffer
      // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
      //                          Convert callback buffer to user format

      if ( captureAudioClient ) {
        // Pull callback buffer from inputBuffer
        callbackPulled = captureBuffer.pullBuffer( convBuffer,
                                                   ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
                                                   stream_.deviceFormat[INPUT] );

        if ( callbackPulled ) {
          // Convert callback buffer to user sample rate
          convertBufferWasapi( stream_.deviceBuffer,
                               convBuffer,
                               stream_.nDeviceChannels[INPUT],
                               captureFormat->nSamplesPerSec,
                               stream_.sampleRate,
                               ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
                               convBufferSize,
                               stream_.deviceFormat[INPUT] );

          if ( stream_.doConvertBuffer[INPUT] ) {
            // Convert callback buffer to user format
            convertBuffer( stream_.userBuffer[INPUT],
                           stream_.deviceBuffer,
                           stream_.convertInfo[INPUT] );
          }
          else {
            // no further conversion, simple copy deviceBuffer to userBuffer
            memcpy( stream_.userBuffer[INPUT],
                    stream_.deviceBuffer,
                    stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
          }
        }
      }
      else {
        // if there is no capture stream, set callbackPulled flag
        callbackPulled = true;
      }

      // Execute Callback
      // ================
      // 1. Execute user callback method
      // 2. Handle return value from callback

      // if callback has not requested the stream to stop
      if ( callbackPulled && !callbackStopped ) {
        // Execute user callback method
        callbackResult = callback( stream_.userBuffer[OUTPUT],
                                   stream_.userBuffer[INPUT],
                                   stream_.bufferSize,
                                   getStreamTime(),
                                   captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
                                   stream_.callbackInfo.userData );

        // Handle return value from callback
        if ( callbackResult == 1 ) {
          // instantiate a thread to stop this thread
          // (stopStream() cannot run on this thread: it waits for it to exit)
          HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
          if ( !threadHandle ) {
            errorType = RtAudioError::THREAD_ERROR;
            errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
            goto Exit;
          }
          else if ( !CloseHandle( threadHandle ) ) {
            errorType = RtAudioError::THREAD_ERROR;
            errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
            goto Exit;
          }

          callbackStopped = true;
        }
        else if ( callbackResult == 2 ) {
          // instantiate a thread to stop this thread
          HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
          if ( !threadHandle ) {
            errorType = RtAudioError::THREAD_ERROR;
            errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
            goto Exit;
          }
          else if ( !CloseHandle( threadHandle ) ) {
            errorType = RtAudioError::THREAD_ERROR;
            errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
            goto Exit;
          }

          callbackStopped = true;
        }
      }
    }

    // Callback Output
    // ===============
    // 1. Convert callback buffer to stream format
    // 2. Convert callback buffer to stream sample rate and channel count
    // 3. Push callback buffer into outputBuffer

    if ( renderAudioClient && callbackPulled ) {
      if ( stream_.doConvertBuffer[OUTPUT] ) {
        // Convert callback buffer to stream format
        convertBuffer( stream_.deviceBuffer,
                       stream_.userBuffer[OUTPUT],
                       stream_.convertInfo[OUTPUT] );

      }

      // Convert callback buffer to stream sample rate
      // NOTE(review): this always reads stream_.deviceBuffer, which is only
      // populated by the convertBuffer() call above — relies on
      // doConvertBuffer[OUTPUT] being true (see probeDeviceOpen) — verify.
      convertBufferWasapi( convBuffer,
                           stream_.deviceBuffer,
                           stream_.nDeviceChannels[OUTPUT],
                           stream_.sampleRate,
                           renderFormat->nSamplesPerSec,
                           stream_.bufferSize,
                           convBufferSize,
                           stream_.deviceFormat[OUTPUT] );

      // Push callback buffer into outputBuffer
      callbackPushed = renderBuffer.pushBuffer( convBuffer,
                                                convBufferSize * stream_.nDeviceChannels[OUTPUT],
                                                stream_.deviceFormat[OUTPUT] );
    }
    else {
      // if there is no render stream, set callbackPushed flag
      callbackPushed = true;
    }

    // Stream Capture
    // ==============
    // 1. Get capture buffer from stream
    // 2. Push capture buffer into inputBuffer
    // 3. If 2. was successful: Release capture buffer

    if ( captureAudioClient ) {
      // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
      if ( !callbackPulled ) {
        WaitForSingleObject( captureEvent, INFINITE );
      }

      // Get capture buffer from stream
      hr = captureClient->GetBuffer( &streamBuffer,
                                     &bufferFrameCount,
                                     &captureFlags, NULL, NULL );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
        goto Exit;
      }

      if ( bufferFrameCount != 0 ) {
        // Push capture buffer into inputBuffer
        if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
                                       bufferFrameCount * stream_.nDeviceChannels[INPUT],
                                       stream_.deviceFormat[INPUT] ) )
        {
          // Release capture buffer
          hr = captureClient->ReleaseBuffer( bufferFrameCount );
          if ( FAILED( hr ) ) {
            errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
            goto Exit;
          }
        }
        else
        {
          // Inform WASAPI that capture was unsuccessful
          hr = captureClient->ReleaseBuffer( 0 );
          if ( FAILED( hr ) ) {
            errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
            goto Exit;
          }
        }
      }
      else
      {
        // Inform WASAPI that capture was unsuccessful
        hr = captureClient->ReleaseBuffer( 0 );
        if ( FAILED( hr ) ) {
          errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
          goto Exit;
        }
      }
    }

    // Stream Render
    // =============
    // 1. Get render buffer from stream
    // 2. Pull next buffer from outputBuffer
    // 3. If 2. was successful: Fill render buffer with next buffer
    //                          Release render buffer

    if ( renderAudioClient ) {
      // if the callback output buffer was not pushed to renderBuffer, wait for next render event
      if ( callbackPulled && !callbackPushed ) {
        WaitForSingleObject( renderEvent, INFINITE );
      }

      // Get render buffer from stream
      hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
        goto Exit;
      }

      hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
        goto Exit;
      }

      // only the unpadded portion of the endpoint buffer may be written
      bufferFrameCount -= numFramesPadding;

      if ( bufferFrameCount != 0 ) {
        hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
        if ( FAILED( hr ) ) {
          errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
          goto Exit;
        }

        // Pull next buffer from outputBuffer
        // Fill render buffer with next buffer
        if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
                                      bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
                                      stream_.deviceFormat[OUTPUT] ) )
        {
          // Release render buffer
          hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
          if ( FAILED( hr ) ) {
            errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
            goto Exit;
          }
        }
        else
        {
          // Inform WASAPI that render was unsuccessful
          hr = renderClient->ReleaseBuffer( 0, 0 );
          if ( FAILED( hr ) ) {
            errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
            goto Exit;
          }
        }
      }
      else
      {
        // Inform WASAPI that render was unsuccessful
        hr = renderClient->ReleaseBuffer( 0, 0 );
        if ( FAILED( hr ) ) {
          errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
          goto Exit;
        }
      }
    }

    // if the callback buffer was pushed renderBuffer reset callbackPulled flag
    if ( callbackPushed ) {
      callbackPulled = false;
      // tick stream time
      RtApi::tickStreamTime();
    }

  }

Exit:
  // clean up
  CoTaskMemFree( captureFormat );
  CoTaskMemFree( renderFormat );

  free ( convBuffer );

  CoUninitialize();

  // update stream state
  stream_.state = STREAM_STOPPED;

  if ( errorText_.empty() )
    return;
  else
    error( errorType );
}
\r
5273 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5277 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5279 // Modified by Robin Davies, October 2005
\r
5280 // - Improvements to DirectX pointer chasing.
\r
5281 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5282 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5283 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5284 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5286 #include <dsound.h>
\r
5287 #include <assert.h>
\r
5288 #include <algorithm>
\r
5290 #if defined(__MINGW32__)
\r
5291 // missing from latest mingw winapi
\r
5292 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5293 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5294 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5295 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5298 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5300 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5301 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
// Returns nonzero when 'pointer' lies inside the half-open window
// [earlierPointer, laterPointer) of a circular buffer 'bufferSize' bytes long.
static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
{
  // Unwrap the circular positions so the window becomes a linear interval:
  // the probe and the window end are shifted up by one buffer length when
  // they wrap around below the window start.
  DWORD probe = pointer;
  DWORD windowEnd = laterPointer;
  if ( probe > bufferSize )
    probe -= bufferSize;
  if ( windowEnd < earlierPointer )
    windowEnd += bufferSize;
  if ( probe < earlierPointer )
    probe += bufferSize;
  return ( probe >= earlierPointer ) && ( probe < windowEnd );
}
\r
// A structure to hold various information related to the DirectSound
// API implementation.
struct DsHandle {
  unsigned int drainCounter; // Tracks callback counts when draining
  bool internalDrain;        // Indicates if stop is initiated from callback or not.
  void *id[2];               // per-direction DirectSound object handles — assumed [0]=output, [1]=input; TODO confirm
  void *buffer[2];           // per-direction device buffer pointers
  bool xrun[2];              // per-direction over/underrun flags
  UINT bufferPointer[2];  
  DWORD dsBufferSize[2];
  DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
  HANDLE condition;

  // Zero-initializes counters, flags and pointers for both directions.
  DsHandle()
    :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
};
\r
5329 // Declarations for utility functions, callbacks, and structures
\r
5330 // specific to the DirectSound implementation.
\r
5331 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5332 LPCTSTR description,
\r
5334 LPVOID lpContext );
\r
5336 static const char* getErrorString( int code );
\r
5338 static unsigned __stdcall callbackHandler( void *ptr );
\r
// Describes one enumerated DirectSound device; id/validId are indexed per
// direction — assumed [0]=output, [1]=input; TODO confirm against callers.
struct DsDevice {
  LPGUID id[2];
  bool validId[2];
  bool found;          // set during (re-)enumeration; stale entries are pruned
  std::string name;

  DsDevice()
  : found(false) { validId[0] = false; validId[1] = false; }
};
\r
// Context passed through the DirectSound enumeration callback.
struct DsProbeData {
  bool isInput;                             // true when enumerating capture devices
  std::vector<struct DsDevice>* dsDevices;  // device list to fill in / mark as found
};
\r
RtApiDs :: RtApiDs()
{
  // Dsound will run both-threaded. If CoInitialize fails, then just
  // accept whatever the mainline chose for a threading model.
  coInitialized_ = false;
  HRESULT hr = CoInitialize( NULL );
  if ( !FAILED( hr ) ) coInitialized_ = true;  // remember so the dtor can balance it
}
\r
RtApiDs :: ~RtApiDs()
{
  if ( coInitialized_ ) CoUninitialize(); // balanced call.
  // NOTE(review): closeStream() runs after CoUninitialize(); if it touches
  // COM this ordering looks suspect — verify.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
\r
// The DirectSound default output is always the first device.
unsigned int RtApiDs :: getDefaultOutputDevice( void )
{
  return 0;
}
\r
// The DirectSound default input is always the first input device,
// which is the first capture device enumerated.
unsigned int RtApiDs :: getDefaultInputDevice( void )
{
  return 0;
}
\r
// Re-enumerates output and capture devices, pruning entries that have
// disappeared, and returns the number of devices currently available.
// Enumeration failures are reported as warnings, not fatal errors.
unsigned int RtApiDs :: getDeviceCount( void )
{
  // Set query flag for previously found devices to false, so that we
  // can check for any devices that have disappeared.
  for ( unsigned int i=0; i<dsDevices.size(); i++ )
    dsDevices[i].found = false;

  // Query DirectSound devices.
  struct DsProbeData probeInfo;
  probeInfo.isInput = false;
  probeInfo.dsDevices = &dsDevices;
  HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  }

  // Query DirectSoundCapture devices.
  probeInfo.isInput = true;
  result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  }

  // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
  // Note: index only advances when no element is erased.
  for ( unsigned int i=0; i<dsDevices.size(); ) {
    if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
    else i++;
  }

  return static_cast<unsigned int>(dsDevices.size());
}
\r
// Query capability information (channel counts, supported sample rates,
// native sample formats, default-device flags) for the DirectSound device
// at index 'device'.  The playback side is probed via DirectSoundCreate /
// GetCaps, the capture side via DirectSoundCaptureCreate / GetCaps; each
// interface is released after querying.  Failures raise WARNING (or
// INVALID_USE for bad arguments) via error().
// NOTE(review): this extract is missing interior source lines (the embedded
// original line numbers skip, e.g. 5429 -> 5434), so closing braces, the
// 'probeInput:' label targeted by the goto below, and the return statements
// of the canonical source are not visible here.
5419 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )

5421 RtAudio::DeviceInfo info;

// Start pessimistic; set true only after a successful probe.
5422 info.probed = false;

5424 if ( dsDevices.size() == 0 ) {

5425 // Force a query of all devices

5427 if ( dsDevices.size() == 0 ) {

5428 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";

5429 error( RtAudioError::INVALID_USE );

5434 if ( device >= dsDevices.size() ) {

5435 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";

5436 error( RtAudioError::INVALID_USE );

// No valid playback GUID for this device: skip straight to the capture probe.
5441 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;

5443 LPDIRECTSOUND output;

5445 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

5446 if ( FAILED( result ) ) {

5447 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

5448 errorText_ = errorStream_.str();

5449 error( RtAudioError::WARNING );

// NOTE(review): the DSCAPS declaration for outCaps (original line ~5452)
// is one of the lines missing from this extract.
5453 outCaps.dwSize = sizeof( outCaps );

5454 result = output->GetCaps( &outCaps );

5455 if ( FAILED( result ) ) {

5456 output->Release();

5457 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";

5458 errorText_ = errorStream_.str();

5459 error( RtAudioError::WARNING );

5463 // Get output channel information.

5464 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;

5466 // Get sample rate information.

5467 info.sampleRates.clear();

5468 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {

5469 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&

5470 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {

5471 info.sampleRates.push_back( SAMPLE_RATES[k] );

// Prefer the highest supported rate that does not exceed 48 kHz.
5473 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )

5474 info.preferredSampleRate = SAMPLE_RATES[k];

5478 // Get format information.

5479 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;

5480 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;

5482 output->Release();

5484 if ( getDefaultOutputDevice() == device )

5485 info.isDefaultOutput = true;

// Playback-only device (no valid capture GUID): finish the probe here.
5487 if ( dsDevices[ device ].validId[1] == false ) {

5488 info.name = dsDevices[ device ].name;

5489 info.probed = true;

// Capture-side probe begins here.  NOTE(review): the 'probeInput:' label
// (original line ~5493) is among the lines missing from this extract.
5495 LPDIRECTSOUNDCAPTURE input;

5496 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

5497 if ( FAILED( result ) ) {

5498 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

5499 errorText_ = errorStream_.str();

5500 error( RtAudioError::WARNING );

5505 inCaps.dwSize = sizeof( inCaps );

5506 result = input->GetCaps( &inCaps );

5507 if ( FAILED( result ) ) {

5509 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";

5510 errorText_ = errorStream_.str();

5511 error( RtAudioError::WARNING );

5515 // Get input channel information.

5516 info.inputChannels = inCaps.dwChannels;

5518 // Get sample rate and format information.

// dwFormats is a bitmask of fixed WAVE_FORMAT_* combinations; the
// stereo (xS..) and mono (xM..) masks are decoded separately below.
5519 std::vector<unsigned int> rates;

5520 if ( inCaps.dwChannels >= 2 ) {

5521 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;

5522 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;

5523 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;

5524 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;

5525 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;

5526 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;

5527 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;

5528 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;

// Rates are derived from the 16-bit masks when 16-bit is available,
// otherwise from the 8-bit masks.
5530 if ( info.nativeFormats & RTAUDIO_SINT16 ) {

5531 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );

5532 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );

5533 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );

5534 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );

5536 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {

5537 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );

5538 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );

5539 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );

5540 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );

5543 else if ( inCaps.dwChannels == 1 ) {

5544 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;

5545 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;

5546 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;

5547 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;

5548 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;

5549 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;

5550 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;

5551 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;

5553 if ( info.nativeFormats & RTAUDIO_SINT16 ) {

5554 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );

5555 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );

5556 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );

5557 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );

5559 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {

5560 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );

5561 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );

5562 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );

5563 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );

5566 else info.inputChannels = 0; // technically, this would be an error

5570 if ( info.inputChannels == 0 ) return info;

5572 // Copy the supported rates to the info structure but avoid duplication.

5574 for ( unsigned int i=0; i<rates.size(); i++ ) {

5576 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {

5577 if ( rates[i] == info.sampleRates[j] ) {

5582 if ( found == false ) info.sampleRates.push_back( rates[i] );

5584 std::sort( info.sampleRates.begin(), info.sampleRates.end() );

5586 // If device opens for both playback and capture, we determine the channels.

5587 if ( info.outputChannels > 0 && info.inputChannels > 0 )

5588 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

// Presumably the first enumerated capture device is the system default —
// TODO confirm this holds for DirectSound enumeration order.
5590 if ( device == 0 ) info.isDefaultInput = true;

5592 // Copy name and return.

5593 info.name = dsDevices[ device ].name;

5594 info.probed = true;
\r
// Open the DirectSound device at index 'device' for the given mode
// (OUTPUT or INPUT) and configure the stream_ structure: creates the
// DS playback or capture buffer, sets formats and buffer sizes, allocates
// user/device conversion buffers, builds the DsHandle, and (first call
// only) spins up the callback thread.  Returns false on failure; the
// visible error paths set errorText_ and release any acquired objects.
// NOTE(review): this extract is missing interior source lines (embedded
// original line numbers skip), so several closing braces, 'return FAILURE'
// statements, declarations (HRESULT result, DSCAPS/DSBCAPS variables,
// audioPtr/dataLen), the 'error:' cleanup label, and the final 'return
// SUCCESS' of the canonical source are not visible here.
5598 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

5599 unsigned int firstChannel, unsigned int sampleRate,

5600 RtAudioFormat format, unsigned int *bufferSize,

5601 RtAudio::StreamOptions *options )

// DirectSound buffers are at most stereo; reject wider requests up front.
5603 if ( channels + firstChannel > 2 ) {

5604 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";

5608 size_t nDevices = dsDevices.size();

5609 if ( nDevices == 0 ) {

5610 // This should not happen because a check is made before this function is called.

5611 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";

5615 if ( device >= nDevices ) {

5616 // This should not happen because a check is made before this function is called.

5617 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";

5621 if ( mode == OUTPUT ) {

5622 if ( dsDevices[ device ].validId[0] == false ) {

5623 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";

5624 errorText_ = errorStream_.str();

5628 else { // mode == INPUT

5629 if ( dsDevices[ device ].validId[1] == false ) {

5630 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";

5631 errorText_ = errorStream_.str();

5636 // According to a note in PortAudio, using GetDesktopWindow()

5637 // instead of GetForegroundWindow() is supposed to avoid problems

5638 // that occur when the application's window is not the foreground

5639 // window. Also, if the application window closes before the

5640 // DirectSound buffer, DirectSound can crash. In the past, I had

5641 // problems when using GetDesktopWindow() but it seems fine now

5642 // (January 2010). I'll leave it commented here.

5643 // HWND hWnd = GetForegroundWindow();

5644 HWND hWnd = GetDesktopWindow();

5646 // Check the numberOfBuffers parameter and limit the lowest value to

5647 // two. This is a judgement call and a value of two is probably too

5648 // low for capture, but it should work for playback.

5650 if ( options ) nBuffers = options->numberOfBuffers;

5651 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;

// NOTE(review): the comment above says "limit the lowest value to two",
// but values below 2 are actually bumped to 3 here; matches upstream
// RtAudio — presumably intentional, worth confirming.
5652 if ( nBuffers < 2 ) nBuffers = 3;

5654 // Check the lower range of the user-specified buffer size and set

5655 // (arbitrarily) to a lower bound of 32.

5656 if ( *bufferSize < 32 ) *bufferSize = 32;

5658 // Create the wave format structure. The data format setting will

5659 // be determined later.

5660 WAVEFORMATEX waveFormat;

5661 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );

5662 waveFormat.wFormatTag = WAVE_FORMAT_PCM;

5663 waveFormat.nChannels = channels + firstChannel;

5664 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;

5666 // Determine the device buffer size. By default, we'll use the value

5667 // defined above (32K), but we will grow it to make allowances for

5668 // very large software buffer sizes.

5669 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;

5670 DWORD dsPointerLeadTime = 0;

// ohandle/bhandle carry the DS object and its buffer into the DsHandle
// at the bottom of the function.
5672 void *ohandle = 0, *bhandle = 0;

5674 if ( mode == OUTPUT ) {

5676 LPDIRECTSOUND output;

5677 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

5678 if ( FAILED( result ) ) {

5679 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

5680 errorText_ = errorStream_.str();

5685 outCaps.dwSize = sizeof( outCaps );

5686 result = output->GetCaps( &outCaps );

5687 if ( FAILED( result ) ) {

5688 output->Release();

5689 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";

5690 errorText_ = errorStream_.str();

5694 // Check channel information.

5695 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {

5696 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";

5697 errorText_ = errorStream_.str();

5701 // Check format information. Use 16-bit format unless not

5702 // supported or user requests 8-bit.

5703 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&

5704 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {

5705 waveFormat.wBitsPerSample = 16;

5706 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5709 waveFormat.wBitsPerSample = 8;

5710 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5712 stream_.userFormat = format;

5714 // Update wave format structure and buffer information.

5715 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

5716 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

5717 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

5719 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

5720 while ( dsPointerLeadTime * 2U > dsBufferSize )

5721 dsBufferSize *= 2;

5723 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.

5724 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );

5725 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.

5726 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );

5727 if ( FAILED( result ) ) {

5728 output->Release();

5729 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";

5730 errorText_ = errorStream_.str();

5734 // Even though we will write to the secondary buffer, we need to

5735 // access the primary buffer to set the correct output format

5736 // (since the default is 8-bit, 22 kHz!). Setup the DS primary

5737 // buffer description.

5738 DSBUFFERDESC bufferDescription;

5739 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

5740 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

5741 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;

5743 // Obtain the primary buffer

5744 LPDIRECTSOUNDBUFFER buffer;

5745 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5746 if ( FAILED( result ) ) {

5747 output->Release();

5748 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";

5749 errorText_ = errorStream_.str();

5753 // Set the primary DS buffer sound format.

5754 result = buffer->SetFormat( &waveFormat );

5755 if ( FAILED( result ) ) {

5756 output->Release();

5757 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";

5758 errorText_ = errorStream_.str();

5762 // Setup the secondary DS buffer description.

5763 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

5764 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

5765 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

5766 DSBCAPS_GLOBALFOCUS |

5767 DSBCAPS_GETCURRENTPOSITION2 |

5768 DSBCAPS_LOCHARDWARE ); // Force hardware mixing

5769 bufferDescription.dwBufferBytes = dsBufferSize;

5770 bufferDescription.lpwfxFormat = &waveFormat;

5772 // Try to create the secondary DS buffer. If that doesn't work,

5773 // try to use software mixing. Otherwise, there's a problem.

5774 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5775 if ( FAILED( result ) ) {

5776 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

5777 DSBCAPS_GLOBALFOCUS |

5778 DSBCAPS_GETCURRENTPOSITION2 |

5779 DSBCAPS_LOCSOFTWARE ); // Force software mixing

5780 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5781 if ( FAILED( result ) ) {

5782 output->Release();

5783 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";

5784 errorText_ = errorStream_.str();

5789 // Get the buffer size ... might be different from what we specified.

5791 dsbcaps.dwSize = sizeof( DSBCAPS );

5792 result = buffer->GetCaps( &dsbcaps );

5793 if ( FAILED( result ) ) {

5794 output->Release();

5795 buffer->Release();

5796 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

5797 errorText_ = errorStream_.str();

5801 dsBufferSize = dsbcaps.dwBufferBytes;

5803 // Lock the DS buffer

5806 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

5807 if ( FAILED( result ) ) {

5808 output->Release();

5809 buffer->Release();

5810 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";

5811 errorText_ = errorStream_.str();

5815 // Zero the DS buffer

5816 ZeroMemory( audioPtr, dataLen );

5818 // Unlock the DS buffer

5819 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5820 if ( FAILED( result ) ) {

5821 output->Release();

5822 buffer->Release();

5823 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";

5824 errorText_ = errorStream_.str();

5828 ohandle = (void *) output;

5829 bhandle = (void *) buffer;

5832 if ( mode == INPUT ) {

5834 LPDIRECTSOUNDCAPTURE input;

5835 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

5836 if ( FAILED( result ) ) {

5837 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

5838 errorText_ = errorStream_.str();

5843 inCaps.dwSize = sizeof( inCaps );

5844 result = input->GetCaps( &inCaps );

5845 if ( FAILED( result ) ) {

5847 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";

5848 errorText_ = errorStream_.str();

5852 // Check channel information.

5853 if ( inCaps.dwChannels < channels + firstChannel ) {

5854 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";

5858 // Check format information. Use 16-bit format unless user

5859 // requests 8-bit.

5860 DWORD deviceFormats;

5861 if ( channels + firstChannel == 2 ) {

5862 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;

5863 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

5864 waveFormat.wBitsPerSample = 8;

5865 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5867 else { // assume 16-bit is supported

5868 waveFormat.wBitsPerSample = 16;

5869 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5872 else { // channel == 1

5873 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;

5874 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

5875 waveFormat.wBitsPerSample = 8;

5876 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5878 else { // assume 16-bit is supported

5879 waveFormat.wBitsPerSample = 16;

5880 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5883 stream_.userFormat = format;

5885 // Update wave format structure and buffer information.

5886 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

5887 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

5888 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

5890 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

5891 while ( dsPointerLeadTime * 2U > dsBufferSize )

5892 dsBufferSize *= 2;

5894 // Setup the secondary DS buffer description.

5895 DSCBUFFERDESC bufferDescription;

5896 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );

5897 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );

5898 bufferDescription.dwFlags = 0;

5899 bufferDescription.dwReserved = 0;

5900 bufferDescription.dwBufferBytes = dsBufferSize;

5901 bufferDescription.lpwfxFormat = &waveFormat;

5903 // Create the capture buffer.

5904 LPDIRECTSOUNDCAPTUREBUFFER buffer;

5905 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );

5906 if ( FAILED( result ) ) {

5908 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";

5909 errorText_ = errorStream_.str();

5913 // Get the buffer size ... might be different from what we specified.

5914 DSCBCAPS dscbcaps;

5915 dscbcaps.dwSize = sizeof( DSCBCAPS );

5916 result = buffer->GetCaps( &dscbcaps );

5917 if ( FAILED( result ) ) {

5919 buffer->Release();

5920 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

5921 errorText_ = errorStream_.str();

5925 dsBufferSize = dscbcaps.dwBufferBytes;

5927 // NOTE: We could have a problem here if this is a duplex stream

5928 // and the play and capture hardware buffer sizes are different

5929 // (I'm actually not sure if that is a problem or not).

5930 // Currently, we are not verifying that.

5932 // Lock the capture buffer

5935 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

5936 if ( FAILED( result ) ) {

5938 buffer->Release();

5939 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";

5940 errorText_ = errorStream_.str();

5944 // Zero the buffer

5945 ZeroMemory( audioPtr, dataLen );

5947 // Unlock the buffer

5948 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5949 if ( FAILED( result ) ) {

5951 buffer->Release();

5952 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";

5953 errorText_ = errorStream_.str();

5957 ohandle = (void *) input;

5958 bhandle = (void *) buffer;

5961 // Set various stream parameters

5962 DsHandle *handle = 0;

5963 stream_.nDeviceChannels[mode] = channels + firstChannel;

5964 stream_.nUserChannels[mode] = channels;

5965 stream_.bufferSize = *bufferSize;

5966 stream_.channelOffset[mode] = firstChannel;

5967 stream_.deviceInterleaved[mode] = true;

5968 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

5969 else stream_.userInterleaved = true;

5971 // Set flag for buffer conversion

// Conversion is needed when channel count, sample format, or
// interleaving differ between the user and device sides.
5972 stream_.doConvertBuffer[mode] = false;

5973 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])

5974 stream_.doConvertBuffer[mode] = true;

5975 if (stream_.userFormat != stream_.deviceFormat[mode])

5976 stream_.doConvertBuffer[mode] = true;

5977 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

5978 stream_.nUserChannels[mode] > 1 )

5979 stream_.doConvertBuffer[mode] = true;

5981 // Allocate necessary internal buffers

5982 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

5983 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

5984 if ( stream_.userBuffer[mode] == NULL ) {

5985 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";

5989 if ( stream_.doConvertBuffer[mode] ) {

5991 bool makeBuffer = true;

5992 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

// In duplex setups, reuse the output-side device buffer when it is
// already large enough for the input side.
5993 if ( mode == INPUT ) {

5994 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

5995 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

5996 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;

6000 if ( makeBuffer ) {

6001 bufferBytes *= *bufferSize;

6002 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

6003 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

6004 if ( stream_.deviceBuffer == NULL ) {

6005 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";

6011 // Allocate our DsHandle structures for the stream.

6012 if ( stream_.apiHandle == 0 ) {

6014 handle = new DsHandle;

// NOTE(review): the error string mentions "AsioHandle" but this is the
// DirectSound backend allocating a DsHandle — looks like a copy/paste
// slip carried over from the ASIO code path.
6016 catch ( std::bad_alloc& ) {

6017 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";

6021 // Create a manual-reset event.

6022 handle->condition = CreateEvent( NULL, // no security

6023 TRUE, // manual-reset

6024 FALSE, // non-signaled initially

6025 NULL ); // unnamed

6026 stream_.apiHandle = (void *) handle;

6029 handle = (DsHandle *) stream_.apiHandle;

6030 handle->id[mode] = ohandle;

6031 handle->buffer[mode] = bhandle;

6032 handle->dsBufferSize[mode] = dsBufferSize;

6033 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;

6035 stream_.device[mode] = device;

6036 stream_.state = STREAM_STOPPED;

6037 if ( stream_.mode == OUTPUT && mode == INPUT )

6038 // We had already set up an output stream.

6039 stream_.mode = DUPLEX;

6041 stream_.mode = mode;

6042 stream_.nBuffers = nBuffers;

6043 stream_.sampleRate = sampleRate;

6045 // Setup the buffer conversion information structure.

6046 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

6048 // Setup the callback thread.

6049 if ( stream_.callbackInfo.isRunning == false ) {

6050 unsigned threadId;

6051 stream_.callbackInfo.isRunning = true;

6052 stream_.callbackInfo.object = (void *) this;

6053 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,

6054 &stream_.callbackInfo, 0, &threadId );

6055 if ( stream_.callbackInfo.thread == 0 ) {

6056 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";

6060 // Boost DS thread priority

6061 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );

// Error-cleanup path: release whichever DS objects/buffers were created,
// close the event handle, free user/device buffers, and mark the stream
// closed.  NOTE(review): the 'error:' label introducing this path is
// among the lines missing from this extract.
6067 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

6068 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

6069 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6070 if ( buffer ) buffer->Release();

6071 object->Release();

6073 if ( handle->buffer[1] ) {

6074 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

6075 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6076 if ( buffer ) buffer->Release();

6077 object->Release();

6079 CloseHandle( handle->condition );

6081 stream_.apiHandle = 0;

6084 for ( int i=0; i<2; i++ ) {

6085 if ( stream_.userBuffer[i] ) {

6086 free( stream_.userBuffer[i] );

6087 stream_.userBuffer[i] = 0;

6091 if ( stream_.deviceBuffer ) {

6092 free( stream_.deviceBuffer );

6093 stream_.deviceBuffer = 0;

6096 stream_.state = STREAM_CLOSED;
\r
// Close the open stream: stop and join the callback thread, release the
// DirectSound playback/capture objects and buffers, close the event
// handle, free the user and device buffers, and reset the stream state
// to UNINITIALIZED/STREAM_CLOSED.  Warns (and, in the canonical source,
// returns early) if no stream is open.
// NOTE(review): interior lines are missing from this extract (embedded
// numbering skips, e.g. 6113 -> 6115), including the 'if ( handle )'
// guard and several Stop() calls that precede the Release() calls below.
6100 void RtApiDs :: closeStream()

6102 if ( stream_.state == STREAM_CLOSED ) {

6103 errorText_ = "RtApiDs::closeStream(): no open stream to close!";

6104 error( RtAudioError::WARNING );

6108 // Stop the callback thread.

// Clearing isRunning signals the callback thread to exit; then join it.
6109 stream_.callbackInfo.isRunning = false;

6110 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );

6111 CloseHandle( (HANDLE) stream_.callbackInfo.thread );

6113 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6115 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

6116 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

6117 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6120 buffer->Release();

6122 object->Release();

6124 if ( handle->buffer[1] ) {

6125 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

6126 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6129 buffer->Release();

6131 object->Release();

6133 CloseHandle( handle->condition );

6135 stream_.apiHandle = 0;

6138 for ( int i=0; i<2; i++ ) {

6139 if ( stream_.userBuffer[i] ) {

6140 free( stream_.userBuffer[i] );

6141 stream_.userBuffer[i] = 0;

6145 if ( stream_.deviceBuffer ) {

6146 free( stream_.deviceBuffer );

6147 stream_.deviceBuffer = 0;

6150 stream_.mode = UNINITIALIZED;

6151 stream_.state = STREAM_CLOSED;
\r
// Start the stopped stream: raise the system timer resolution, reset the
// rolling/preroll bookkeeping, start the DS playback buffer (looping)
// and/or capture buffer, reset the drain state and condition event, and
// mark the stream RUNNING.  Any FAILED HRESULT is reported as a
// SYSTEM_ERROR at the end.
6154 void RtApiDs :: startStream()

6157 if ( stream_.state == STREAM_RUNNING ) {

6158 errorText_ = "RtApiDs::startStream(): the stream is already running!";

6159 error( RtAudioError::WARNING );

6163 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6165 // Increase scheduler frequency on lesser windows (a side-effect of

6166 // increasing timer accuracy). On greater windows (Win2K or later),

6167 // this is already in effect.

// Paired with timeEndPeriod( 1 ) in stopStream().
6168 timeBeginPeriod( 1 );

6170 buffersRolling = false;

6171 duplexPrerollBytes = 0;

6173 if ( stream_.mode == DUPLEX ) {

6174 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.

6175 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );

6178 HRESULT result = 0;

6179 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

6181 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

// Looping playback: the callback thread refills the circular DS buffer.
6182 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );

6183 if ( FAILED( result ) ) {

6184 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";

6185 errorText_ = errorStream_.str();

6190 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6192 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6193 result = buffer->Start( DSCBSTART_LOOPING );

6194 if ( FAILED( result ) ) {

6195 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";

6196 errorText_ = errorStream_.str();

6201 handle->drainCounter = 0;

6202 handle->internalDrain = false;

// Manual-reset event used by stopStream() to wait for drain completion.
6203 ResetEvent( handle->condition );

6204 stream_.state = STREAM_RUNNING;

6207 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the running stream.  For output, waits (via the condition event)
// for the callback to drain remaining audio, then stops the DS buffer and
// zero-fills it so a restart does not replay stale data; for input, stops
// and clears the capture buffer likewise.  Restores normal timer
// resolution and releases the stream mutex before reporting any failure
// as SYSTEM_ERROR.
// NOTE(review): interior lines are missing from this extract (embedded
// numbering skips), including the audioPtr/dataLen declarations used by
// the Lock/Unlock calls below and several closing braces.
6210 void RtApiDs :: stopStream()

6213 if ( stream_.state == STREAM_STOPPED ) {

6214 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";

6215 error( RtAudioError::WARNING );

6219 HRESULT result = 0;

6222 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6223 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// drainCounter == 0 means no drain in progress: request one (2) and
// block until the callback signals the condition event.
6224 if ( handle->drainCounter == 0 ) {

6225 handle->drainCounter = 2;

6226 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled

6229 stream_.state = STREAM_STOPPED;

6231 MUTEX_LOCK( &stream_.mutex );

6233 // Stop the buffer and clear memory

6234 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6235 result = buffer->Stop();

6236 if ( FAILED( result ) ) {

6237 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";

6238 errorText_ = errorStream_.str();

6242 // Lock the buffer and clear it so that if we start to play again,

6243 // we won't have old data playing.

6244 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );

6245 if ( FAILED( result ) ) {

6246 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";

6247 errorText_ = errorStream_.str();

6251 // Zero the DS buffer

6252 ZeroMemory( audioPtr, dataLen );

6254 // Unlock the DS buffer

6255 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6256 if ( FAILED( result ) ) {

6257 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";

6258 errorText_ = errorStream_.str();

6262 // If we start playing again, we must begin at beginning of buffer.

6263 handle->bufferPointer[0] = 0;

6266 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6267 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6271 stream_.state = STREAM_STOPPED;

// In DUPLEX mode the mutex was already taken in the output branch above.
6273 if ( stream_.mode != DUPLEX )

6274 MUTEX_LOCK( &stream_.mutex );

6276 result = buffer->Stop();

6277 if ( FAILED( result ) ) {

6278 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";

6279 errorText_ = errorStream_.str();

6283 // Lock the buffer and clear it so that if we start to play again,

6284 // we won't have old data playing.

6285 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );

6286 if ( FAILED( result ) ) {

6287 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";

6288 errorText_ = errorStream_.str();

6292 // Zero the DS buffer

6293 ZeroMemory( audioPtr, dataLen );

6295 // Unlock the DS buffer

6296 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6297 if ( FAILED( result ) ) {

6298 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";

6299 errorText_ = errorStream_.str();

6303 // If we start recording again, we must begin at beginning of buffer.

6304 handle->bufferPointer[1] = 0;

6308 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.

6309 MUTEX_UNLOCK( &stream_.mutex );

6311 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6314 void RtApiDs :: abortStream()
\r
6317 if ( stream_.state == STREAM_STOPPED ) {
\r
6318 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
6319 error( RtAudioError::WARNING );
\r
6323 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6324 handle->drainCounter = 2;
\r
6329 void RtApiDs :: callbackEvent()
\r
6331 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6332 Sleep( 50 ); // sleep 50 milliseconds
\r
6336 if ( stream_.state == STREAM_CLOSED ) {
\r
6337 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6338 error( RtAudioError::WARNING );
\r
6342 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6343 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6345 // Check if we were draining the stream and signal is finished.
\r
6346 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6348 stream_.state = STREAM_STOPPING;
\r
6349 if ( handle->internalDrain == false )
\r
6350 SetEvent( handle->condition );
\r
6356 // Invoke user callback to get fresh output data UNLESS we are
\r
6357 // draining stream.
\r
6358 if ( handle->drainCounter == 0 ) {
\r
6359 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6360 double streamTime = getStreamTime();
\r
6361 RtAudioStreamStatus status = 0;
\r
6362 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6363 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6364 handle->xrun[0] = false;
\r
6366 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6367 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6368 handle->xrun[1] = false;
\r
6370 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6371 stream_.bufferSize, streamTime, status, info->userData );
\r
6372 if ( cbReturnValue == 2 ) {
\r
6373 stream_.state = STREAM_STOPPING;
\r
6374 handle->drainCounter = 2;
\r
6378 else if ( cbReturnValue == 1 ) {
\r
6379 handle->drainCounter = 1;
\r
6380 handle->internalDrain = true;
\r
6385 DWORD currentWritePointer, safeWritePointer;
\r
6386 DWORD currentReadPointer, safeReadPointer;
\r
6387 UINT nextWritePointer;
\r
6389 LPVOID buffer1 = NULL;
\r
6390 LPVOID buffer2 = NULL;
\r
6391 DWORD bufferSize1 = 0;
\r
6392 DWORD bufferSize2 = 0;
\r
6397 MUTEX_LOCK( &stream_.mutex );
\r
6398 if ( stream_.state == STREAM_STOPPED ) {
\r
6399 MUTEX_UNLOCK( &stream_.mutex );
\r
6403 if ( buffersRolling == false ) {
\r
6404 if ( stream_.mode == DUPLEX ) {
\r
6405 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6407 // It takes a while for the devices to get rolling. As a result,
\r
6408 // there's no guarantee that the capture and write device pointers
\r
6409 // will move in lockstep. Wait here for both devices to start
\r
6410 // rolling, and then set our buffer pointers accordingly.
\r
6411 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6412 // bytes later than the write buffer.
\r
6414 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6415 // take place between the two GetCurrentPosition calls... but I'm
\r
6416 // really not sure how to solve the problem. Temporarily boost to
\r
6417 // Realtime priority, maybe; but I'm not sure what priority the
\r
6418 // DirectSound service threads run at. We *should* be roughly
\r
6419 // within a ms or so of correct.
\r
6421 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6422 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6424 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6426 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6427 if ( FAILED( result ) ) {
\r
6428 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6429 errorText_ = errorStream_.str();
\r
6430 MUTEX_UNLOCK( &stream_.mutex );
\r
6431 error( RtAudioError::SYSTEM_ERROR );
\r
6434 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6435 if ( FAILED( result ) ) {
\r
6436 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6437 errorText_ = errorStream_.str();
\r
6438 MUTEX_UNLOCK( &stream_.mutex );
\r
6439 error( RtAudioError::SYSTEM_ERROR );
\r
6443 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6444 if ( FAILED( result ) ) {
\r
6445 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6446 errorText_ = errorStream_.str();
\r
6447 MUTEX_UNLOCK( &stream_.mutex );
\r
6448 error( RtAudioError::SYSTEM_ERROR );
\r
6451 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6452 if ( FAILED( result ) ) {
\r
6453 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6454 errorText_ = errorStream_.str();
\r
6455 MUTEX_UNLOCK( &stream_.mutex );
\r
6456 error( RtAudioError::SYSTEM_ERROR );
\r
6459 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6463 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6465 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6466 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6467 handle->bufferPointer[1] = safeReadPointer;
\r
6469 else if ( stream_.mode == OUTPUT ) {
\r
6471 // Set the proper nextWritePosition after initial startup.
\r
6472 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6473 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6474 if ( FAILED( result ) ) {
\r
6475 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6476 errorText_ = errorStream_.str();
\r
6477 MUTEX_UNLOCK( &stream_.mutex );
\r
6478 error( RtAudioError::SYSTEM_ERROR );
\r
6481 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6482 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6485 buffersRolling = true;
\r
6488 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6490 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6492 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6493 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6494 bufferBytes *= formatBytes( stream_.userFormat );
\r
6495 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6498 // Setup parameters and do buffer conversion if necessary.
\r
6499 if ( stream_.doConvertBuffer[0] ) {
\r
6500 buffer = stream_.deviceBuffer;
\r
6501 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6502 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6503 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6506 buffer = stream_.userBuffer[0];
\r
6507 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6508 bufferBytes *= formatBytes( stream_.userFormat );
\r
6511 // No byte swapping necessary in DirectSound implementation.
\r
6513 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6514 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6516 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6517 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6519 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6520 nextWritePointer = handle->bufferPointer[0];
\r
6522 DWORD endWrite, leadPointer;
\r
6524 // Find out where the read and "safe write" pointers are.
\r
6525 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6526 if ( FAILED( result ) ) {
\r
6527 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6528 errorText_ = errorStream_.str();
\r
6529 MUTEX_UNLOCK( &stream_.mutex );
\r
6530 error( RtAudioError::SYSTEM_ERROR );
\r
6534 // We will copy our output buffer into the region between
\r
6535 // safeWritePointer and leadPointer. If leadPointer is not
\r
6536 // beyond the next endWrite position, wait until it is.
\r
6537 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6538 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6539 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6540 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6541 endWrite = nextWritePointer + bufferBytes;
\r
6543 // Check whether the entire write region is behind the play pointer.
\r
6544 if ( leadPointer >= endWrite ) break;
\r
6546 // If we are here, then we must wait until the leadPointer advances
\r
6547 // beyond the end of our next write region. We use the
\r
6548 // Sleep() function to suspend operation until that happens.
\r
6549 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6550 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6551 if ( millis < 1.0 ) millis = 1.0;
\r
6552 Sleep( (DWORD) millis );
\r
6555 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6556 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6557 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6558 handle->xrun[0] = true;
\r
6559 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6560 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6561 handle->bufferPointer[0] = nextWritePointer;
\r
6562 endWrite = nextWritePointer + bufferBytes;
\r
6565 // Lock free space in the buffer
\r
6566 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6567 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6568 if ( FAILED( result ) ) {
\r
6569 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6570 errorText_ = errorStream_.str();
\r
6571 MUTEX_UNLOCK( &stream_.mutex );
\r
6572 error( RtAudioError::SYSTEM_ERROR );
\r
6576 // Copy our buffer into the DS buffer
\r
6577 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6578 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6580 // Update our buffer offset and unlock sound buffer
\r
6581 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6582 if ( FAILED( result ) ) {
\r
6583 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6584 errorText_ = errorStream_.str();
\r
6585 MUTEX_UNLOCK( &stream_.mutex );
\r
6586 error( RtAudioError::SYSTEM_ERROR );
\r
6589 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6590 handle->bufferPointer[0] = nextWritePointer;
\r
6593 // Don't bother draining input
\r
6594 if ( handle->drainCounter ) {
\r
6595 handle->drainCounter++;
\r
6599 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6601 // Setup parameters.
\r
6602 if ( stream_.doConvertBuffer[1] ) {
\r
6603 buffer = stream_.deviceBuffer;
\r
6604 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6605 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6608 buffer = stream_.userBuffer[1];
\r
6609 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6610 bufferBytes *= formatBytes( stream_.userFormat );
\r
6613 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6614 long nextReadPointer = handle->bufferPointer[1];
\r
6615 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6617 // Find out where the write and "safe read" pointers are.
\r
6618 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6619 if ( FAILED( result ) ) {
\r
6620 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6621 errorText_ = errorStream_.str();
\r
6622 MUTEX_UNLOCK( &stream_.mutex );
\r
6623 error( RtAudioError::SYSTEM_ERROR );
\r
6627 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6628 DWORD endRead = nextReadPointer + bufferBytes;
\r
6630 // Handling depends on whether we are INPUT or DUPLEX.
\r
6631 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6632 // then a wait here will drag the write pointers into the forbidden zone.
\r
6634 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6635 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6636 // practical way to sync up the read and write pointers reliably, given the
\r
6637 // the very complex relationship between phase and increment of the read and write
\r
6640 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6641 // provide a pre-roll period of 0.5 seconds in which we return
\r
6642 // zeros from the read buffer while the pointers sync up.
\r
6644 if ( stream_.mode == DUPLEX ) {
\r
6645 if ( safeReadPointer < endRead ) {
\r
6646 if ( duplexPrerollBytes <= 0 ) {
\r
6647 // Pre-roll time over. Be more agressive.
\r
6648 int adjustment = endRead-safeReadPointer;
\r
6650 handle->xrun[1] = true;
\r
6652 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6653 // and perform fine adjustments later.
\r
6654 // - small adjustments: back off by twice as much.
\r
6655 if ( adjustment >= 2*bufferBytes )
\r
6656 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6658 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6660 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6664 // In pre=roll time. Just do it.
\r
6665 nextReadPointer = safeReadPointer - bufferBytes;
\r
6666 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6668 endRead = nextReadPointer + bufferBytes;
\r
6671 else { // mode == INPUT
\r
6672 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6673 // See comments for playback.
\r
6674 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6675 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6676 if ( millis < 1.0 ) millis = 1.0;
\r
6677 Sleep( (DWORD) millis );
\r
6679 // Wake up and find out where we are now.
\r
6680 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6681 if ( FAILED( result ) ) {
\r
6682 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6683 errorText_ = errorStream_.str();
\r
6684 MUTEX_UNLOCK( &stream_.mutex );
\r
6685 error( RtAudioError::SYSTEM_ERROR );
\r
6689 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6693 // Lock free space in the buffer
\r
6694 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6695 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6696 if ( FAILED( result ) ) {
\r
6697 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6698 errorText_ = errorStream_.str();
\r
6699 MUTEX_UNLOCK( &stream_.mutex );
\r
6700 error( RtAudioError::SYSTEM_ERROR );
\r
6704 if ( duplexPrerollBytes <= 0 ) {
\r
6705 // Copy our buffer into the DS buffer
\r
6706 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6707 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6710 memset( buffer, 0, bufferSize1 );
\r
6711 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6712 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6715 // Update our buffer offset and unlock sound buffer
\r
6716 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6717 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6718 if ( FAILED( result ) ) {
\r
6719 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6720 errorText_ = errorStream_.str();
\r
6721 MUTEX_UNLOCK( &stream_.mutex );
\r
6722 error( RtAudioError::SYSTEM_ERROR );
\r
6725 handle->bufferPointer[1] = nextReadPointer;
\r
6727 // No byte swapping necessary in DirectSound implementation.
\r
6729 // If necessary, convert 8-bit data from unsigned to signed.
\r
6730 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6731 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6733 // Do buffer conversion if necessary.
\r
6734 if ( stream_.doConvertBuffer[1] )
\r
6735 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6739 MUTEX_UNLOCK( &stream_.mutex );
\r
6740 RtApi::tickStreamTime();
\r
// Definitions for utility functions and callbacks
// specific to the DirectSound implementation.
\r
6746 static unsigned __stdcall callbackHandler( void *ptr )
\r
6748 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6749 RtApiDs *object = (RtApiDs *) info->object;
\r
6750 bool* isRunning = &info->isRunning;
\r
6752 while ( *isRunning == true ) {
\r
6753 object->callbackEvent();
\r
6756 _endthreadex( 0 );
\r
6760 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6761 LPCTSTR description,
\r
6762 LPCTSTR /*module*/,
\r
6763 LPVOID lpContext )
\r
6765 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6766 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6769 bool validDevice = false;
\r
6770 if ( probeInfo.isInput == true ) {
\r
6772 LPDIRECTSOUNDCAPTURE object;
\r
6774 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6775 if ( hr != DS_OK ) return TRUE;
\r
6777 caps.dwSize = sizeof(caps);
\r
6778 hr = object->GetCaps( &caps );
\r
6779 if ( hr == DS_OK ) {
\r
6780 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6781 validDevice = true;
\r
6783 object->Release();
\r
6787 LPDIRECTSOUND object;
\r
6788 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6789 if ( hr != DS_OK ) return TRUE;
\r
6791 caps.dwSize = sizeof(caps);
\r
6792 hr = object->GetCaps( &caps );
\r
6793 if ( hr == DS_OK ) {
\r
6794 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6795 validDevice = true;
\r
6797 object->Release();
\r
6800 // If good device, then save its name and guid.
\r
6801 std::string name = convertCharPointerToStdString( description );
\r
6802 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6803 if ( lpguid == NULL )
\r
6804 name = "Default Device";
\r
6805 if ( validDevice ) {
\r
6806 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6807 if ( dsDevices[i].name == name ) {
\r
6808 dsDevices[i].found = true;
\r
6809 if ( probeInfo.isInput ) {
\r
6810 dsDevices[i].id[1] = lpguid;
\r
6811 dsDevices[i].validId[1] = true;
\r
6814 dsDevices[i].id[0] = lpguid;
\r
6815 dsDevices[i].validId[0] = true;
\r
6822 device.name = name;
\r
6823 device.found = true;
\r
6824 if ( probeInfo.isInput ) {
\r
6825 device.id[1] = lpguid;
\r
6826 device.validId[1] = true;
\r
6829 device.id[0] = lpguid;
\r
6830 device.validId[0] = true;
\r
6832 dsDevices.push_back( device );
\r
6838 static const char* getErrorString( int code )
\r
6842 case DSERR_ALLOCATED:
\r
6843 return "Already allocated";
\r
6845 case DSERR_CONTROLUNAVAIL:
\r
6846 return "Control unavailable";
\r
6848 case DSERR_INVALIDPARAM:
\r
6849 return "Invalid parameter";
\r
6851 case DSERR_INVALIDCALL:
\r
6852 return "Invalid call";
\r
6854 case DSERR_GENERIC:
\r
6855 return "Generic error";
\r
6857 case DSERR_PRIOLEVELNEEDED:
\r
6858 return "Priority level needed";
\r
6860 case DSERR_OUTOFMEMORY:
\r
6861 return "Out of memory";
\r
6863 case DSERR_BADFORMAT:
\r
6864 return "The sample rate or the channel format is not supported";
\r
6866 case DSERR_UNSUPPORTED:
\r
6867 return "Not supported";
\r
6869 case DSERR_NODRIVER:
\r
6870 return "No driver";
\r
6872 case DSERR_ALREADYINITIALIZED:
\r
6873 return "Already initialized";
\r
6875 case DSERR_NOAGGREGATION:
\r
6876 return "No aggregation";
\r
6878 case DSERR_BUFFERLOST:
\r
6879 return "Buffer lost";
\r
6881 case DSERR_OTHERAPPHASPRIO:
\r
6882 return "Another application already has priority";
\r
6884 case DSERR_UNINITIALIZED:
\r
6885 return "Uninitialized";
\r
6888 return "DirectSound unknown error";
\r
//******************** End of __WINDOWS_DS__ *********************//
\r
6895 #if defined(__LINUX_ALSA__)
\r
6897 #include <alsa/asoundlib.h>
\r
6898 #include <unistd.h>
\r
6900 // A structure to hold various information related to the ALSA API
\r
6901 // implementation.
\r
6902 struct AlsaHandle {
\r
6903 snd_pcm_t *handles[2];
\r
6904 bool synchronized;
\r
6906 pthread_cond_t runnable_cv;
\r
6910 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6913 static void *alsaCallbackHandler( void * ptr );
\r
6915 RtApiAlsa :: RtApiAlsa()
\r
6917 // Nothing to do here.
\r
6920 RtApiAlsa :: ~RtApiAlsa()
\r
6922 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6925 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6927 unsigned nDevices = 0;
\r
6928 int result, subdevice, card;
\r
6930 snd_ctl_t *handle;
\r
6932 // Count cards and devices
\r
6934 snd_card_next( &card );
\r
6935 while ( card >= 0 ) {
\r
6936 sprintf( name, "hw:%d", card );
\r
6937 result = snd_ctl_open( &handle, name, 0 );
\r
6938 if ( result < 0 ) {
\r
6939 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6940 errorText_ = errorStream_.str();
\r
6941 error( RtAudioError::WARNING );
\r
6946 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6947 if ( result < 0 ) {
\r
6948 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6949 errorText_ = errorStream_.str();
\r
6950 error( RtAudioError::WARNING );
\r
6953 if ( subdevice < 0 )
\r
6958 snd_ctl_close( handle );
\r
6959 snd_card_next( &card );
\r
6962 result = snd_ctl_open( &handle, "default", 0 );
\r
6963 if (result == 0) {
\r
6965 snd_ctl_close( handle );
\r
6971 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6973 RtAudio::DeviceInfo info;
\r
6974 info.probed = false;
\r
6976 unsigned nDevices = 0;
\r
6977 int result, subdevice, card;
\r
6979 snd_ctl_t *chandle;
\r
6981 // Count cards and devices
\r
6984 snd_card_next( &card );
\r
6985 while ( card >= 0 ) {
\r
6986 sprintf( name, "hw:%d", card );
\r
6987 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6988 if ( result < 0 ) {
\r
6989 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6990 errorText_ = errorStream_.str();
\r
6991 error( RtAudioError::WARNING );
\r
6996 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6997 if ( result < 0 ) {
\r
6998 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6999 errorText_ = errorStream_.str();
\r
7000 error( RtAudioError::WARNING );
\r
7003 if ( subdevice < 0 ) break;
\r
7004 if ( nDevices == device ) {
\r
7005 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7011 snd_ctl_close( chandle );
\r
7012 snd_card_next( &card );
\r
7015 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7016 if ( result == 0 ) {
\r
7017 if ( nDevices == device ) {
\r
7018 strcpy( name, "default" );
\r
7024 if ( nDevices == 0 ) {
\r
7025 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
7026 error( RtAudioError::INVALID_USE );
\r
7030 if ( device >= nDevices ) {
\r
7031 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
7032 error( RtAudioError::INVALID_USE );
\r
7038 // If a stream is already open, we cannot probe the stream devices.
\r
7039 // Thus, use the saved results.
\r
7040 if ( stream_.state != STREAM_CLOSED &&
\r
7041 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
7042 snd_ctl_close( chandle );
\r
7043 if ( device >= devices_.size() ) {
\r
7044 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
7045 error( RtAudioError::WARNING );
\r
7048 return devices_[ device ];
\r
7051 int openMode = SND_PCM_ASYNC;
\r
7052 snd_pcm_stream_t stream;
\r
7053 snd_pcm_info_t *pcminfo;
\r
7054 snd_pcm_info_alloca( &pcminfo );
\r
7055 snd_pcm_t *phandle;
\r
7056 snd_pcm_hw_params_t *params;
\r
7057 snd_pcm_hw_params_alloca( ¶ms );
\r
7059 // First try for playback unless default device (which has subdev -1)
\r
7060 stream = SND_PCM_STREAM_PLAYBACK;
\r
7061 snd_pcm_info_set_stream( pcminfo, stream );
\r
7062 if ( subdevice != -1 ) {
\r
7063 snd_pcm_info_set_device( pcminfo, subdevice );
\r
7064 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
7066 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
7067 if ( result < 0 ) {
\r
7068 // Device probably doesn't support playback.
\r
7069 goto captureProbe;
\r
7073 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
7074 if ( result < 0 ) {
\r
7075 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7076 errorText_ = errorStream_.str();
\r
7077 error( RtAudioError::WARNING );
\r
7078 goto captureProbe;
\r
7081 // The device is open ... fill the parameter structure.
\r
7082 result = snd_pcm_hw_params_any( phandle, params );
\r
7083 if ( result < 0 ) {
\r
7084 snd_pcm_close( phandle );
\r
7085 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7086 errorText_ = errorStream_.str();
\r
7087 error( RtAudioError::WARNING );
\r
7088 goto captureProbe;
\r
7091 // Get output channel information.
\r
7092 unsigned int value;
\r
7093 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7094 if ( result < 0 ) {
\r
7095 snd_pcm_close( phandle );
\r
7096 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
7097 errorText_ = errorStream_.str();
\r
7098 error( RtAudioError::WARNING );
\r
7099 goto captureProbe;
\r
7101 info.outputChannels = value;
\r
7102 snd_pcm_close( phandle );
\r
7105 stream = SND_PCM_STREAM_CAPTURE;
\r
7106 snd_pcm_info_set_stream( pcminfo, stream );
\r
7108 // Now try for capture unless default device (with subdev = -1)
\r
7109 if ( subdevice != -1 ) {
\r
7110 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
7111 snd_ctl_close( chandle );
\r
7112 if ( result < 0 ) {
\r
7113 // Device probably doesn't support capture.
\r
7114 if ( info.outputChannels == 0 ) return info;
\r
7115 goto probeParameters;
\r
7119 snd_ctl_close( chandle );
\r
7121 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7122 if ( result < 0 ) {
\r
7123 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7124 errorText_ = errorStream_.str();
\r
7125 error( RtAudioError::WARNING );
\r
7126 if ( info.outputChannels == 0 ) return info;
\r
7127 goto probeParameters;
\r
7130 // The device is open ... fill the parameter structure.
\r
7131 result = snd_pcm_hw_params_any( phandle, params );
\r
7132 if ( result < 0 ) {
\r
7133 snd_pcm_close( phandle );
\r
7134 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7135 errorText_ = errorStream_.str();
\r
7136 error( RtAudioError::WARNING );
\r
7137 if ( info.outputChannels == 0 ) return info;
\r
7138 goto probeParameters;
\r
7141 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7142 if ( result < 0 ) {
\r
7143 snd_pcm_close( phandle );
\r
7144 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
7145 errorText_ = errorStream_.str();
\r
7146 error( RtAudioError::WARNING );
\r
7147 if ( info.outputChannels == 0 ) return info;
\r
7148 goto probeParameters;
\r
7150 info.inputChannels = value;
\r
7151 snd_pcm_close( phandle );
\r
7153 // If device opens for both playback and capture, we determine the channels.
\r
7154 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
7155 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
7157 // ALSA doesn't provide default devices so we'll use the first available one.
\r
7158 if ( device == 0 && info.outputChannels > 0 )
\r
7159 info.isDefaultOutput = true;
\r
7160 if ( device == 0 && info.inputChannels > 0 )
\r
7161 info.isDefaultInput = true;
\r
7164 // At this point, we just need to figure out the supported data
\r
7165 // formats and sample rates. We'll proceed by opening the device in
\r
7166 // the direction with the maximum number of channels, or playback if
\r
7167 // they are equal. This might limit our sample rate options, but so
\r
7170 if ( info.outputChannels >= info.inputChannels )
\r
7171 stream = SND_PCM_STREAM_PLAYBACK;
\r
7173 stream = SND_PCM_STREAM_CAPTURE;
\r
7174 snd_pcm_info_set_stream( pcminfo, stream );
\r
7176 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7177 if ( result < 0 ) {
\r
7178 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7179 errorText_ = errorStream_.str();
\r
7180 error( RtAudioError::WARNING );
\r
7184 // The device is open ... fill the parameter structure.
\r
7185 result = snd_pcm_hw_params_any( phandle, params );
\r
7186 if ( result < 0 ) {
\r
7187 snd_pcm_close( phandle );
\r
7188 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7189 errorText_ = errorStream_.str();
\r
7190 error( RtAudioError::WARNING );
\r
7194 // Test our discrete set of sample rate values.
\r
7195 info.sampleRates.clear();
\r
7196 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7197 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
\r
7198 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7200 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
7201 info.preferredSampleRate = SAMPLE_RATES[i];
\r
7204 if ( info.sampleRates.size() == 0 ) {
\r
7205 snd_pcm_close( phandle );
\r
7206 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7207 errorText_ = errorStream_.str();
\r
7208 error( RtAudioError::WARNING );
\r
7212 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7213 snd_pcm_format_t format;
\r
7214 info.nativeFormats = 0;
\r
7215 format = SND_PCM_FORMAT_S8;
\r
7216 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7217 info.nativeFormats |= RTAUDIO_SINT8;
\r
7218 format = SND_PCM_FORMAT_S16;
\r
7219 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7220 info.nativeFormats |= RTAUDIO_SINT16;
\r
7221 format = SND_PCM_FORMAT_S24;
\r
7222 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7223 info.nativeFormats |= RTAUDIO_SINT24;
\r
7224 format = SND_PCM_FORMAT_S32;
\r
7225 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7226 info.nativeFormats |= RTAUDIO_SINT32;
\r
7227 format = SND_PCM_FORMAT_FLOAT;
\r
7228 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7229 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7230 format = SND_PCM_FORMAT_FLOAT64;
\r
7231 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7232 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7234 // Check that we have at least one supported format
\r
7235 if ( info.nativeFormats == 0 ) {
\r
7236 snd_pcm_close( phandle );
\r
7237 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7238 errorText_ = errorStream_.str();
\r
7239 error( RtAudioError::WARNING );
\r
7243 // Get the device name
\r
7245 result = snd_card_get_name( card, &cardname );
\r
7246 if ( result >= 0 ) {
\r
7247 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7252 // That's all ... close the device and return
\r
7253 snd_pcm_close( phandle );
\r
7254 info.probed = true;
\r
7258 void RtApiAlsa :: saveDeviceInfo( void )
\r
7262 unsigned int nDevices = getDeviceCount();
\r
7263 devices_.resize( nDevices );
\r
7264 for ( unsigned int i=0; i<nDevices; i++ )
\r
7265 devices_[i] = getDeviceInfo( i );
\r
7268 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7269 unsigned int firstChannel, unsigned int sampleRate,
\r
7270 RtAudioFormat format, unsigned int *bufferSize,
\r
7271 RtAudio::StreamOptions *options )
\r
7274 #if defined(__RTAUDIO_DEBUG__)
\r
7275 snd_output_t *out;
\r
7276 snd_output_stdio_attach(&out, stderr, 0);
\r
7279 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7281 unsigned nDevices = 0;
\r
7282 int result, subdevice, card;
\r
7284 snd_ctl_t *chandle;
\r
7286 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7287 snprintf(name, sizeof(name), "%s", "default");
\r
7289 // Count cards and devices
\r
7291 snd_card_next( &card );
\r
7292 while ( card >= 0 ) {
\r
7293 sprintf( name, "hw:%d", card );
\r
7294 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7295 if ( result < 0 ) {
\r
7296 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7297 errorText_ = errorStream_.str();
\r
7302 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7303 if ( result < 0 ) break;
\r
7304 if ( subdevice < 0 ) break;
\r
7305 if ( nDevices == device ) {
\r
7306 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7307 snd_ctl_close( chandle );
\r
7312 snd_ctl_close( chandle );
\r
7313 snd_card_next( &card );
\r
7316 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7317 if ( result == 0 ) {
\r
7318 if ( nDevices == device ) {
\r
7319 strcpy( name, "default" );
\r
7325 if ( nDevices == 0 ) {
\r
7326 // This should not happen because a check is made before this function is called.
\r
7327 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7331 if ( device >= nDevices ) {
\r
7332 // This should not happen because a check is made before this function is called.
\r
7333 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7340 // The getDeviceInfo() function will not work for a device that is
\r
7341 // already open. Thus, we'll probe the system before opening a
\r
7342 // stream and save the results for use by getDeviceInfo().
\r
7343 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7344 this->saveDeviceInfo();
\r
7346 snd_pcm_stream_t stream;
\r
7347 if ( mode == OUTPUT )
\r
7348 stream = SND_PCM_STREAM_PLAYBACK;
\r
7350 stream = SND_PCM_STREAM_CAPTURE;
\r
7352 snd_pcm_t *phandle;
\r
7353 int openMode = SND_PCM_ASYNC;
\r
7354 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7355 if ( result < 0 ) {
\r
7356 if ( mode == OUTPUT )
\r
7357 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7359 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7360 errorText_ = errorStream_.str();
\r
7364 // Fill the parameter structure.
\r
7365 snd_pcm_hw_params_t *hw_params;
\r
7366 snd_pcm_hw_params_alloca( &hw_params );
\r
7367 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7368 if ( result < 0 ) {
\r
7369 snd_pcm_close( phandle );
\r
7370 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7371 errorText_ = errorStream_.str();
\r
7375 #if defined(__RTAUDIO_DEBUG__)
\r
7376 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7377 snd_pcm_hw_params_dump( hw_params, out );
\r
7380 // Set access ... check user preference.
\r
7381 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7382 stream_.userInterleaved = false;
\r
7383 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7384 if ( result < 0 ) {
\r
7385 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7386 stream_.deviceInterleaved[mode] = true;
\r
7389 stream_.deviceInterleaved[mode] = false;
\r
7392 stream_.userInterleaved = true;
\r
7393 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7394 if ( result < 0 ) {
\r
7395 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7396 stream_.deviceInterleaved[mode] = false;
\r
7399 stream_.deviceInterleaved[mode] = true;
\r
7402 if ( result < 0 ) {
\r
7403 snd_pcm_close( phandle );
\r
7404 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7405 errorText_ = errorStream_.str();
\r
7409 // Determine how to set the device format.
\r
7410 stream_.userFormat = format;
\r
7411 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7413 if ( format == RTAUDIO_SINT8 )
\r
7414 deviceFormat = SND_PCM_FORMAT_S8;
\r
7415 else if ( format == RTAUDIO_SINT16 )
\r
7416 deviceFormat = SND_PCM_FORMAT_S16;
\r
7417 else if ( format == RTAUDIO_SINT24 )
\r
7418 deviceFormat = SND_PCM_FORMAT_S24;
\r
7419 else if ( format == RTAUDIO_SINT32 )
\r
7420 deviceFormat = SND_PCM_FORMAT_S32;
\r
7421 else if ( format == RTAUDIO_FLOAT32 )
\r
7422 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7423 else if ( format == RTAUDIO_FLOAT64 )
\r
7424 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7426 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7427 stream_.deviceFormat[mode] = format;
\r
7431 // The user requested format is not natively supported by the device.
\r
7432 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7433 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7434 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7438 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7439 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7440 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7444 deviceFormat = SND_PCM_FORMAT_S32;
\r
7445 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7446 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7450 deviceFormat = SND_PCM_FORMAT_S24;
\r
7451 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7452 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7456 deviceFormat = SND_PCM_FORMAT_S16;
\r
7457 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7458 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7462 deviceFormat = SND_PCM_FORMAT_S8;
\r
7463 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7464 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7468 // If we get here, no supported format was found.
\r
7469 snd_pcm_close( phandle );
\r
7470 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7471 errorText_ = errorStream_.str();
\r
7475 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7476 if ( result < 0 ) {
\r
7477 snd_pcm_close( phandle );
\r
7478 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7479 errorText_ = errorStream_.str();
\r
7483 // Determine whether byte-swaping is necessary.
\r
7484 stream_.doByteSwap[mode] = false;
\r
7485 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7486 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7487 if ( result == 0 )
\r
7488 stream_.doByteSwap[mode] = true;
\r
7489 else if (result < 0) {
\r
7490 snd_pcm_close( phandle );
\r
7491 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7492 errorText_ = errorStream_.str();
\r
7497 // Set the sample rate.
\r
7498 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7499 if ( result < 0 ) {
\r
7500 snd_pcm_close( phandle );
\r
7501 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7502 errorText_ = errorStream_.str();
\r
7506 // Determine the number of channels for this device. We support a possible
\r
7507 // minimum device channel number > than the value requested by the user.
\r
7508 stream_.nUserChannels[mode] = channels;
\r
7509 unsigned int value;
\r
7510 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7511 unsigned int deviceChannels = value;
\r
7512 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7513 snd_pcm_close( phandle );
\r
7514 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7515 errorText_ = errorStream_.str();
\r
7519 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7520 if ( result < 0 ) {
\r
7521 snd_pcm_close( phandle );
\r
7522 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7523 errorText_ = errorStream_.str();
\r
7526 deviceChannels = value;
\r
7527 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7528 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7530 // Set the device channels.
\r
7531 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7532 if ( result < 0 ) {
\r
7533 snd_pcm_close( phandle );
\r
7534 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7535 errorText_ = errorStream_.str();
\r
7539 // Set the buffer (or period) size.
\r
7541 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7542 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7543 if ( result < 0 ) {
\r
7544 snd_pcm_close( phandle );
\r
7545 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7546 errorText_ = errorStream_.str();
\r
7549 *bufferSize = periodSize;
\r
7551 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7552 unsigned int periods = 0;
\r
7553 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7554 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7555 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7556 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7557 if ( result < 0 ) {
\r
7558 snd_pcm_close( phandle );
\r
7559 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7560 errorText_ = errorStream_.str();
\r
7564 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7565 // MUST be the same in both directions!
\r
7566 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7567 snd_pcm_close( phandle );
\r
7568 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7569 errorText_ = errorStream_.str();
\r
7573 stream_.bufferSize = *bufferSize;
\r
7575 // Install the hardware configuration
\r
7576 result = snd_pcm_hw_params( phandle, hw_params );
\r
7577 if ( result < 0 ) {
\r
7578 snd_pcm_close( phandle );
\r
7579 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7580 errorText_ = errorStream_.str();
\r
7584 #if defined(__RTAUDIO_DEBUG__)
\r
7585 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7586 snd_pcm_hw_params_dump( hw_params, out );
\r
7589 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7590 snd_pcm_sw_params_t *sw_params = NULL;
\r
7591 snd_pcm_sw_params_alloca( &sw_params );
\r
7592 snd_pcm_sw_params_current( phandle, sw_params );
\r
7593 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7594 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7595 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7597 // The following two settings were suggested by Theo Veenker
\r
7598 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7599 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7601 // here are two options for a fix
\r
7602 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7603 snd_pcm_uframes_t val;
\r
7604 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7605 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7607 result = snd_pcm_sw_params( phandle, sw_params );
\r
7608 if ( result < 0 ) {
\r
7609 snd_pcm_close( phandle );
\r
7610 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7611 errorText_ = errorStream_.str();
\r
7615 #if defined(__RTAUDIO_DEBUG__)
\r
7616 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7617 snd_pcm_sw_params_dump( sw_params, out );
\r
7620 // Set flags for buffer conversion
\r
7621 stream_.doConvertBuffer[mode] = false;
\r
7622 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7623 stream_.doConvertBuffer[mode] = true;
\r
7624 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7625 stream_.doConvertBuffer[mode] = true;
\r
7626 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7627 stream_.nUserChannels[mode] > 1 )
\r
7628 stream_.doConvertBuffer[mode] = true;
\r
7630 // Allocate the ApiHandle if necessary and then save.
\r
7631 AlsaHandle *apiInfo = 0;
\r
7632 if ( stream_.apiHandle == 0 ) {
\r
7634 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7636 catch ( std::bad_alloc& ) {
\r
7637 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7641 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7642 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7646 stream_.apiHandle = (void *) apiInfo;
\r
7647 apiInfo->handles[0] = 0;
\r
7648 apiInfo->handles[1] = 0;
\r
7651 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7653 apiInfo->handles[mode] = phandle;
\r
7656 // Allocate necessary internal buffers.
\r
7657 unsigned long bufferBytes;
\r
7658 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7659 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7660 if ( stream_.userBuffer[mode] == NULL ) {
\r
7661 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7665 if ( stream_.doConvertBuffer[mode] ) {
\r
7667 bool makeBuffer = true;
\r
7668 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7669 if ( mode == INPUT ) {
\r
7670 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7671 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7672 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7676 if ( makeBuffer ) {
\r
7677 bufferBytes *= *bufferSize;
\r
7678 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7679 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7680 if ( stream_.deviceBuffer == NULL ) {
\r
7681 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7687 stream_.sampleRate = sampleRate;
\r
7688 stream_.nBuffers = periods;
\r
7689 stream_.device[mode] = device;
\r
7690 stream_.state = STREAM_STOPPED;
\r
7692 // Setup the buffer conversion information structure.
\r
7693 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7695 // Setup thread if necessary.
\r
7696 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7697 // We had already set up an output stream.
\r
7698 stream_.mode = DUPLEX;
\r
7699 // Link the streams if possible.
\r
7700 apiInfo->synchronized = false;
\r
7701 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7702 apiInfo->synchronized = true;
\r
7704 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7705 error( RtAudioError::WARNING );
\r
7709 stream_.mode = mode;
\r
7711 // Setup callback thread.
\r
7712 stream_.callbackInfo.object = (void *) this;
\r
7714 // Set the thread attributes for joinable and realtime scheduling
\r
7715 // priority (optional). The higher priority will only take affect
\r
7716 // if the program is run as root or suid. Note, under Linux
\r
7717 // processes with CAP_SYS_NICE privilege, a user can change
\r
7718 // scheduling policy and priority (thus need not be root). See
\r
7719 // POSIX "capabilities".
\r
7720 pthread_attr_t attr;
\r
7721 pthread_attr_init( &attr );
\r
7722 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7724 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7725 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7726 // We previously attempted to increase the audio callback priority
\r
7727 // to SCHED_RR here via the attributes. However, while no errors
\r
7728 // were reported in doing so, it did not work. So, now this is
\r
7729 // done in the alsaCallbackHandler function.
\r
7730 stream_.callbackInfo.doRealtime = true;
\r
7731 int priority = options->priority;
\r
7732 int min = sched_get_priority_min( SCHED_RR );
\r
7733 int max = sched_get_priority_max( SCHED_RR );
\r
7734 if ( priority < min ) priority = min;
\r
7735 else if ( priority > max ) priority = max;
\r
7736 stream_.callbackInfo.priority = priority;
\r
7740 stream_.callbackInfo.isRunning = true;
\r
7741 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7742 pthread_attr_destroy( &attr );
\r
7744 stream_.callbackInfo.isRunning = false;
\r
7745 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7754 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7755 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7756 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7758 stream_.apiHandle = 0;
\r
7761 if ( phandle) snd_pcm_close( phandle );
\r
7763 for ( int i=0; i<2; i++ ) {
\r
7764 if ( stream_.userBuffer[i] ) {
\r
7765 free( stream_.userBuffer[i] );
\r
7766 stream_.userBuffer[i] = 0;
\r
7770 if ( stream_.deviceBuffer ) {
\r
7771 free( stream_.deviceBuffer );
\r
7772 stream_.deviceBuffer = 0;
\r
7775 stream_.state = STREAM_CLOSED;
\r
7779 void RtApiAlsa :: closeStream()
\r
7781 if ( stream_.state == STREAM_CLOSED ) {
\r
7782 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7783 error( RtAudioError::WARNING );
\r
7787 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7788 stream_.callbackInfo.isRunning = false;
\r
7789 MUTEX_LOCK( &stream_.mutex );
\r
7790 if ( stream_.state == STREAM_STOPPED ) {
\r
7791 apiInfo->runnable = true;
\r
7792 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7794 MUTEX_UNLOCK( &stream_.mutex );
\r
7795 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7797 if ( stream_.state == STREAM_RUNNING ) {
\r
7798 stream_.state = STREAM_STOPPED;
\r
7799 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7800 snd_pcm_drop( apiInfo->handles[0] );
\r
7801 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7802 snd_pcm_drop( apiInfo->handles[1] );
\r
7806 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7807 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7808 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7810 stream_.apiHandle = 0;
\r
7813 for ( int i=0; i<2; i++ ) {
\r
7814 if ( stream_.userBuffer[i] ) {
\r
7815 free( stream_.userBuffer[i] );
\r
7816 stream_.userBuffer[i] = 0;
\r
7820 if ( stream_.deviceBuffer ) {
\r
7821 free( stream_.deviceBuffer );
\r
7822 stream_.deviceBuffer = 0;
\r
7825 stream_.mode = UNINITIALIZED;
\r
7826 stream_.state = STREAM_CLOSED;
\r
7829 void RtApiAlsa :: startStream()
\r
7831 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7834 if ( stream_.state == STREAM_RUNNING ) {
\r
7835 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7836 error( RtAudioError::WARNING );
\r
7840 MUTEX_LOCK( &stream_.mutex );
\r
7843 snd_pcm_state_t state;
\r
7844 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7845 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7846 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7847 state = snd_pcm_state( handle[0] );
\r
7848 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7849 result = snd_pcm_prepare( handle[0] );
\r
7850 if ( result < 0 ) {
\r
7851 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7852 errorText_ = errorStream_.str();
\r
7858 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7859 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7860 state = snd_pcm_state( handle[1] );
\r
7861 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7862 result = snd_pcm_prepare( handle[1] );
\r
7863 if ( result < 0 ) {
\r
7864 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7865 errorText_ = errorStream_.str();
\r
7871 stream_.state = STREAM_RUNNING;
\r
7874 apiInfo->runnable = true;
\r
7875 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7876 MUTEX_UNLOCK( &stream_.mutex );
\r
7878 if ( result >= 0 ) return;
\r
7879 error( RtAudioError::SYSTEM_ERROR );
\r
7882 void RtApiAlsa :: stopStream()
\r
7885 if ( stream_.state == STREAM_STOPPED ) {
\r
7886 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7887 error( RtAudioError::WARNING );
\r
7891 stream_.state = STREAM_STOPPED;
\r
7892 MUTEX_LOCK( &stream_.mutex );
\r
7895 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7896 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7897 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7898 if ( apiInfo->synchronized )
\r
7899 result = snd_pcm_drop( handle[0] );
\r
7901 result = snd_pcm_drain( handle[0] );
\r
7902 if ( result < 0 ) {
\r
7903 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7904 errorText_ = errorStream_.str();
\r
7909 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7910 result = snd_pcm_drop( handle[1] );
\r
7911 if ( result < 0 ) {
\r
7912 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7913 errorText_ = errorStream_.str();
\r
7919 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7920 MUTEX_UNLOCK( &stream_.mutex );
\r
7922 if ( result >= 0 ) return;
\r
7923 error( RtAudioError::SYSTEM_ERROR );
\r
7926 void RtApiAlsa :: abortStream()
\r
7929 if ( stream_.state == STREAM_STOPPED ) {
\r
7930 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7931 error( RtAudioError::WARNING );
\r
7935 stream_.state = STREAM_STOPPED;
\r
7936 MUTEX_LOCK( &stream_.mutex );
\r
7939 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7940 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7941 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7942 result = snd_pcm_drop( handle[0] );
\r
7943 if ( result < 0 ) {
\r
7944 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7945 errorText_ = errorStream_.str();
\r
7950 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7951 result = snd_pcm_drop( handle[1] );
\r
7952 if ( result < 0 ) {
\r
7953 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7954 errorText_ = errorStream_.str();
\r
7960 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7961 MUTEX_UNLOCK( &stream_.mutex );
\r
7963 if ( result >= 0 ) return;
\r
7964 error( RtAudioError::SYSTEM_ERROR );
\r
7967 void RtApiAlsa :: callbackEvent()
\r
7969 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7970 if ( stream_.state == STREAM_STOPPED ) {
\r
7971 MUTEX_LOCK( &stream_.mutex );
\r
7972 while ( !apiInfo->runnable )
\r
7973 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7975 if ( stream_.state != STREAM_RUNNING ) {
\r
7976 MUTEX_UNLOCK( &stream_.mutex );
\r
7979 MUTEX_UNLOCK( &stream_.mutex );
\r
7982 if ( stream_.state == STREAM_CLOSED ) {
\r
7983 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7984 error( RtAudioError::WARNING );
\r
7988 int doStopStream = 0;
\r
7989 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7990 double streamTime = getStreamTime();
\r
7991 RtAudioStreamStatus status = 0;
\r
7992 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7993 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7994 apiInfo->xrun[0] = false;
\r
7996 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7997 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7998 apiInfo->xrun[1] = false;
\r
8000 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
8001 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
8003 if ( doStopStream == 2 ) {
\r
8008 MUTEX_LOCK( &stream_.mutex );
\r
8010 // The state might change while waiting on a mutex.
\r
8011 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
8016 snd_pcm_t **handle;
\r
8017 snd_pcm_sframes_t frames;
\r
8018 RtAudioFormat format;
\r
8019 handle = (snd_pcm_t **) apiInfo->handles;
\r
8021 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
8023 // Setup parameters.
\r
8024 if ( stream_.doConvertBuffer[1] ) {
\r
8025 buffer = stream_.deviceBuffer;
\r
8026 channels = stream_.nDeviceChannels[1];
\r
8027 format = stream_.deviceFormat[1];
\r
8030 buffer = stream_.userBuffer[1];
\r
8031 channels = stream_.nUserChannels[1];
\r
8032 format = stream_.userFormat;
\r
8035 // Read samples from device in interleaved/non-interleaved format.
\r
8036 if ( stream_.deviceInterleaved[1] )
\r
8037 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
8039 void *bufs[channels];
\r
8040 size_t offset = stream_.bufferSize * formatBytes( format );
\r
8041 for ( int i=0; i<channels; i++ )
\r
8042 bufs[i] = (void *) (buffer + (i * offset));
\r
8043 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
8046 if ( result < (int) stream_.bufferSize ) {
\r
8047 // Either an error or overrun occured.
\r
8048 if ( result == -EPIPE ) {
\r
8049 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
8050 if ( state == SND_PCM_STATE_XRUN ) {
\r
8051 apiInfo->xrun[1] = true;
\r
8052 result = snd_pcm_prepare( handle[1] );
\r
8053 if ( result < 0 ) {
\r
8054 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
8055 errorText_ = errorStream_.str();
\r
8059 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8060 errorText_ = errorStream_.str();
\r
8064 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
8065 errorText_ = errorStream_.str();
\r
8067 error( RtAudioError::WARNING );
\r
8071 // Do byte swapping if necessary.
\r
8072 if ( stream_.doByteSwap[1] )
\r
8073 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
8075 // Do buffer conversion if necessary.
\r
8076 if ( stream_.doConvertBuffer[1] )
\r
8077 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
8079 // Check stream latency
\r
8080 result = snd_pcm_delay( handle[1], &frames );
\r
8081 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
8086 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8088 // Setup parameters and do buffer conversion if necessary.
\r
8089 if ( stream_.doConvertBuffer[0] ) {
\r
8090 buffer = stream_.deviceBuffer;
\r
8091 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
8092 channels = stream_.nDeviceChannels[0];
\r
8093 format = stream_.deviceFormat[0];
\r
8096 buffer = stream_.userBuffer[0];
\r
8097 channels = stream_.nUserChannels[0];
\r
8098 format = stream_.userFormat;
\r
8101 // Do byte swapping if necessary.
\r
8102 if ( stream_.doByteSwap[0] )
\r
8103 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
8105 // Write samples to device in interleaved/non-interleaved format.
\r
8106 if ( stream_.deviceInterleaved[0] )
\r
8107 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
8109 void *bufs[channels];
\r
8110 size_t offset = stream_.bufferSize * formatBytes( format );
\r
8111 for ( int i=0; i<channels; i++ )
\r
8112 bufs[i] = (void *) (buffer + (i * offset));
\r
8113 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
8116 if ( result < (int) stream_.bufferSize ) {
\r
8117 // Either an error or underrun occured.
\r
8118 if ( result == -EPIPE ) {
\r
8119 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
8120 if ( state == SND_PCM_STATE_XRUN ) {
\r
8121 apiInfo->xrun[0] = true;
\r
8122 result = snd_pcm_prepare( handle[0] );
\r
8123 if ( result < 0 ) {
\r
8124 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
8125 errorText_ = errorStream_.str();
\r
8128 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
\r
8131 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8132 errorText_ = errorStream_.str();
\r
8136 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
8137 errorText_ = errorStream_.str();
\r
8139 error( RtAudioError::WARNING );
\r
8143 // Check stream latency
\r
8144 result = snd_pcm_delay( handle[0], &frames );
\r
8145 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
8149 MUTEX_UNLOCK( &stream_.mutex );
\r
8151 RtApi::tickStreamTime();
\r
8152 if ( doStopStream == 1 ) this->stopStream();
\r
8155 static void *alsaCallbackHandler( void *ptr )
\r
8157 CallbackInfo *info = (CallbackInfo *) ptr;
\r
8158 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
8159 bool *isRunning = &info->isRunning;
\r
8161 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
8162 if ( info->doRealtime ) {
\r
8163 pthread_t tID = pthread_self(); // ID of this thread
\r
8164 sched_param prio = { info->priority }; // scheduling priority of thread
\r
8165 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
8169 while ( *isRunning == true ) {
\r
8170 pthread_testcancel();
\r
8171 object->callbackEvent();
\r
8174 pthread_exit( NULL );
\r
8177 //******************** End of __LINUX_ALSA__ *********************//
\r
8180 #if defined(__LINUX_PULSE__)
\r
8182 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8183 // and Tristan Matthews.
\r
8185 #include <pulse/error.h>
\r
8186 #include <pulse/simple.h>
\r
// Sample rates the PulseAudio backend advertises; zero-terminated so callers
// can iterate with `for ( const unsigned int *sr = ...; *sr; ++sr )`.
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};
\r
8192 struct rtaudio_pa_format_mapping_t {
\r
8193 RtAudioFormat rtaudio_format;
\r
8194 pa_sample_format_t pa_format;
\r
8197 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8198 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8199 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8200 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8201 {0, PA_SAMPLE_INVALID}};
\r
8203 struct PulseAudioHandle {
\r
8204 pa_simple *s_play;
\r
8207 pthread_cond_t runnable_cv;
\r
8209 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8212 RtApiPulse::~RtApiPulse()
\r
8214 if ( stream_.state != STREAM_CLOSED )
\r
8218 unsigned int RtApiPulse::getDeviceCount( void )
\r
8223 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8225 RtAudio::DeviceInfo info;
\r
8226 info.probed = true;
\r
8227 info.name = "PulseAudio";
\r
8228 info.outputChannels = 2;
\r
8229 info.inputChannels = 2;
\r
8230 info.duplexChannels = 2;
\r
8231 info.isDefaultOutput = true;
\r
8232 info.isDefaultInput = true;
\r
8234 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8235 info.sampleRates.push_back( *sr );
\r
8237 info.preferredSampleRate = 48000;
\r
8238 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8243 static void *pulseaudio_callback( void * user )
\r
8245 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8246 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8247 volatile bool *isRunning = &cbi->isRunning;
\r
8249 while ( *isRunning ) {
\r
8250 pthread_testcancel();
\r
8251 context->callbackEvent();
\r
8254 pthread_exit( NULL );
\r
8257 void RtApiPulse::closeStream( void )
\r
8259 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8261 stream_.callbackInfo.isRunning = false;
\r
8263 MUTEX_LOCK( &stream_.mutex );
\r
8264 if ( stream_.state == STREAM_STOPPED ) {
\r
8265 pah->runnable = true;
\r
8266 pthread_cond_signal( &pah->runnable_cv );
\r
8268 MUTEX_UNLOCK( &stream_.mutex );
\r
8270 pthread_join( pah->thread, 0 );
\r
8271 if ( pah->s_play ) {
\r
8272 pa_simple_flush( pah->s_play, NULL );
\r
8273 pa_simple_free( pah->s_play );
\r
8276 pa_simple_free( pah->s_rec );
\r
8278 pthread_cond_destroy( &pah->runnable_cv );
\r
8280 stream_.apiHandle = 0;
\r
8283 if ( stream_.userBuffer[0] ) {
\r
8284 free( stream_.userBuffer[0] );
\r
8285 stream_.userBuffer[0] = 0;
\r
8287 if ( stream_.userBuffer[1] ) {
\r
8288 free( stream_.userBuffer[1] );
\r
8289 stream_.userBuffer[1] = 0;
\r
8292 stream_.state = STREAM_CLOSED;
\r
8293 stream_.mode = UNINITIALIZED;
\r
8296 void RtApiPulse::callbackEvent( void )
\r
8298 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8300 if ( stream_.state == STREAM_STOPPED ) {
\r
8301 MUTEX_LOCK( &stream_.mutex );
\r
8302 while ( !pah->runnable )
\r
8303 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8305 if ( stream_.state != STREAM_RUNNING ) {
\r
8306 MUTEX_UNLOCK( &stream_.mutex );
\r
8309 MUTEX_UNLOCK( &stream_.mutex );
\r
8312 if ( stream_.state == STREAM_CLOSED ) {
\r
8313 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8314 "this shouldn't happen!";
\r
8315 error( RtAudioError::WARNING );
\r
8319 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8320 double streamTime = getStreamTime();
\r
8321 RtAudioStreamStatus status = 0;
\r
8322 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8323 stream_.bufferSize, streamTime, status,
\r
8324 stream_.callbackInfo.userData );
\r
8326 if ( doStopStream == 2 ) {
\r
8331 MUTEX_LOCK( &stream_.mutex );
\r
8332 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8333 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8335 if ( stream_.state != STREAM_RUNNING )
\r
8340 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8341 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8342 convertBuffer( stream_.deviceBuffer,
\r
8343 stream_.userBuffer[OUTPUT],
\r
8344 stream_.convertInfo[OUTPUT] );
\r
8345 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8346 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8348 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8349 formatBytes( stream_.userFormat );
\r
8351 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8352 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8353 pa_strerror( pa_error ) << ".";
\r
8354 errorText_ = errorStream_.str();
\r
8355 error( RtAudioError::WARNING );
\r
8359 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8360 if ( stream_.doConvertBuffer[INPUT] )
\r
8361 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8362 formatBytes( stream_.deviceFormat[INPUT] );
\r
8364 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8365 formatBytes( stream_.userFormat );
\r
8367 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8368 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8369 pa_strerror( pa_error ) << ".";
\r
8370 errorText_ = errorStream_.str();
\r
8371 error( RtAudioError::WARNING );
\r
8373 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8374 convertBuffer( stream_.userBuffer[INPUT],
\r
8375 stream_.deviceBuffer,
\r
8376 stream_.convertInfo[INPUT] );
\r
8381 MUTEX_UNLOCK( &stream_.mutex );
\r
8382 RtApi::tickStreamTime();
\r
8384 if ( doStopStream == 1 )
\r
8388 void RtApiPulse::startStream( void )
\r
8390 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8392 if ( stream_.state == STREAM_CLOSED ) {
\r
8393 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8394 error( RtAudioError::INVALID_USE );
\r
8397 if ( stream_.state == STREAM_RUNNING ) {
\r
8398 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8399 error( RtAudioError::WARNING );
\r
8403 MUTEX_LOCK( &stream_.mutex );
\r
8405 stream_.state = STREAM_RUNNING;
\r
8407 pah->runnable = true;
\r
8408 pthread_cond_signal( &pah->runnable_cv );
\r
8409 MUTEX_UNLOCK( &stream_.mutex );
\r
8412 void RtApiPulse::stopStream( void )
\r
8414 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8416 if ( stream_.state == STREAM_CLOSED ) {
\r
8417 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8418 error( RtAudioError::INVALID_USE );
\r
8421 if ( stream_.state == STREAM_STOPPED ) {
\r
8422 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8423 error( RtAudioError::WARNING );
\r
8427 stream_.state = STREAM_STOPPED;
\r
8428 MUTEX_LOCK( &stream_.mutex );
\r
8430 if ( pah && pah->s_play ) {
\r
8432 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8433 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8434 pa_strerror( pa_error ) << ".";
\r
8435 errorText_ = errorStream_.str();
\r
8436 MUTEX_UNLOCK( &stream_.mutex );
\r
8437 error( RtAudioError::SYSTEM_ERROR );
\r
8442 stream_.state = STREAM_STOPPED;
\r
8443 MUTEX_UNLOCK( &stream_.mutex );
\r
8446 void RtApiPulse::abortStream( void )
\r
8448 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8450 if ( stream_.state == STREAM_CLOSED ) {
\r
8451 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8452 error( RtAudioError::INVALID_USE );
\r
8455 if ( stream_.state == STREAM_STOPPED ) {
\r
8456 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8457 error( RtAudioError::WARNING );
\r
8461 stream_.state = STREAM_STOPPED;
\r
8462 MUTEX_LOCK( &stream_.mutex );
\r
8464 if ( pah && pah->s_play ) {
\r
8466 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8467 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8468 pa_strerror( pa_error ) << ".";
\r
8469 errorText_ = errorStream_.str();
\r
8470 MUTEX_UNLOCK( &stream_.mutex );
\r
8471 error( RtAudioError::SYSTEM_ERROR );
\r
8476 stream_.state = STREAM_STOPPED;
\r
8477 MUTEX_UNLOCK( &stream_.mutex );
\r
8480 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8481 unsigned int channels, unsigned int firstChannel,
\r
8482 unsigned int sampleRate, RtAudioFormat format,
\r
8483 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8485 PulseAudioHandle *pah = 0;
\r
8486 unsigned long bufferBytes = 0;
\r
8487 pa_sample_spec ss;
\r
8489 if ( device != 0 ) return false;
\r
8490 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8491 if ( channels != 1 && channels != 2 ) {
\r
8492 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8495 ss.channels = channels;
\r
8497 if ( firstChannel != 0 ) return false;
\r
8499 bool sr_found = false;
\r
8500 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8501 if ( sampleRate == *sr ) {
\r
8503 stream_.sampleRate = sampleRate;
\r
8504 ss.rate = sampleRate;
\r
8508 if ( !sr_found ) {
\r
8509 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8513 bool sf_found = 0;
\r
8514 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8515 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8516 if ( format == sf->rtaudio_format ) {
\r
8518 stream_.userFormat = sf->rtaudio_format;
\r
8519 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8520 ss.format = sf->pa_format;
\r
8524 if ( !sf_found ) { // Use internal data format conversion.
\r
8525 stream_.userFormat = format;
\r
8526 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8527 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8530 // Set other stream parameters.
\r
8531 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8532 else stream_.userInterleaved = true;
\r
8533 stream_.deviceInterleaved[mode] = true;
\r
8534 stream_.nBuffers = 1;
\r
8535 stream_.doByteSwap[mode] = false;
\r
8536 stream_.nUserChannels[mode] = channels;
\r
8537 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8538 stream_.channelOffset[mode] = 0;
\r
8539 std::string streamName = "RtAudio";
\r
8541 // Set flags for buffer conversion.
\r
8542 stream_.doConvertBuffer[mode] = false;
\r
8543 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8544 stream_.doConvertBuffer[mode] = true;
\r
8545 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8546 stream_.doConvertBuffer[mode] = true;
\r
8548 // Allocate necessary internal buffers.
\r
8549 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8550 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8551 if ( stream_.userBuffer[mode] == NULL ) {
\r
8552 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8555 stream_.bufferSize = *bufferSize;
\r
8557 if ( stream_.doConvertBuffer[mode] ) {
\r
8559 bool makeBuffer = true;
\r
8560 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8561 if ( mode == INPUT ) {
\r
8562 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8563 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8564 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8568 if ( makeBuffer ) {
\r
8569 bufferBytes *= *bufferSize;
\r
8570 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8571 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8572 if ( stream_.deviceBuffer == NULL ) {
\r
8573 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8579 stream_.device[mode] = device;
\r
8581 // Setup the buffer conversion information structure.
\r
8582 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8584 if ( !stream_.apiHandle ) {
\r
8585 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8587 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8591 stream_.apiHandle = pah;
\r
8592 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8593 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8597 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8600 if ( options && !options->streamName.empty() ) streamName = options->streamName;
\r
8603 pa_buffer_attr buffer_attr;
\r
8604 buffer_attr.fragsize = bufferBytes;
\r
8605 buffer_attr.maxlength = -1;
\r
8607 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8608 if ( !pah->s_rec ) {
\r
8609 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8614 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8615 if ( !pah->s_play ) {
\r
8616 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8624 if ( stream_.mode == UNINITIALIZED )
\r
8625 stream_.mode = mode;
\r
8626 else if ( stream_.mode == mode )
\r
8629 stream_.mode = DUPLEX;
\r
8631 if ( !stream_.callbackInfo.isRunning ) {
\r
8632 stream_.callbackInfo.object = this;
\r
8633 stream_.callbackInfo.isRunning = true;
\r
8634 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8635 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8640 stream_.state = STREAM_STOPPED;
\r
8644 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8645 pthread_cond_destroy( &pah->runnable_cv );
\r
8647 stream_.apiHandle = 0;
\r
8650 for ( int i=0; i<2; i++ ) {
\r
8651 if ( stream_.userBuffer[i] ) {
\r
8652 free( stream_.userBuffer[i] );
\r
8653 stream_.userBuffer[i] = 0;
\r
8657 if ( stream_.deviceBuffer ) {
\r
8658 free( stream_.deviceBuffer );
\r
8659 stream_.deviceBuffer = 0;
\r
8665 //******************** End of __LINUX_PULSE__ *********************//
\r
8668 #if defined(__LINUX_OSS__)
\r
8670 #include <unistd.h>
\r
8671 #include <sys/ioctl.h>
\r
8672 #include <unistd.h>
\r
8673 #include <fcntl.h>
\r
8674 #include <sys/soundcard.h>
\r
8675 #include <errno.h>
\r
8678 static void *ossCallbackHandler(void * ptr);
\r
8680 // A structure to hold various information related to the OSS API
\r
8681 // implementation.
\r
8682 struct OssHandle {
\r
8683 int id[2]; // device ids
\r
8686 pthread_cond_t runnable;
\r
8689 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
8692 RtApiOss :: RtApiOss()
\r
8694 // Nothing to do here.
\r
8697 RtApiOss :: ~RtApiOss()
\r
8699 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8702 unsigned int RtApiOss :: getDeviceCount( void )
\r
8704 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8705 if ( mixerfd == -1 ) {
\r
8706 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8707 error( RtAudioError::WARNING );
\r
8711 oss_sysinfo sysinfo;
\r
8712 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8714 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8715 error( RtAudioError::WARNING );
\r
8720 return sysinfo.numaudios;
\r
8723 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8725 RtAudio::DeviceInfo info;
\r
8726 info.probed = false;
\r
8728 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8729 if ( mixerfd == -1 ) {
\r
8730 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8731 error( RtAudioError::WARNING );
\r
8735 oss_sysinfo sysinfo;
\r
8736 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8737 if ( result == -1 ) {
\r
8739 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8740 error( RtAudioError::WARNING );
\r
8744 unsigned nDevices = sysinfo.numaudios;
\r
8745 if ( nDevices == 0 ) {
\r
8747 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8748 error( RtAudioError::INVALID_USE );
\r
8752 if ( device >= nDevices ) {
\r
8754 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8755 error( RtAudioError::INVALID_USE );
\r
8759 oss_audioinfo ainfo;
\r
8760 ainfo.dev = device;
\r
8761 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8763 if ( result == -1 ) {
\r
8764 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8765 errorText_ = errorStream_.str();
\r
8766 error( RtAudioError::WARNING );
\r
8771 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8772 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8773 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8774 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8775 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8778 // Probe data formats ... do for input
\r
8779 unsigned long mask = ainfo.iformats;
\r
8780 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8781 info.nativeFormats |= RTAUDIO_SINT16;
\r
8782 if ( mask & AFMT_S8 )
\r
8783 info.nativeFormats |= RTAUDIO_SINT8;
\r
8784 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8785 info.nativeFormats |= RTAUDIO_SINT32;
\r
8786 if ( mask & AFMT_FLOAT )
\r
8787 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8788 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8789 info.nativeFormats |= RTAUDIO_SINT24;
\r
8791 // Check that we have at least one supported format
\r
8792 if ( info.nativeFormats == 0 ) {
\r
8793 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8794 errorText_ = errorStream_.str();
\r
8795 error( RtAudioError::WARNING );
\r
8799 // Probe the supported sample rates.
\r
8800 info.sampleRates.clear();
\r
8801 if ( ainfo.nrates ) {
\r
8802 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8803 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8804 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8805 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8807 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8808 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8816 // Check min and max rate values;
\r
8817 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8818 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
\r
8819 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8821 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8822 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8827 if ( info.sampleRates.size() == 0 ) {
\r
8828 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8829 errorText_ = errorStream_.str();
\r
8830 error( RtAudioError::WARNING );
\r
8833 info.probed = true;
\r
8834 info.name = ainfo.name;
\r
8841 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8842 unsigned int firstChannel, unsigned int sampleRate,
\r
8843 RtAudioFormat format, unsigned int *bufferSize,
\r
8844 RtAudio::StreamOptions *options )
\r
8846 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8847 if ( mixerfd == -1 ) {
\r
8848 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8852 oss_sysinfo sysinfo;
\r
8853 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8854 if ( result == -1 ) {
\r
8856 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8860 unsigned nDevices = sysinfo.numaudios;
\r
8861 if ( nDevices == 0 ) {
\r
8862 // This should not happen because a check is made before this function is called.
\r
8864 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8868 if ( device >= nDevices ) {
\r
8869 // This should not happen because a check is made before this function is called.
\r
8871 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8875 oss_audioinfo ainfo;
\r
8876 ainfo.dev = device;
\r
8877 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8879 if ( result == -1 ) {
\r
8880 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8881 errorText_ = errorStream_.str();
\r
8885 // Check if device supports input or output
\r
8886 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8887 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8888 if ( mode == OUTPUT )
\r
8889 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8891 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8892 errorText_ = errorStream_.str();
\r
8897 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8898 if ( mode == OUTPUT )
\r
8899 flags |= O_WRONLY;
\r
8900 else { // mode == INPUT
\r
8901 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8902 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8903 close( handle->id[0] );
\r
8904 handle->id[0] = 0;
\r
8905 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8906 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8907 errorText_ = errorStream_.str();
\r
8910 // Check that the number previously set channels is the same.
\r
8911 if ( stream_.nUserChannels[0] != channels ) {
\r
8912 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8913 errorText_ = errorStream_.str();
\r
8919 flags |= O_RDONLY;
\r
8922 // Set exclusive access if specified.
\r
8923 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8925 // Try to open the device.
\r
8927 fd = open( ainfo.devnode, flags, 0 );
\r
8929 if ( errno == EBUSY )
\r
8930 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8932 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8933 errorText_ = errorStream_.str();
\r
8937 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8939 if ( flags | O_RDWR ) {
\r
8940 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8941 if ( result == -1) {
\r
8942 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8943 errorText_ = errorStream_.str();
\r
8949 // Check the device channel support.
\r
8950 stream_.nUserChannels[mode] = channels;
\r
8951 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8953 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8954 errorText_ = errorStream_.str();
\r
8958 // Set the number of channels.
\r
8959 int deviceChannels = channels + firstChannel;
\r
8960 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8961 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8963 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8964 errorText_ = errorStream_.str();
\r
8967 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8969 // Get the data format mask
\r
8971 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8972 if ( result == -1 ) {
\r
8974 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8975 errorText_ = errorStream_.str();
\r
8979 // Determine how to set the device format.
\r
8980 stream_.userFormat = format;
\r
8981 int deviceFormat = -1;
\r
8982 stream_.doByteSwap[mode] = false;
\r
8983 if ( format == RTAUDIO_SINT8 ) {
\r
8984 if ( mask & AFMT_S8 ) {
\r
8985 deviceFormat = AFMT_S8;
\r
8986 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8989 else if ( format == RTAUDIO_SINT16 ) {
\r
8990 if ( mask & AFMT_S16_NE ) {
\r
8991 deviceFormat = AFMT_S16_NE;
\r
8992 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8994 else if ( mask & AFMT_S16_OE ) {
\r
8995 deviceFormat = AFMT_S16_OE;
\r
8996 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8997 stream_.doByteSwap[mode] = true;
\r
9000 else if ( format == RTAUDIO_SINT24 ) {
\r
9001 if ( mask & AFMT_S24_NE ) {
\r
9002 deviceFormat = AFMT_S24_NE;
\r
9003 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9005 else if ( mask & AFMT_S24_OE ) {
\r
9006 deviceFormat = AFMT_S24_OE;
\r
9007 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9008 stream_.doByteSwap[mode] = true;
\r
9011 else if ( format == RTAUDIO_SINT32 ) {
\r
9012 if ( mask & AFMT_S32_NE ) {
\r
9013 deviceFormat = AFMT_S32_NE;
\r
9014 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9016 else if ( mask & AFMT_S32_OE ) {
\r
9017 deviceFormat = AFMT_S32_OE;
\r
9018 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9019 stream_.doByteSwap[mode] = true;
\r
9023 if ( deviceFormat == -1 ) {
\r
9024 // The user requested format is not natively supported by the device.
\r
9025 if ( mask & AFMT_S16_NE ) {
\r
9026 deviceFormat = AFMT_S16_NE;
\r
9027 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9029 else if ( mask & AFMT_S32_NE ) {
\r
9030 deviceFormat = AFMT_S32_NE;
\r
9031 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9033 else if ( mask & AFMT_S24_NE ) {
\r
9034 deviceFormat = AFMT_S24_NE;
\r
9035 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9037 else if ( mask & AFMT_S16_OE ) {
\r
9038 deviceFormat = AFMT_S16_OE;
\r
9039 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9040 stream_.doByteSwap[mode] = true;
\r
9042 else if ( mask & AFMT_S32_OE ) {
\r
9043 deviceFormat = AFMT_S32_OE;
\r
9044 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9045 stream_.doByteSwap[mode] = true;
\r
9047 else if ( mask & AFMT_S24_OE ) {
\r
9048 deviceFormat = AFMT_S24_OE;
\r
9049 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9050 stream_.doByteSwap[mode] = true;
\r
9052 else if ( mask & AFMT_S8) {
\r
9053 deviceFormat = AFMT_S8;
\r
9054 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
9058 if ( stream_.deviceFormat[mode] == 0 ) {
\r
9059 // This really shouldn't happen ...
\r
9061 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
9062 errorText_ = errorStream_.str();
\r
9066 // Set the data format.
\r
9067 int temp = deviceFormat;
\r
9068 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
9069 if ( result == -1 || deviceFormat != temp ) {
\r
9071 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
9072 errorText_ = errorStream_.str();
\r
9076 // Attempt to set the buffer size. According to OSS, the minimum
\r
9077 // number of buffers is two. The supposed minimum buffer size is 16
\r
9078 // bytes, so that will be our lower bound. The argument to this
\r
9079 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
9080 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
9081 // We'll check the actual value used near the end of the setup
\r
9083 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
9084 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
9086 if ( options ) buffers = options->numberOfBuffers;
\r
9087 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
9088 if ( buffers < 2 ) buffers = 3;
\r
9089 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
9090 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
9091 if ( result == -1 ) {
\r
9093 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
9094 errorText_ = errorStream_.str();
\r
9097 stream_.nBuffers = buffers;
\r
9099 // Save buffer size (in sample frames).
\r
9100 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
9101 stream_.bufferSize = *bufferSize;
\r
9103 // Set the sample rate.
\r
9104 int srate = sampleRate;
\r
9105 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
9106 if ( result == -1 ) {
\r
9108 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
9109 errorText_ = errorStream_.str();
\r
9113 // Verify the sample rate setup worked.
\r
9114 if ( abs( srate - sampleRate ) > 100 ) {
\r
9116 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
9117 errorText_ = errorStream_.str();
\r
9120 stream_.sampleRate = sampleRate;
\r
9122 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
9123 // We're doing duplex setup here.
\r
9124 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
9125 stream_.nDeviceChannels[0] = deviceChannels;
\r
9128 // Set interleaving parameters.
\r
9129 stream_.userInterleaved = true;
\r
9130 stream_.deviceInterleaved[mode] = true;
\r
9131 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
9132 stream_.userInterleaved = false;
\r
9134 // Set flags for buffer conversion
\r
9135 stream_.doConvertBuffer[mode] = false;
\r
9136 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
9137 stream_.doConvertBuffer[mode] = true;
\r
9138 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
9139 stream_.doConvertBuffer[mode] = true;
\r
9140 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
9141 stream_.nUserChannels[mode] > 1 )
\r
9142 stream_.doConvertBuffer[mode] = true;
\r
9144 // Allocate the stream handles if necessary and then save.
\r
9145 if ( stream_.apiHandle == 0 ) {
\r
9147 handle = new OssHandle;
\r
9149 catch ( std::bad_alloc& ) {
\r
9150 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
9154 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
9155 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
9159 stream_.apiHandle = (void *) handle;
\r
9162 handle = (OssHandle *) stream_.apiHandle;
\r
9164 handle->id[mode] = fd;
\r
9166 // Allocate necessary internal buffers.
\r
9167 unsigned long bufferBytes;
\r
9168 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
9169 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
9170 if ( stream_.userBuffer[mode] == NULL ) {
\r
9171 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
9175 if ( stream_.doConvertBuffer[mode] ) {
\r
9177 bool makeBuffer = true;
\r
9178 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
9179 if ( mode == INPUT ) {
\r
9180 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
9181 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
9182 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9186 if ( makeBuffer ) {
\r
9187 bufferBytes *= *bufferSize;
\r
9188 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9189 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9190 if ( stream_.deviceBuffer == NULL ) {
\r
9191 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9197 stream_.device[mode] = device;
\r
9198 stream_.state = STREAM_STOPPED;
\r
9200 // Setup the buffer conversion information structure.
\r
9201 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9203 // Setup thread if necessary.
\r
9204 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9205 // We had already set up an output stream.
\r
9206 stream_.mode = DUPLEX;
\r
9207 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9210 stream_.mode = mode;
\r
9212 // Setup callback thread.
\r
9213 stream_.callbackInfo.object = (void *) this;
\r
9215 // Set the thread attributes for joinable and realtime scheduling
\r
9216 // priority. The higher priority will only take affect if the
\r
9217 // program is run as root or suid.
\r
9218 pthread_attr_t attr;
\r
9219 pthread_attr_init( &attr );
\r
9220 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9221 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9222 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9223 struct sched_param param;
\r
9224 int priority = options->priority;
\r
9225 int min = sched_get_priority_min( SCHED_RR );
\r
9226 int max = sched_get_priority_max( SCHED_RR );
\r
9227 if ( priority < min ) priority = min;
\r
9228 else if ( priority > max ) priority = max;
\r
9229 param.sched_priority = priority;
\r
9230 pthread_attr_setschedparam( &attr, ¶m );
\r
9231 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9234 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9236 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9239 stream_.callbackInfo.isRunning = true;
\r
9240 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9241 pthread_attr_destroy( &attr );
\r
9243 stream_.callbackInfo.isRunning = false;
\r
9244 errorText_ = "RtApiOss::error creating callback thread!";
\r
9253 pthread_cond_destroy( &handle->runnable );
\r
9254 if ( handle->id[0] ) close( handle->id[0] );
\r
9255 if ( handle->id[1] ) close( handle->id[1] );
\r
9257 stream_.apiHandle = 0;
\r
9260 for ( int i=0; i<2; i++ ) {
\r
9261 if ( stream_.userBuffer[i] ) {
\r
9262 free( stream_.userBuffer[i] );
\r
9263 stream_.userBuffer[i] = 0;
\r
9267 if ( stream_.deviceBuffer ) {
\r
9268 free( stream_.deviceBuffer );
\r
9269 stream_.deviceBuffer = 0;
\r
9275 void RtApiOss :: closeStream()
\r
9277 if ( stream_.state == STREAM_CLOSED ) {
\r
9278 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9279 error( RtAudioError::WARNING );
\r
9283 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9284 stream_.callbackInfo.isRunning = false;
\r
9285 MUTEX_LOCK( &stream_.mutex );
\r
9286 if ( stream_.state == STREAM_STOPPED )
\r
9287 pthread_cond_signal( &handle->runnable );
\r
9288 MUTEX_UNLOCK( &stream_.mutex );
\r
9289 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9291 if ( stream_.state == STREAM_RUNNING ) {
\r
9292 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9293 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9295 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9296 stream_.state = STREAM_STOPPED;
\r
9300 pthread_cond_destroy( &handle->runnable );
\r
9301 if ( handle->id[0] ) close( handle->id[0] );
\r
9302 if ( handle->id[1] ) close( handle->id[1] );
\r
9304 stream_.apiHandle = 0;
\r
9307 for ( int i=0; i<2; i++ ) {
\r
9308 if ( stream_.userBuffer[i] ) {
\r
9309 free( stream_.userBuffer[i] );
\r
9310 stream_.userBuffer[i] = 0;
\r
9314 if ( stream_.deviceBuffer ) {
\r
9315 free( stream_.deviceBuffer );
\r
9316 stream_.deviceBuffer = 0;
\r
9319 stream_.mode = UNINITIALIZED;
\r
9320 stream_.state = STREAM_CLOSED;
\r
9323 void RtApiOss :: startStream()
\r
9326 if ( stream_.state == STREAM_RUNNING ) {
\r
9327 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9328 error( RtAudioError::WARNING );
\r
9332 MUTEX_LOCK( &stream_.mutex );
\r
9334 stream_.state = STREAM_RUNNING;
\r
9336 // No need to do anything else here ... OSS automatically starts
\r
9337 // when fed samples.
\r
9339 MUTEX_UNLOCK( &stream_.mutex );
\r
9341 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9342 pthread_cond_signal( &handle->runnable );
\r
9345 void RtApiOss :: stopStream()
\r
9348 if ( stream_.state == STREAM_STOPPED ) {
\r
9349 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9350 error( RtAudioError::WARNING );
\r
9354 MUTEX_LOCK( &stream_.mutex );
\r
9356 // The state might change while waiting on a mutex.
\r
9357 if ( stream_.state == STREAM_STOPPED ) {
\r
9358 MUTEX_UNLOCK( &stream_.mutex );
\r
9363 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9364 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9366 // Flush the output with zeros a few times.
\r
9369 RtAudioFormat format;
\r
9371 if ( stream_.doConvertBuffer[0] ) {
\r
9372 buffer = stream_.deviceBuffer;
\r
9373 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9374 format = stream_.deviceFormat[0];
\r
9377 buffer = stream_.userBuffer[0];
\r
9378 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9379 format = stream_.userFormat;
\r
9382 memset( buffer, 0, samples * formatBytes(format) );
\r
9383 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9384 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9385 if ( result == -1 ) {
\r
9386 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9387 error( RtAudioError::WARNING );
\r
9391 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9392 if ( result == -1 ) {
\r
9393 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9394 errorText_ = errorStream_.str();
\r
9397 handle->triggered = false;
\r
9400 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9401 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9402 if ( result == -1 ) {
\r
9403 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9404 errorText_ = errorStream_.str();
\r
9410 stream_.state = STREAM_STOPPED;
\r
9411 MUTEX_UNLOCK( &stream_.mutex );
\r
9413 if ( result != -1 ) return;
\r
9414 error( RtAudioError::SYSTEM_ERROR );
\r
9417 void RtApiOss :: abortStream()
\r
9420 if ( stream_.state == STREAM_STOPPED ) {
\r
9421 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9422 error( RtAudioError::WARNING );
\r
9426 MUTEX_LOCK( &stream_.mutex );
\r
9428 // The state might change while waiting on a mutex.
\r
9429 if ( stream_.state == STREAM_STOPPED ) {
\r
9430 MUTEX_UNLOCK( &stream_.mutex );
\r
9435 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9436 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9437 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9438 if ( result == -1 ) {
\r
9439 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9440 errorText_ = errorStream_.str();
\r
9443 handle->triggered = false;
\r
9446 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9447 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9448 if ( result == -1 ) {
\r
9449 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9450 errorText_ = errorStream_.str();
\r
9456 stream_.state = STREAM_STOPPED;
\r
9457 MUTEX_UNLOCK( &stream_.mutex );
\r
9459 if ( result != -1 ) return;
\r
9460 error( RtAudioError::SYSTEM_ERROR );
\r
9463 void RtApiOss :: callbackEvent()
\r
9465 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9466 if ( stream_.state == STREAM_STOPPED ) {
\r
9467 MUTEX_LOCK( &stream_.mutex );
\r
9468 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9469 if ( stream_.state != STREAM_RUNNING ) {
\r
9470 MUTEX_UNLOCK( &stream_.mutex );
\r
9473 MUTEX_UNLOCK( &stream_.mutex );
\r
9476 if ( stream_.state == STREAM_CLOSED ) {
\r
9477 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9478 error( RtAudioError::WARNING );
\r
9482 // Invoke user callback to get fresh output data.
\r
9483 int doStopStream = 0;
\r
9484 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9485 double streamTime = getStreamTime();
\r
9486 RtAudioStreamStatus status = 0;
\r
9487 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9488 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9489 handle->xrun[0] = false;
\r
9491 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9492 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9493 handle->xrun[1] = false;
\r
9495 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9496 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9497 if ( doStopStream == 2 ) {
\r
9498 this->abortStream();
\r
9502 MUTEX_LOCK( &stream_.mutex );
\r
9504 // The state might change while waiting on a mutex.
\r
9505 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9510 RtAudioFormat format;
\r
9512 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9514 // Setup parameters and do buffer conversion if necessary.
\r
9515 if ( stream_.doConvertBuffer[0] ) {
\r
9516 buffer = stream_.deviceBuffer;
\r
9517 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9518 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9519 format = stream_.deviceFormat[0];
\r
9522 buffer = stream_.userBuffer[0];
\r
9523 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9524 format = stream_.userFormat;
\r
9527 // Do byte swapping if necessary.
\r
9528 if ( stream_.doByteSwap[0] )
\r
9529 byteSwapBuffer( buffer, samples, format );
\r
9531 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9533 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9534 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9535 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9536 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9537 handle->triggered = true;
\r
9540 // Write samples to device.
\r
9541 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9543 if ( result == -1 ) {
\r
9544 // We'll assume this is an underrun, though there isn't a
\r
9545 // specific means for determining that.
\r
9546 handle->xrun[0] = true;
\r
9547 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9548 error( RtAudioError::WARNING );
\r
9549 // Continue on to input section.
\r
9553 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9555 // Setup parameters.
\r
9556 if ( stream_.doConvertBuffer[1] ) {
\r
9557 buffer = stream_.deviceBuffer;
\r
9558 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9559 format = stream_.deviceFormat[1];
\r
9562 buffer = stream_.userBuffer[1];
\r
9563 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9564 format = stream_.userFormat;
\r
9567 // Read samples from device.
\r
9568 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9570 if ( result == -1 ) {
\r
9571 // We'll assume this is an overrun, though there isn't a
\r
9572 // specific means for determining that.
\r
9573 handle->xrun[1] = true;
\r
9574 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9575 error( RtAudioError::WARNING );
\r
9579 // Do byte swapping if necessary.
\r
9580 if ( stream_.doByteSwap[1] )
\r
9581 byteSwapBuffer( buffer, samples, format );
\r
9583 // Do buffer conversion if necessary.
\r
9584 if ( stream_.doConvertBuffer[1] )
\r
9585 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9589 MUTEX_UNLOCK( &stream_.mutex );
\r
9591 RtApi::tickStreamTime();
\r
9592 if ( doStopStream == 1 ) this->stopStream();
\r
9595 static void *ossCallbackHandler( void *ptr )
\r
9597 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9598 RtApiOss *object = (RtApiOss *) info->object;
\r
9599 bool *isRunning = &info->isRunning;
\r
9601 while ( *isRunning == true ) {
\r
9602 pthread_testcancel();
\r
9603 object->callbackEvent();
\r
9606 pthread_exit( NULL );
\r
9609 //******************** End of __LINUX_OSS__ *********************//
\r
9613 // *************************************************** //
\r
9615 // Protected common (OS-independent) RtAudio methods.
\r
9617 // *************************************************** //
\r
9619 // This method can be modified to control the behavior of error
\r
9620 // message printing.
\r
9621 void RtApi :: error( RtAudioError::Type type )
\r
9623 errorStream_.str(""); // clear the ostringstream
\r
9625 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9626 if ( errorCallback ) {
\r
9627 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9629 if ( firstErrorOccurred_ )
\r
9632 firstErrorOccurred_ = true;
\r
9633 const std::string errorMessage = errorText_;
\r
9635 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9636 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9640 errorCallback( type, errorMessage );
\r
9641 firstErrorOccurred_ = false;
\r
9645 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9646 std::cerr << '\n' << errorText_ << "\n\n";
\r
9647 else if ( type != RtAudioError::WARNING )
\r
9648 throw( RtAudioError( errorText_, type ) );
\r
9651 void RtApi :: verifyStream()
\r
9653 if ( stream_.state == STREAM_CLOSED ) {
\r
9654 errorText_ = "RtApi:: a stream is not open!";
\r
9655 error( RtAudioError::INVALID_USE );
\r
9659 void RtApi :: clearStreamInfo()
\r
9661 stream_.mode = UNINITIALIZED;
\r
9662 stream_.state = STREAM_CLOSED;
\r
9663 stream_.sampleRate = 0;
\r
9664 stream_.bufferSize = 0;
\r
9665 stream_.nBuffers = 0;
\r
9666 stream_.userFormat = 0;
\r
9667 stream_.userInterleaved = true;
\r
9668 stream_.streamTime = 0.0;
\r
9669 stream_.apiHandle = 0;
\r
9670 stream_.deviceBuffer = 0;
\r
9671 stream_.callbackInfo.callback = 0;
\r
9672 stream_.callbackInfo.userData = 0;
\r
9673 stream_.callbackInfo.isRunning = false;
\r
9674 stream_.callbackInfo.errorCallback = 0;
\r
9675 for ( int i=0; i<2; i++ ) {
\r
9676 stream_.device[i] = 11111;
\r
9677 stream_.doConvertBuffer[i] = false;
\r
9678 stream_.deviceInterleaved[i] = true;
\r
9679 stream_.doByteSwap[i] = false;
\r
9680 stream_.nUserChannels[i] = 0;
\r
9681 stream_.nDeviceChannels[i] = 0;
\r
9682 stream_.channelOffset[i] = 0;
\r
9683 stream_.deviceFormat[i] = 0;
\r
9684 stream_.latency[i] = 0;
\r
9685 stream_.userBuffer[i] = 0;
\r
9686 stream_.convertInfo[i].channels = 0;
\r
9687 stream_.convertInfo[i].inJump = 0;
\r
9688 stream_.convertInfo[i].outJump = 0;
\r
9689 stream_.convertInfo[i].inFormat = 0;
\r
9690 stream_.convertInfo[i].outFormat = 0;
\r
9691 stream_.convertInfo[i].inOffset.clear();
\r
9692 stream_.convertInfo[i].outOffset.clear();
\r
9696 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9698 if ( format == RTAUDIO_SINT16 )
\r
9700 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9702 else if ( format == RTAUDIO_FLOAT64 )
\r
9704 else if ( format == RTAUDIO_SINT24 )
\r
9706 else if ( format == RTAUDIO_SINT8 )
\r
9709 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9710 error( RtAudioError::WARNING );
\r
9715 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9717 if ( mode == INPUT ) { // convert device to user buffer
\r
9718 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9719 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9720 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9721 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9723 else { // convert user to device buffer
\r
9724 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9725 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9726 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9727 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9730 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9731 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9733 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9735 // Set up the interleave/deinterleave offsets.
\r
9736 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9737 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9738 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9739 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9740 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9741 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9742 stream_.convertInfo[mode].inJump = 1;
\r
9746 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9747 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9748 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9749 stream_.convertInfo[mode].outJump = 1;
\r
9753 else { // no (de)interleaving
\r
9754 if ( stream_.userInterleaved ) {
\r
9755 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9756 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9757 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9761 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9762 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9763 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9764 stream_.convertInfo[mode].inJump = 1;
\r
9765 stream_.convertInfo[mode].outJump = 1;
\r
9770 // Add channel offset.
\r
9771 if ( firstChannel > 0 ) {
\r
9772 if ( stream_.deviceInterleaved[mode] ) {
\r
9773 if ( mode == OUTPUT ) {
\r
9774 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9775 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9778 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9779 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9783 if ( mode == OUTPUT ) {
\r
9784 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9785 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9788 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9789 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9795 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9797 // This function does format conversion, input/output channel compensation, and
\r
9798 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9799 // the lower three bytes of a 32-bit integer.
\r
9801 // Clear our device buffer when in/out duplex device channels are different
\r
9802 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9803 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9804 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9807 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9809 Float64 *out = (Float64 *)outBuffer;
\r
9811 if (info.inFormat == RTAUDIO_SINT8) {
\r
9812 signed char *in = (signed char *)inBuffer;
\r
9813 scale = 1.0 / 127.5;
\r
9814 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9815 for (j=0; j<info.channels; j++) {
\r
9816 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9817 out[info.outOffset[j]] += 0.5;
\r
9818 out[info.outOffset[j]] *= scale;
\r
9820 in += info.inJump;
\r
9821 out += info.outJump;
\r
9824 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9825 Int16 *in = (Int16 *)inBuffer;
\r
9826 scale = 1.0 / 32767.5;
\r
9827 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9828 for (j=0; j<info.channels; j++) {
\r
9829 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9830 out[info.outOffset[j]] += 0.5;
\r
9831 out[info.outOffset[j]] *= scale;
\r
9833 in += info.inJump;
\r
9834 out += info.outJump;
\r
9837 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9838 Int24 *in = (Int24 *)inBuffer;
\r
9839 scale = 1.0 / 8388607.5;
\r
9840 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9841 for (j=0; j<info.channels; j++) {
\r
9842 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9843 out[info.outOffset[j]] += 0.5;
\r
9844 out[info.outOffset[j]] *= scale;
\r
9846 in += info.inJump;
\r
9847 out += info.outJump;
\r
9850 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9851 Int32 *in = (Int32 *)inBuffer;
\r
9852 scale = 1.0 / 2147483647.5;
\r
9853 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9854 for (j=0; j<info.channels; j++) {
\r
9855 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9856 out[info.outOffset[j]] += 0.5;
\r
9857 out[info.outOffset[j]] *= scale;
\r
9859 in += info.inJump;
\r
9860 out += info.outJump;
\r
9863 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9864 Float32 *in = (Float32 *)inBuffer;
\r
9865 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9866 for (j=0; j<info.channels; j++) {
\r
9867 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9869 in += info.inJump;
\r
9870 out += info.outJump;
\r
9873 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9874 // Channel compensation and/or (de)interleaving only.
\r
9875 Float64 *in = (Float64 *)inBuffer;
\r
9876 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9877 for (j=0; j<info.channels; j++) {
\r
9878 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9880 in += info.inJump;
\r
9881 out += info.outJump;
\r
9885 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9887 Float32 *out = (Float32 *)outBuffer;
\r
9889 if (info.inFormat == RTAUDIO_SINT8) {
\r
9890 signed char *in = (signed char *)inBuffer;
\r
9891 scale = (Float32) ( 1.0 / 127.5 );
\r
9892 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9893 for (j=0; j<info.channels; j++) {
\r
9894 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9895 out[info.outOffset[j]] += 0.5;
\r
9896 out[info.outOffset[j]] *= scale;
\r
9898 in += info.inJump;
\r
9899 out += info.outJump;
\r
9902 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9903 Int16 *in = (Int16 *)inBuffer;
\r
9904 scale = (Float32) ( 1.0 / 32767.5 );
\r
9905 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9906 for (j=0; j<info.channels; j++) {
\r
9907 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9908 out[info.outOffset[j]] += 0.5;
\r
9909 out[info.outOffset[j]] *= scale;
\r
9911 in += info.inJump;
\r
9912 out += info.outJump;
\r
9915 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9916 Int24 *in = (Int24 *)inBuffer;
\r
9917 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9918 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9919 for (j=0; j<info.channels; j++) {
\r
9920 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9921 out[info.outOffset[j]] += 0.5;
\r
9922 out[info.outOffset[j]] *= scale;
\r
9924 in += info.inJump;
\r
9925 out += info.outJump;
\r
9928 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9929 Int32 *in = (Int32 *)inBuffer;
\r
9930 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9931 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9932 for (j=0; j<info.channels; j++) {
\r
9933 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9934 out[info.outOffset[j]] += 0.5;
\r
9935 out[info.outOffset[j]] *= scale;
\r
9937 in += info.inJump;
\r
9938 out += info.outJump;
\r
9941 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9942 // Channel compensation and/or (de)interleaving only.
\r
9943 Float32 *in = (Float32 *)inBuffer;
\r
9944 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9945 for (j=0; j<info.channels; j++) {
\r
9946 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9948 in += info.inJump;
\r
9949 out += info.outJump;
\r
9952 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9953 Float64 *in = (Float64 *)inBuffer;
\r
9954 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9955 for (j=0; j<info.channels; j++) {
\r
9956 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9958 in += info.inJump;
\r
9959 out += info.outJump;
\r
9963 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9964 Int32 *out = (Int32 *)outBuffer;
\r
9965 if (info.inFormat == RTAUDIO_SINT8) {
\r
9966 signed char *in = (signed char *)inBuffer;
\r
9967 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9968 for (j=0; j<info.channels; j++) {
\r
9969 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9970 out[info.outOffset[j]] <<= 24;
\r
9972 in += info.inJump;
\r
9973 out += info.outJump;
\r
9976 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9977 Int16 *in = (Int16 *)inBuffer;
\r
9978 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9979 for (j=0; j<info.channels; j++) {
\r
9980 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9981 out[info.outOffset[j]] <<= 16;
\r
9983 in += info.inJump;
\r
9984 out += info.outJump;
\r
9987 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9988 Int24 *in = (Int24 *)inBuffer;
\r
9989 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9990 for (j=0; j<info.channels; j++) {
\r
9991 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9992 out[info.outOffset[j]] <<= 8;
\r
9994 in += info.inJump;
\r
9995 out += info.outJump;
\r
9998 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9999 // Channel compensation and/or (de)interleaving only.
\r
10000 Int32 *in = (Int32 *)inBuffer;
\r
10001 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10002 for (j=0; j<info.channels; j++) {
\r
10003 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10005 in += info.inJump;
\r
10006 out += info.outJump;
\r
10009 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10010 Float32 *in = (Float32 *)inBuffer;
\r
10011 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10012 for (j=0; j<info.channels; j++) {
\r
10013 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
10015 in += info.inJump;
\r
10016 out += info.outJump;
\r
10019 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10020 Float64 *in = (Float64 *)inBuffer;
\r
10021 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10022 for (j=0; j<info.channels; j++) {
\r
10023 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
10025 in += info.inJump;
\r
10026 out += info.outJump;
\r
10030 else if (info.outFormat == RTAUDIO_SINT24) {
\r
10031 Int24 *out = (Int24 *)outBuffer;
\r
10032 if (info.inFormat == RTAUDIO_SINT8) {
\r
10033 signed char *in = (signed char *)inBuffer;
\r
10034 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10035 for (j=0; j<info.channels; j++) {
\r
10036 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
10037 //out[info.outOffset[j]] <<= 16;
\r
10039 in += info.inJump;
\r
10040 out += info.outJump;
\r
10043 else if (info.inFormat == RTAUDIO_SINT16) {
\r
10044 Int16 *in = (Int16 *)inBuffer;
\r
10045 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10046 for (j=0; j<info.channels; j++) {
\r
10047 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
10048 //out[info.outOffset[j]] <<= 8;
\r
10050 in += info.inJump;
\r
10051 out += info.outJump;
\r
10054 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10055 // Channel compensation and/or (de)interleaving only.
\r
10056 Int24 *in = (Int24 *)inBuffer;
\r
10057 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10058 for (j=0; j<info.channels; j++) {
\r
10059 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10061 in += info.inJump;
\r
10062 out += info.outJump;
\r
10065 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10066 Int32 *in = (Int32 *)inBuffer;
\r
10067 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10068 for (j=0; j<info.channels; j++) {
\r
10069 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
10070 //out[info.outOffset[j]] >>= 8;
\r
10072 in += info.inJump;
\r
10073 out += info.outJump;
\r
10076 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10077 Float32 *in = (Float32 *)inBuffer;
\r
10078 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10079 for (j=0; j<info.channels; j++) {
\r
10080 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
10082 in += info.inJump;
\r
10083 out += info.outJump;
\r
10086 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10087 Float64 *in = (Float64 *)inBuffer;
\r
10088 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10089 for (j=0; j<info.channels; j++) {
\r
10090 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
10092 in += info.inJump;
\r
10093 out += info.outJump;
\r
10097 else if (info.outFormat == RTAUDIO_SINT16) {
\r
10098 Int16 *out = (Int16 *)outBuffer;
\r
10099 if (info.inFormat == RTAUDIO_SINT8) {
\r
10100 signed char *in = (signed char *)inBuffer;
\r
10101 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10102 for (j=0; j<info.channels; j++) {
\r
10103 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
10104 out[info.outOffset[j]] <<= 8;
\r
10106 in += info.inJump;
\r
10107 out += info.outJump;
\r
10110 else if (info.inFormat == RTAUDIO_SINT16) {
\r
10111 // Channel compensation and/or (de)interleaving only.
\r
10112 Int16 *in = (Int16 *)inBuffer;
\r
10113 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10114 for (j=0; j<info.channels; j++) {
\r
10115 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10117 in += info.inJump;
\r
10118 out += info.outJump;
\r
10121 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10122 Int24 *in = (Int24 *)inBuffer;
\r
10123 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10124 for (j=0; j<info.channels; j++) {
\r
10125 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
10127 in += info.inJump;
\r
10128 out += info.outJump;
\r
10131 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10132 Int32 *in = (Int32 *)inBuffer;
\r
10133 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10134 for (j=0; j<info.channels; j++) {
\r
10135 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
10137 in += info.inJump;
\r
10138 out += info.outJump;
\r
10141 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10142 Float32 *in = (Float32 *)inBuffer;
\r
10143 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10144 for (j=0; j<info.channels; j++) {
\r
10145 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10147 in += info.inJump;
\r
10148 out += info.outJump;
\r
10151 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10152 Float64 *in = (Float64 *)inBuffer;
\r
10153 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10154 for (j=0; j<info.channels; j++) {
\r
10155 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10157 in += info.inJump;
\r
10158 out += info.outJump;
\r
10162 else if (info.outFormat == RTAUDIO_SINT8) {
\r
10163 signed char *out = (signed char *)outBuffer;
\r
10164 if (info.inFormat == RTAUDIO_SINT8) {
\r
10165 // Channel compensation and/or (de)interleaving only.
\r
10166 signed char *in = (signed char *)inBuffer;
\r
10167 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10168 for (j=0; j<info.channels; j++) {
\r
10169 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10171 in += info.inJump;
\r
10172 out += info.outJump;
\r
10175 if (info.inFormat == RTAUDIO_SINT16) {
\r
10176 Int16 *in = (Int16 *)inBuffer;
\r
10177 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10178 for (j=0; j<info.channels; j++) {
\r
10179 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10181 in += info.inJump;
\r
10182 out += info.outJump;
\r
10185 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10186 Int24 *in = (Int24 *)inBuffer;
\r
10187 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10188 for (j=0; j<info.channels; j++) {
\r
10189 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10191 in += info.inJump;
\r
10192 out += info.outJump;
\r
10195 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10196 Int32 *in = (Int32 *)inBuffer;
\r
10197 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10198 for (j=0; j<info.channels; j++) {
\r
10199 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10201 in += info.inJump;
\r
10202 out += info.outJump;
\r
10205 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10206 Float32 *in = (Float32 *)inBuffer;
\r
10207 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10208 for (j=0; j<info.channels; j++) {
\r
10209 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10211 in += info.inJump;
\r
10212 out += info.outJump;
\r
10215 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10216 Float64 *in = (Float64 *)inBuffer;
\r
10217 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10218 for (j=0; j<info.channels; j++) {
\r
10219 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10221 in += info.inJump;
\r
10222 out += info.outJump;
\r
10228 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10229 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10230 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10232 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10238 if ( format == RTAUDIO_SINT16 ) {
\r
10239 for ( unsigned int i=0; i<samples; i++ ) {
\r
10240 // Swap 1st and 2nd bytes.
\r
10242 *(ptr) = *(ptr+1);
\r
10245 // Increment 2 bytes.
\r
10249 else if ( format == RTAUDIO_SINT32 ||
\r
10250 format == RTAUDIO_FLOAT32 ) {
\r
10251 for ( unsigned int i=0; i<samples; i++ ) {
\r
10252 // Swap 1st and 4th bytes.
\r
10254 *(ptr) = *(ptr+3);
\r
10257 // Swap 2nd and 3rd bytes.
\r
10260 *(ptr) = *(ptr+1);
\r
10263 // Increment 3 more bytes.
\r
10267 else if ( format == RTAUDIO_SINT24 ) {
\r
10268 for ( unsigned int i=0; i<samples; i++ ) {
\r
10269 // Swap 1st and 3rd bytes.
\r
10271 *(ptr) = *(ptr+2);
\r
10274 // Increment 2 more bytes.
\r
10278 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10279 for ( unsigned int i=0; i<samples; i++ ) {
\r
10280 // Swap 1st and 8th bytes
\r
10282 *(ptr) = *(ptr+7);
\r
10285 // Swap 2nd and 7th bytes
\r
10288 *(ptr) = *(ptr+5);
\r
10291 // Swap 3rd and 6th bytes
\r
10294 *(ptr) = *(ptr+3);
\r
10297 // Swap 4th and 5th bytes
\r
10300 *(ptr) = *(ptr+1);
\r
10303 // Increment 5 more bytes.
\r
10309 // Indentation settings for Vim and Emacs
\r
10311 // Local Variables:
\r
10312 // c-basic-offset: 2
\r
10313 // indent-tabs-mode: nil
\r
10316 // vim: et sts=2 sw=2
\r