1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound, ASIO and WASAPI) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2016 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.1.2
\r
#include "RtAudio.h"

#include <iostream>
#include <cstring>
#include <algorithm>
\r
51 // Static variable definitions.
\r
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
53 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-dependent mutex macros and string-conversion helpers.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  // Narrow-string overload: no conversion required.
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Wide-string overload: convert UTF-16 to a UTF-8 std::string.
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    // First call computes the required buffer size (includes the terminator).
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
\r
90 // *************************************************** //
\r
92 // RtAudio definitions.
\r
94 // *************************************************** //
\r
96 std::string RtAudio :: getVersion( void ) throw()
\r
98 return RTAUDIO_VERSION;
\r
101 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
105 // The order here will control the order of RtAudio's API search in
\r
106 // the constructor.
\r
107 #if defined(__UNIX_JACK__)
\r
108 apis.push_back( UNIX_JACK );
\r
110 #if defined(__LINUX_ALSA__)
\r
111 apis.push_back( LINUX_ALSA );
\r
113 #if defined(__LINUX_PULSE__)
\r
114 apis.push_back( LINUX_PULSE );
\r
116 #if defined(__LINUX_OSS__)
\r
117 apis.push_back( LINUX_OSS );
\r
119 #if defined(__WINDOWS_ASIO__)
\r
120 apis.push_back( WINDOWS_ASIO );
\r
122 #if defined(__WINDOWS_WASAPI__)
\r
123 apis.push_back( WINDOWS_WASAPI );
\r
125 #if defined(__WINDOWS_DS__)
\r
126 apis.push_back( WINDOWS_DS );
\r
128 #if defined(__MACOSX_CORE__)
\r
129 apis.push_back( MACOSX_CORE );
\r
131 #if defined(__RTAUDIO_DUMMY__)
\r
132 apis.push_back( RTAUDIO_DUMMY );
\r
136 void RtAudio :: openRtApi( RtAudio::Api api )
\r
142 #if defined(__UNIX_JACK__)
\r
143 if ( api == UNIX_JACK )
\r
144 rtapi_ = new RtApiJack();
\r
146 #if defined(__LINUX_ALSA__)
\r
147 if ( api == LINUX_ALSA )
\r
148 rtapi_ = new RtApiAlsa();
\r
150 #if defined(__LINUX_PULSE__)
\r
151 if ( api == LINUX_PULSE )
\r
152 rtapi_ = new RtApiPulse();
\r
154 #if defined(__LINUX_OSS__)
\r
155 if ( api == LINUX_OSS )
\r
156 rtapi_ = new RtApiOss();
\r
158 #if defined(__WINDOWS_ASIO__)
\r
159 if ( api == WINDOWS_ASIO )
\r
160 rtapi_ = new RtApiAsio();
\r
162 #if defined(__WINDOWS_WASAPI__)
\r
163 if ( api == WINDOWS_WASAPI )
\r
164 rtapi_ = new RtApiWasapi();
\r
166 #if defined(__WINDOWS_DS__)
\r
167 if ( api == WINDOWS_DS )
\r
168 rtapi_ = new RtApiDs();
\r
170 #if defined(__MACOSX_CORE__)
\r
171 if ( api == MACOSX_CORE )
\r
172 rtapi_ = new RtApiCore();
\r
174 #if defined(__RTAUDIO_DUMMY__)
\r
175 if ( api == RTAUDIO_DUMMY )
\r
176 rtapi_ = new RtApiDummy();
\r
180 RtAudio :: RtAudio( RtAudio::Api api )
\r
184 if ( api != UNSPECIFIED ) {
\r
185 // Attempt to open the specified API.
\r
187 if ( rtapi_ ) return;
\r
189 // No compiled support for specified API value. Issue a debug
\r
190 // warning and continue as if no API was specified.
\r
191 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
194 // Iterate through the compiled APIs and return as soon as we find
\r
195 // one with at least one device or we reach the end of the list.
\r
196 std::vector< RtAudio::Api > apis;
\r
197 getCompiledApi( apis );
\r
198 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
199 openRtApi( apis[i] );
\r
200 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
\r
203 if ( rtapi_ ) return;
\r
205 // It should not be possible to get here because the preprocessor
\r
206 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
207 // API-specific definitions are passed to the compiler. But just in
\r
208 // case something weird happens, we'll thow an error.
\r
209 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
210 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
213 RtAudio :: ~RtAudio() throw()
\r
219 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
220 RtAudio::StreamParameters *inputParameters,
\r
221 RtAudioFormat format, unsigned int sampleRate,
\r
222 unsigned int *bufferFrames,
\r
223 RtAudioCallback callback, void *userData,
\r
224 RtAudio::StreamOptions *options,
\r
225 RtAudioErrorCallback errorCallback )
\r
227 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
228 sampleRate, bufferFrames, callback,
\r
229 userData, options, errorCallback );
\r
232 // *************************************************** //
\r
234 // Public RtApi definitions (see end of file for
\r
235 // private or protected utility functions).
\r
237 // *************************************************** //
\r
241 stream_.state = STREAM_CLOSED;
\r
242 stream_.mode = UNINITIALIZED;
\r
243 stream_.apiHandle = 0;
\r
244 stream_.userBuffer[0] = 0;
\r
245 stream_.userBuffer[1] = 0;
\r
246 MUTEX_INITIALIZE( &stream_.mutex );
\r
247 showWarnings_ = true;
\r
248 firstErrorOccurred_ = false;
\r
253 MUTEX_DESTROY( &stream_.mutex );
\r
256 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
257 RtAudio::StreamParameters *iParams,
\r
258 RtAudioFormat format, unsigned int sampleRate,
\r
259 unsigned int *bufferFrames,
\r
260 RtAudioCallback callback, void *userData,
\r
261 RtAudio::StreamOptions *options,
\r
262 RtAudioErrorCallback errorCallback )
\r
264 if ( stream_.state != STREAM_CLOSED ) {
\r
265 errorText_ = "RtApi::openStream: a stream is already open!";
\r
266 error( RtAudioError::INVALID_USE );
\r
270 // Clear stream information potentially left from a previously open stream.
\r
273 if ( oParams && oParams->nChannels < 1 ) {
\r
274 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
275 error( RtAudioError::INVALID_USE );
\r
279 if ( iParams && iParams->nChannels < 1 ) {
\r
280 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
281 error( RtAudioError::INVALID_USE );
\r
285 if ( oParams == NULL && iParams == NULL ) {
\r
286 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
287 error( RtAudioError::INVALID_USE );
\r
291 if ( formatBytes(format) == 0 ) {
\r
292 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
293 error( RtAudioError::INVALID_USE );
\r
297 unsigned int nDevices = getDeviceCount();
\r
298 unsigned int oChannels = 0;
\r
300 oChannels = oParams->nChannels;
\r
301 if ( oParams->deviceId >= nDevices ) {
\r
302 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
303 error( RtAudioError::INVALID_USE );
\r
308 unsigned int iChannels = 0;
\r
310 iChannels = iParams->nChannels;
\r
311 if ( iParams->deviceId >= nDevices ) {
\r
312 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
313 error( RtAudioError::INVALID_USE );
\r
320 if ( oChannels > 0 ) {
\r
322 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
323 sampleRate, format, bufferFrames, options );
\r
324 if ( result == false ) {
\r
325 error( RtAudioError::SYSTEM_ERROR );
\r
330 if ( iChannels > 0 ) {
\r
332 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
333 sampleRate, format, bufferFrames, options );
\r
334 if ( result == false ) {
\r
335 if ( oChannels > 0 ) closeStream();
\r
336 error( RtAudioError::SYSTEM_ERROR );
\r
341 stream_.callbackInfo.callback = (void *) callback;
\r
342 stream_.callbackInfo.userData = userData;
\r
343 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
345 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
346 stream_.state = STREAM_STOPPED;
\r
349 unsigned int RtApi :: getDefaultInputDevice( void )
\r
351 // Should be implemented in subclasses if possible.
\r
355 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
357 // Should be implemented in subclasses if possible.
\r
361 void RtApi :: closeStream( void )
\r
363 // MUST be implemented in subclasses!
\r
367 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
368 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
369 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
370 RtAudio::StreamOptions * /*options*/ )
\r
372 // MUST be implemented in subclasses!
\r
376 void RtApi :: tickStreamTime( void )
\r
378 // Subclasses that do not provide their own implementation of
\r
379 // getStreamTime should call this function once per buffer I/O to
\r
380 // provide basic stream time support.
\r
382 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
384 #if defined( HAVE_GETTIMEOFDAY )
\r
385 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
389 long RtApi :: getStreamLatency( void )
\r
393 long totalLatency = 0;
\r
394 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
395 totalLatency = stream_.latency[0];
\r
396 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
397 totalLatency += stream_.latency[1];
\r
399 return totalLatency;
\r
402 double RtApi :: getStreamTime( void )
\r
406 #if defined( HAVE_GETTIMEOFDAY )
\r
407 // Return a very accurate estimate of the stream time by
\r
408 // adding in the elapsed time since the last tick.
\r
409 struct timeval then;
\r
410 struct timeval now;
\r
412 // If lastTickTimestamp is 0 it means we haven't had a "last tick" since
\r
413 // we started the stream.
\r
414 if ( stream_.state != STREAM_RUNNING || (stream_.lastTickTimestamp.tv_sec == 0 && stream_.lastTickTimestamp.tv_usec == 0) )
\r
415 return stream_.streamTime;
\r
417 gettimeofday( &now, NULL );
\r
418 then = stream_.lastTickTimestamp;
\r
419 return stream_.streamTime +
\r
420 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
421 (then.tv_sec + 0.000001 * then.tv_usec));
\r
423 return stream_.streamTime;
\r
427 void RtApi :: setStreamTime( double time )
\r
432 stream_.streamTime = time;
\r
435 unsigned int RtApi :: getStreamSampleRate( void )
\r
439 return stream_.sampleRate;
\r
442 void RtApi :: startStream( void )
\r
444 #if defined( HAVE_GETTIMEOFDAY )
\r
445 stream_.lastTickTimestamp.tv_sec = 0;
\r
446 stream_.lastTickTimestamp.tv_usec = 0;
\r
451 // *************************************************** //
\r
453 // OS/API-specific methods.
\r
455 // *************************************************** //
\r
457 #if defined(__MACOSX_CORE__)
\r
459 // The OS X CoreAudio API is designed to use a separate callback
\r
460 // procedure for each of its audio devices. A single RtAudio duplex
\r
461 // stream using two different devices is supported here, though it
\r
462 // cannot be guaranteed to always behave correctly because we cannot
\r
463 // synchronize these two callbacks.
\r
465 // A property listener is installed for over/underrun information.
\r
466 // However, no functionality is currently provided to allow property
\r
467 // listeners to trigger user handlers because it is unclear what could
\r
468 // be done if a critical stream parameter (buffer size, sample rate,
\r
469 // device disconnect) notification arrived. The listeners entail
\r
470 // quite a bit of extra code and most likely, a user program wouldn't
\r
471 // be prepared for the result anyway. However, we do provide a flag
\r
472 // to the client callback function to inform of an over/underrun.
\r
474 // A structure to hold various information related to the CoreAudio API
\r
476 struct CoreHandle {
\r
477 AudioDeviceID id[2]; // device ids
\r
478 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
479 AudioDeviceIOProcID procId[2];
\r
481 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
482 UInt32 nStreams[2]; // number of streams to use
\r
484 char *deviceBuffer;
\r
485 pthread_cond_t condition;
\r
486 int drainCounter; // Tracks callback counts when draining
\r
487 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
490 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
493 RtApiCore:: RtApiCore()
\r
495 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
496 // This is a largely undocumented but absolutely necessary
\r
497 // requirement starting with OS-X 10.6. If not called, queries and
\r
498 // updates to various audio device properties are not handled
\r
500 CFRunLoopRef theRunLoop = NULL;
\r
501 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
502 kAudioObjectPropertyScopeGlobal,
\r
503 kAudioObjectPropertyElementMaster };
\r
504 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
505 if ( result != noErr ) {
\r
506 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
507 error( RtAudioError::WARNING );
\r
512 RtApiCore :: ~RtApiCore()
\r
514 // The subclass destructor gets called before the base class
\r
515 // destructor, so close an existing stream before deallocating
\r
516 // apiDeviceId memory.
\r
517 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
520 unsigned int RtApiCore :: getDeviceCount( void )
\r
522 // Find out how many audio devices there are, if any.
\r
524 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
525 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
526 if ( result != noErr ) {
\r
527 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
528 error( RtAudioError::WARNING );
\r
532 return dataSize / sizeof( AudioDeviceID );
\r
535 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
537 unsigned int nDevices = getDeviceCount();
\r
538 if ( nDevices <= 1 ) return 0;
\r
541 UInt32 dataSize = sizeof( AudioDeviceID );
\r
542 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
543 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
544 if ( result != noErr ) {
\r
545 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
546 error( RtAudioError::WARNING );
\r
550 dataSize *= nDevices;
\r
551 AudioDeviceID deviceList[ nDevices ];
\r
552 property.mSelector = kAudioHardwarePropertyDevices;
\r
553 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
554 if ( result != noErr ) {
\r
555 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
556 error( RtAudioError::WARNING );
\r
560 for ( unsigned int i=0; i<nDevices; i++ )
\r
561 if ( id == deviceList[i] ) return i;
\r
563 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
564 error( RtAudioError::WARNING );
\r
568 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
570 unsigned int nDevices = getDeviceCount();
\r
571 if ( nDevices <= 1 ) return 0;
\r
574 UInt32 dataSize = sizeof( AudioDeviceID );
\r
575 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
576 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
577 if ( result != noErr ) {
\r
578 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
579 error( RtAudioError::WARNING );
\r
583 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
584 AudioDeviceID deviceList[ nDevices ];
\r
585 property.mSelector = kAudioHardwarePropertyDevices;
\r
586 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
587 if ( result != noErr ) {
\r
588 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
589 error( RtAudioError::WARNING );
\r
593 for ( unsigned int i=0; i<nDevices; i++ )
\r
594 if ( id == deviceList[i] ) return i;
\r
596 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
597 error( RtAudioError::WARNING );
\r
601 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
603 RtAudio::DeviceInfo info;
\r
604 info.probed = false;
\r
607 unsigned int nDevices = getDeviceCount();
\r
608 if ( nDevices == 0 ) {
\r
609 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
610 error( RtAudioError::INVALID_USE );
\r
614 if ( device >= nDevices ) {
\r
615 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
616 error( RtAudioError::INVALID_USE );
\r
620 AudioDeviceID deviceList[ nDevices ];
\r
621 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
622 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
623 kAudioObjectPropertyScopeGlobal,
\r
624 kAudioObjectPropertyElementMaster };
\r
625 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
626 0, NULL, &dataSize, (void *) &deviceList );
\r
627 if ( result != noErr ) {
\r
628 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
629 error( RtAudioError::WARNING );
\r
633 AudioDeviceID id = deviceList[ device ];
\r
635 // Get the device name.
\r
637 CFStringRef cfname;
\r
638 dataSize = sizeof( CFStringRef );
\r
639 property.mSelector = kAudioObjectPropertyManufacturer;
\r
640 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
641 if ( result != noErr ) {
\r
642 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
643 errorText_ = errorStream_.str();
\r
644 error( RtAudioError::WARNING );
\r
648 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
649 int length = CFStringGetLength(cfname);
\r
650 char *mname = (char *)malloc(length * 3 + 1);
\r
651 #if defined( UNICODE ) || defined( _UNICODE )
\r
652 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
654 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
656 info.name.append( (const char *)mname, strlen(mname) );
\r
657 info.name.append( ": " );
\r
658 CFRelease( cfname );
\r
661 property.mSelector = kAudioObjectPropertyName;
\r
662 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
663 if ( result != noErr ) {
\r
664 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
665 errorText_ = errorStream_.str();
\r
666 error( RtAudioError::WARNING );
\r
670 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
671 length = CFStringGetLength(cfname);
\r
672 char *name = (char *)malloc(length * 3 + 1);
\r
673 #if defined( UNICODE ) || defined( _UNICODE )
\r
674 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
676 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
678 info.name.append( (const char *)name, strlen(name) );
\r
679 CFRelease( cfname );
\r
682 // Get the output stream "configuration".
\r
683 AudioBufferList *bufferList = nil;
\r
684 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
685 property.mScope = kAudioDevicePropertyScopeOutput;
\r
686 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
688 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
689 if ( result != noErr || dataSize == 0 ) {
\r
690 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
691 errorText_ = errorStream_.str();
\r
692 error( RtAudioError::WARNING );
\r
696 // Allocate the AudioBufferList.
\r
697 bufferList = (AudioBufferList *) malloc( dataSize );
\r
698 if ( bufferList == NULL ) {
\r
699 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
700 error( RtAudioError::WARNING );
\r
704 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
705 if ( result != noErr || dataSize == 0 ) {
\r
706 free( bufferList );
\r
707 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
708 errorText_ = errorStream_.str();
\r
709 error( RtAudioError::WARNING );
\r
713 // Get output channel information.
\r
714 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
715 for ( i=0; i<nStreams; i++ )
\r
716 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
717 free( bufferList );
\r
719 // Get the input stream "configuration".
\r
720 property.mScope = kAudioDevicePropertyScopeInput;
\r
721 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
722 if ( result != noErr || dataSize == 0 ) {
\r
723 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
724 errorText_ = errorStream_.str();
\r
725 error( RtAudioError::WARNING );
\r
729 // Allocate the AudioBufferList.
\r
730 bufferList = (AudioBufferList *) malloc( dataSize );
\r
731 if ( bufferList == NULL ) {
\r
732 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
733 error( RtAudioError::WARNING );
\r
737 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
738 if (result != noErr || dataSize == 0) {
\r
739 free( bufferList );
\r
740 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
741 errorText_ = errorStream_.str();
\r
742 error( RtAudioError::WARNING );
\r
746 // Get input channel information.
\r
747 nStreams = bufferList->mNumberBuffers;
\r
748 for ( i=0; i<nStreams; i++ )
\r
749 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
750 free( bufferList );
\r
752 // If device opens for both playback and capture, we determine the channels.
\r
753 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
754 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
756 // Probe the device sample rates.
\r
757 bool isInput = false;
\r
758 if ( info.outputChannels == 0 ) isInput = true;
\r
760 // Determine the supported sample rates.
\r
761 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
762 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
763 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
764 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
765 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
766 errorText_ = errorStream_.str();
\r
767 error( RtAudioError::WARNING );
\r
771 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
772 AudioValueRange rangeList[ nRanges ];
\r
773 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
774 if ( result != kAudioHardwareNoError ) {
\r
775 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
776 errorText_ = errorStream_.str();
\r
777 error( RtAudioError::WARNING );
\r
781 // The sample rate reporting mechanism is a bit of a mystery. It
\r
782 // seems that it can either return individual rates or a range of
\r
783 // rates. I assume that if the min / max range values are the same,
\r
784 // then that represents a single supported rate and if the min / max
\r
785 // range values are different, the device supports an arbitrary
\r
786 // range of values (though there might be multiple ranges, so we'll
\r
787 // use the most conservative range).
\r
788 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
789 bool haveValueRange = false;
\r
790 info.sampleRates.clear();
\r
791 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
792 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
\r
793 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
\r
794 info.sampleRates.push_back( tmpSr );
\r
796 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
\r
797 info.preferredSampleRate = tmpSr;
\r
800 haveValueRange = true;
\r
801 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
802 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
806 if ( haveValueRange ) {
\r
807 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
808 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
\r
809 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
811 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
812 info.preferredSampleRate = SAMPLE_RATES[k];
\r
817 // Sort and remove any redundant values
\r
818 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
819 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
821 if ( info.sampleRates.size() == 0 ) {
\r
822 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
823 errorText_ = errorStream_.str();
\r
824 error( RtAudioError::WARNING );
\r
828 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
829 // Thus, any other "physical" formats supported by the device are of
\r
830 // no interest to the client.
\r
831 info.nativeFormats = RTAUDIO_FLOAT32;
\r
833 if ( info.outputChannels > 0 )
\r
834 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
835 if ( info.inputChannels > 0 )
\r
836 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
838 info.probed = true;
\r
842 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
843 const AudioTimeStamp* /*inNow*/,
\r
844 const AudioBufferList* inInputData,
\r
845 const AudioTimeStamp* /*inInputTime*/,
\r
846 AudioBufferList* outOutputData,
\r
847 const AudioTimeStamp* /*inOutputTime*/,
\r
848 void* infoPointer )
\r
850 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
852 RtApiCore *object = (RtApiCore *) info->object;
\r
853 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
854 return kAudioHardwareUnspecifiedError;
\r
856 return kAudioHardwareNoError;
\r
859 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
861 const AudioObjectPropertyAddress properties[],
\r
862 void* handlePointer )
\r
864 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
865 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
866 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
867 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
868 handle->xrun[1] = true;
\r
870 handle->xrun[0] = true;
\r
874 return kAudioHardwareNoError;
\r
877 static OSStatus rateListener( AudioObjectID inDevice,
\r
878 UInt32 /*nAddresses*/,
\r
879 const AudioObjectPropertyAddress /*properties*/[],
\r
880 void* ratePointer )
\r
882 Float64 *rate = (Float64 *) ratePointer;
\r
883 UInt32 dataSize = sizeof( Float64 );
\r
884 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
885 kAudioObjectPropertyScopeGlobal,
\r
886 kAudioObjectPropertyElementMaster };
\r
887 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
888 return kAudioHardwareNoError;
\r
891 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
892 unsigned int firstChannel, unsigned int sampleRate,
\r
893 RtAudioFormat format, unsigned int *bufferSize,
\r
894 RtAudio::StreamOptions *options )
\r
897 unsigned int nDevices = getDeviceCount();
\r
898 if ( nDevices == 0 ) {
\r
899 // This should not happen because a check is made before this function is called.
\r
900 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
904 if ( device >= nDevices ) {
\r
905 // This should not happen because a check is made before this function is called.
\r
906 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
910 AudioDeviceID deviceList[ nDevices ];
\r
911 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
912 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
913 kAudioObjectPropertyScopeGlobal,
\r
914 kAudioObjectPropertyElementMaster };
\r
915 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
916 0, NULL, &dataSize, (void *) &deviceList );
\r
917 if ( result != noErr ) {
\r
918 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
922 AudioDeviceID id = deviceList[ device ];
\r
924 // Setup for stream mode.
\r
925 bool isInput = false;
\r
926 if ( mode == INPUT ) {
\r
928 property.mScope = kAudioDevicePropertyScopeInput;
\r
931 property.mScope = kAudioDevicePropertyScopeOutput;
\r
933 // Get the stream "configuration".
\r
934 AudioBufferList *bufferList = nil;
\r
936 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
937 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
938 if ( result != noErr || dataSize == 0 ) {
\r
939 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
940 errorText_ = errorStream_.str();
\r
944 // Allocate the AudioBufferList.
\r
945 bufferList = (AudioBufferList *) malloc( dataSize );
\r
946 if ( bufferList == NULL ) {
\r
947 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
951 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
952 if (result != noErr || dataSize == 0) {
\r
953 free( bufferList );
\r
954 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
955 errorText_ = errorStream_.str();
\r
959 // Search for one or more streams that contain the desired number of
\r
960 // channels. CoreAudio devices can have an arbitrary number of
\r
961 // streams and each stream can have an arbitrary number of channels.
\r
962 // For each stream, a single buffer of interleaved samples is
\r
963 // provided. RtAudio prefers the use of one stream of interleaved
\r
964 // data or multiple consecutive single-channel streams. However, we
\r
965 // now support multiple consecutive multi-channel streams of
\r
966 // interleaved data as well.
\r
967 UInt32 iStream, offsetCounter = firstChannel;
\r
968 UInt32 nStreams = bufferList->mNumberBuffers;
\r
969 bool monoMode = false;
\r
970 bool foundStream = false;
\r
972 // First check that the device supports the requested number of
\r
974 UInt32 deviceChannels = 0;
\r
975 for ( iStream=0; iStream<nStreams; iStream++ )
\r
976 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
978 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
979 free( bufferList );
\r
980 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
981 errorText_ = errorStream_.str();
\r
985 // Look for a single stream meeting our needs.
\r
986 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
987 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
988 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
989 if ( streamChannels >= channels + offsetCounter ) {
\r
990 firstStream = iStream;
\r
991 channelOffset = offsetCounter;
\r
992 foundStream = true;
\r
995 if ( streamChannels > offsetCounter ) break;
\r
996 offsetCounter -= streamChannels;
\r
999 // If we didn't find a single stream above, then we should be able
\r
1000 // to meet the channel specification with multiple streams.
\r
1001 if ( foundStream == false ) {
\r
1003 offsetCounter = firstChannel;
\r
1004 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
1005 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
1006 if ( streamChannels > offsetCounter ) break;
\r
1007 offsetCounter -= streamChannels;
\r
1010 firstStream = iStream;
\r
1011 channelOffset = offsetCounter;
\r
1012 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
1014 if ( streamChannels > 1 ) monoMode = false;
\r
1015 while ( channelCounter > 0 ) {
\r
1016 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
1017 if ( streamChannels > 1 ) monoMode = false;
\r
1018 channelCounter -= streamChannels;
\r
1023 free( bufferList );
\r
1025 // Determine the buffer size.
\r
1026 AudioValueRange bufferRange;
\r
1027 dataSize = sizeof( AudioValueRange );
\r
1028 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
1029 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
1031 if ( result != noErr ) {
\r
1032 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
1033 errorText_ = errorStream_.str();
\r
1037 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1038 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
1039 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1041 // Set the buffer size. For multiple streams, I'm assuming we only
\r
1042 // need to make this setting for the master channel.
\r
1043 UInt32 theSize = (UInt32) *bufferSize;
\r
1044 dataSize = sizeof( UInt32 );
\r
1045 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1046 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1048 if ( result != noErr ) {
\r
1049 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1050 errorText_ = errorStream_.str();
\r
1054 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1055 // MUST be the same in both directions!
\r
1056 *bufferSize = theSize;
\r
1057 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1058 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1059 errorText_ = errorStream_.str();
\r
1063 stream_.bufferSize = *bufferSize;
\r
1064 stream_.nBuffers = 1;
\r
1066 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1067 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1069 dataSize = sizeof( hog_pid );
\r
1070 property.mSelector = kAudioDevicePropertyHogMode;
\r
1071 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1072 if ( result != noErr ) {
\r
1073 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1074 errorText_ = errorStream_.str();
\r
1078 if ( hog_pid != getpid() ) {
\r
1079 hog_pid = getpid();
\r
1080 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1081 if ( result != noErr ) {
\r
1082 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1083 errorText_ = errorStream_.str();
\r
1089 // Check and if necessary, change the sample rate for the device.
\r
1090 Float64 nominalRate;
\r
1091 dataSize = sizeof( Float64 );
\r
1092 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1093 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1094 if ( result != noErr ) {
\r
1095 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1096 errorText_ = errorStream_.str();
\r
1100 // Only change the sample rate if off by more than 1 Hz.
\r
1101 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1103 // Set a property listener for the sample rate change
\r
1104 Float64 reportedRate = 0.0;
\r
1105 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1106 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1107 if ( result != noErr ) {
\r
1108 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1109 errorText_ = errorStream_.str();
\r
1113 nominalRate = (Float64) sampleRate;
\r
1114 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1115 if ( result != noErr ) {
\r
1116 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1117 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1118 errorText_ = errorStream_.str();
\r
1122 // Now wait until the reported nominal rate is what we just set.
\r
1123 UInt32 microCounter = 0;
\r
1124 while ( reportedRate != nominalRate ) {
\r
1125 microCounter += 5000;
\r
1126 if ( microCounter > 5000000 ) break;
\r
1130 // Remove the property listener.
\r
1131 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1133 if ( microCounter > 5000000 ) {
\r
1134 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1135 errorText_ = errorStream_.str();
\r
1140 // Now set the stream format for all streams. Also, check the
\r
1141 // physical format of the device and change that if necessary.
\r
1142 AudioStreamBasicDescription description;
\r
1143 dataSize = sizeof( AudioStreamBasicDescription );
\r
1144 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1145 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1146 if ( result != noErr ) {
\r
1147 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1148 errorText_ = errorStream_.str();
\r
1152 // Set the sample rate and data format id. However, only make the
\r
1153 // change if the sample rate is not within 1.0 of the desired
\r
1154 // rate and the format is not linear pcm.
\r
1155 bool updateFormat = false;
\r
1156 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1157 description.mSampleRate = (Float64) sampleRate;
\r
1158 updateFormat = true;
\r
1161 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1162 description.mFormatID = kAudioFormatLinearPCM;
\r
1163 updateFormat = true;
\r
1166 if ( updateFormat ) {
\r
1167 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1168 if ( result != noErr ) {
\r
1169 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1170 errorText_ = errorStream_.str();
\r
1175 // Now check the physical format.
\r
1176 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1177 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1178 if ( result != noErr ) {
\r
1179 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1180 errorText_ = errorStream_.str();
\r
1184 //std::cout << "Current physical stream format:" << std::endl;
\r
1185 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1186 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1187 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1188 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1190 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1191 description.mFormatID = kAudioFormatLinearPCM;
\r
1192 //description.mSampleRate = (Float64) sampleRate;
\r
1193 AudioStreamBasicDescription testDescription = description;
\r
1194 UInt32 formatFlags;
\r
1196 // We'll try higher bit rates first and then work our way down.
\r
1197 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1198 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1199 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1200 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1201 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1202 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1203 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1204 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1205 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1206 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1207 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1208 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1209 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1211 bool setPhysicalFormat = false;
\r
1212 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1213 testDescription = description;
\r
1214 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1215 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1216 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1217 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1219 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1220 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1221 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1222 if ( result == noErr ) {
\r
1223 setPhysicalFormat = true;
\r
1224 //std::cout << "Updated physical stream format:" << std::endl;
\r
1225 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1226 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1227 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1228 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1233 if ( !setPhysicalFormat ) {
\r
1234 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1235 errorText_ = errorStream_.str();
\r
1238 } // done setting virtual/physical formats.
\r
1240 // Get the stream / device latency.
\r
1242 dataSize = sizeof( UInt32 );
\r
1243 property.mSelector = kAudioDevicePropertyLatency;
\r
1244 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1245 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1246 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1248 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1249 errorText_ = errorStream_.str();
\r
1250 error( RtAudioError::WARNING );
\r
1254 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1255 // always be presented in native-endian format, so we should never
\r
1256 // need to byte swap.
\r
1257 stream_.doByteSwap[mode] = false;
\r
1259 // From the CoreAudio documentation, PCM data must be supplied as
\r
1261 stream_.userFormat = format;
\r
1262 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1264 if ( streamCount == 1 )
\r
1265 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1266 else // multiple streams
\r
1267 stream_.nDeviceChannels[mode] = channels;
\r
1268 stream_.nUserChannels[mode] = channels;
\r
1269 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1270 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1271 else stream_.userInterleaved = true;
\r
1272 stream_.deviceInterleaved[mode] = true;
\r
1273 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1275 // Set flags for buffer conversion.
\r
1276 stream_.doConvertBuffer[mode] = false;
\r
1277 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1278 stream_.doConvertBuffer[mode] = true;
\r
1279 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1280 stream_.doConvertBuffer[mode] = true;
\r
1281 if ( streamCount == 1 ) {
\r
1282 if ( stream_.nUserChannels[mode] > 1 &&
\r
1283 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1284 stream_.doConvertBuffer[mode] = true;
\r
1286 else if ( monoMode && stream_.userInterleaved )
\r
1287 stream_.doConvertBuffer[mode] = true;
\r
1289 // Allocate our CoreHandle structure for the stream.
\r
1290 CoreHandle *handle = 0;
\r
1291 if ( stream_.apiHandle == 0 ) {
\r
1293 handle = new CoreHandle;
\r
1295 catch ( std::bad_alloc& ) {
\r
1296 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1300 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1301 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1304 stream_.apiHandle = (void *) handle;
\r
1307 handle = (CoreHandle *) stream_.apiHandle;
\r
1308 handle->iStream[mode] = firstStream;
\r
1309 handle->nStreams[mode] = streamCount;
\r
1310 handle->id[mode] = id;
\r
1312 // Allocate necessary internal buffers.
\r
1313 unsigned long bufferBytes;
\r
1314 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1315 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1316 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1317 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1318 if ( stream_.userBuffer[mode] == NULL ) {
\r
1319 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1323 // If possible, we will make use of the CoreAudio stream buffers as
\r
1324 // "device buffers". However, we can't do this if using multiple
\r
1326 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1328 bool makeBuffer = true;
\r
1329 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1330 if ( mode == INPUT ) {
\r
1331 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1332 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1333 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1337 if ( makeBuffer ) {
\r
1338 bufferBytes *= *bufferSize;
\r
1339 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1340 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1341 if ( stream_.deviceBuffer == NULL ) {
\r
1342 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1348 stream_.sampleRate = sampleRate;
\r
1349 stream_.device[mode] = device;
\r
1350 stream_.state = STREAM_STOPPED;
\r
1351 stream_.callbackInfo.object = (void *) this;
\r
1353 // Setup the buffer conversion information structure.
\r
1354 if ( stream_.doConvertBuffer[mode] ) {
\r
1355 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1356 else setConvertInfo( mode, channelOffset );
\r
1359 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1360 // Only one callback procedure per device.
\r
1361 stream_.mode = DUPLEX;
\r
1363 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1364 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1366 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1367 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1369 if ( result != noErr ) {
\r
1370 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1371 errorText_ = errorStream_.str();
\r
1374 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1375 stream_.mode = DUPLEX;
\r
1377 stream_.mode = mode;
\r
1380 // Setup the device property listener for over/underload.
\r
1381 property.mSelector = kAudioDeviceProcessorOverload;
\r
1382 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1383 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1389 pthread_cond_destroy( &handle->condition );
\r
1391 stream_.apiHandle = 0;
\r
1394 for ( int i=0; i<2; i++ ) {
\r
1395 if ( stream_.userBuffer[i] ) {
\r
1396 free( stream_.userBuffer[i] );
\r
1397 stream_.userBuffer[i] = 0;
\r
1401 if ( stream_.deviceBuffer ) {
\r
1402 free( stream_.deviceBuffer );
\r
1403 stream_.deviceBuffer = 0;
\r
1406 stream_.state = STREAM_CLOSED;
\r
1410 void RtApiCore :: closeStream( void )
\r
1412 if ( stream_.state == STREAM_CLOSED ) {
\r
1413 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1414 error( RtAudioError::WARNING );
\r
1418 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1419 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1421 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
1422 kAudioObjectPropertyScopeGlobal,
\r
1423 kAudioObjectPropertyElementMaster };
\r
1425 property.mSelector = kAudioDeviceProcessorOverload;
\r
1426 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1427 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
\r
1428 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
\r
1429 error( RtAudioError::WARNING );
\r
1432 if ( stream_.state == STREAM_RUNNING )
\r
1433 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1434 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1435 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1437 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1438 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1442 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1444 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
1445 kAudioObjectPropertyScopeGlobal,
\r
1446 kAudioObjectPropertyElementMaster };
\r
1448 property.mSelector = kAudioDeviceProcessorOverload;
\r
1449 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1450 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
\r
1451 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
\r
1452 error( RtAudioError::WARNING );
\r
1455 if ( stream_.state == STREAM_RUNNING )
\r
1456 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1457 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1458 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1460 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1461 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1465 for ( int i=0; i<2; i++ ) {
\r
1466 if ( stream_.userBuffer[i] ) {
\r
1467 free( stream_.userBuffer[i] );
\r
1468 stream_.userBuffer[i] = 0;
\r
1472 if ( stream_.deviceBuffer ) {
\r
1473 free( stream_.deviceBuffer );
\r
1474 stream_.deviceBuffer = 0;
\r
1477 // Destroy pthread condition variable.
\r
1478 pthread_cond_destroy( &handle->condition );
\r
1480 stream_.apiHandle = 0;
\r
1482 stream_.mode = UNINITIALIZED;
\r
1483 stream_.state = STREAM_CLOSED;
\r
1486 void RtApiCore :: startStream( void )
\r
1489 RtApi::startStream();
\r
1490 if ( stream_.state == STREAM_RUNNING ) {
\r
1491 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1492 error( RtAudioError::WARNING );
\r
1496 OSStatus result = noErr;
\r
1497 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1498 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1500 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1501 if ( result != noErr ) {
\r
1502 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1503 errorText_ = errorStream_.str();
\r
1508 if ( stream_.mode == INPUT ||
\r
1509 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1511 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1512 if ( result != noErr ) {
\r
1513 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1514 errorText_ = errorStream_.str();
\r
1519 handle->drainCounter = 0;
\r
1520 handle->internalDrain = false;
\r
1521 stream_.state = STREAM_RUNNING;
\r
1524 if ( result == noErr ) return;
\r
1525 error( RtAudioError::SYSTEM_ERROR );
\r
1528 void RtApiCore :: stopStream( void )
\r
1531 if ( stream_.state == STREAM_STOPPED ) {
\r
1532 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1533 error( RtAudioError::WARNING );
\r
1537 OSStatus result = noErr;
\r
1538 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1539 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1541 if ( handle->drainCounter == 0 ) {
\r
1542 handle->drainCounter = 2;
\r
1543 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1546 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1547 if ( result != noErr ) {
\r
1548 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1549 errorText_ = errorStream_.str();
\r
1554 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1556 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1557 if ( result != noErr ) {
\r
1558 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1559 errorText_ = errorStream_.str();
\r
1564 stream_.state = STREAM_STOPPED;
\r
1567 if ( result == noErr ) return;
\r
1568 error( RtAudioError::SYSTEM_ERROR );
\r
1571 void RtApiCore :: abortStream( void )
\r
1574 if ( stream_.state == STREAM_STOPPED ) {
\r
1575 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1576 error( RtAudioError::WARNING );
\r
1580 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1581 handle->drainCounter = 2;
\r
1586 // This function will be called by a spawned thread when the user
\r
1587 // callback function signals that the stream should be stopped or
\r
1588 // aborted. It is better to handle it this way because the
\r
1589 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1590 // function is called.
\r
1591 static void *coreStopStream( void *ptr )
\r
1593 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1594 RtApiCore *object = (RtApiCore *) info->object;
\r
1596 object->stopStream();
\r
1597 pthread_exit( NULL );
\r
1600 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1601 const AudioBufferList *inBufferList,
\r
1602 const AudioBufferList *outBufferList )
\r
1604 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1605 if ( stream_.state == STREAM_CLOSED ) {
\r
1606 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1607 error( RtAudioError::WARNING );
\r
1611 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1612 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1614 // Check if we were draining the stream and signal is finished.
\r
1615 if ( handle->drainCounter > 3 ) {
\r
1616 ThreadHandle threadId;
\r
1618 stream_.state = STREAM_STOPPING;
\r
1619 if ( handle->internalDrain == true )
\r
1620 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1621 else // external call to stopStream()
\r
1622 pthread_cond_signal( &handle->condition );
\r
1626 AudioDeviceID outputDevice = handle->id[0];
\r
1628 // Invoke user callback to get fresh output data UNLESS we are
\r
1629 // draining stream or duplex mode AND the input/output devices are
\r
1630 // different AND this function is called for the input device.
\r
1631 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1632 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1633 double streamTime = getStreamTime();
\r
1634 RtAudioStreamStatus status = 0;
\r
1635 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1636 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1637 handle->xrun[0] = false;
\r
1639 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1640 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1641 handle->xrun[1] = false;
\r
1644 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1645 stream_.bufferSize, streamTime, status, info->userData );
\r
1646 if ( cbReturnValue == 2 ) {
\r
1647 stream_.state = STREAM_STOPPING;
\r
1648 handle->drainCounter = 2;
\r
1652 else if ( cbReturnValue == 1 ) {
\r
1653 handle->drainCounter = 1;
\r
1654 handle->internalDrain = true;
\r
1658 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1660 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1662 if ( handle->nStreams[0] == 1 ) {
\r
1663 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1665 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1667 else { // fill multiple streams with zeros
\r
1668 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1669 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1671 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1675 else if ( handle->nStreams[0] == 1 ) {
\r
1676 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1677 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1678 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1680 else { // copy from user buffer
\r
1681 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1682 stream_.userBuffer[0],
\r
1683 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1686 else { // fill multiple streams
\r
1687 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1688 if ( stream_.doConvertBuffer[0] ) {
\r
1689 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1690 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1693 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1694 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1695 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1696 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1697 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1700 else { // fill multiple multi-channel streams with interleaved data
\r
1701 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1702 Float32 *out, *in;
\r
1704 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1705 UInt32 inChannels = stream_.nUserChannels[0];
\r
1706 if ( stream_.doConvertBuffer[0] ) {
\r
1707 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1708 inChannels = stream_.nDeviceChannels[0];
\r
1711 if ( inInterleaved ) inOffset = 1;
\r
1712 else inOffset = stream_.bufferSize;
\r
1714 channelsLeft = inChannels;
\r
1715 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1717 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1718 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1721 // Account for possible channel offset in first stream
\r
1722 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1723 streamChannels -= stream_.channelOffset[0];
\r
1724 outJump = stream_.channelOffset[0];
\r
1728 // Account for possible unfilled channels at end of the last stream
\r
1729 if ( streamChannels > channelsLeft ) {
\r
1730 outJump = streamChannels - channelsLeft;
\r
1731 streamChannels = channelsLeft;
\r
1734 // Determine input buffer offsets and skips
\r
1735 if ( inInterleaved ) {
\r
1736 inJump = inChannels;
\r
1737 in += inChannels - channelsLeft;
\r
1741 in += (inChannels - channelsLeft) * inOffset;
\r
1744 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1745 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1746 *out++ = in[j*inOffset];
\r
1751 channelsLeft -= streamChannels;
\r
1757 // Don't bother draining input
\r
1758 if ( handle->drainCounter ) {
\r
1759 handle->drainCounter++;
\r
1763 AudioDeviceID inputDevice;
\r
1764 inputDevice = handle->id[1];
\r
1765 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1767 if ( handle->nStreams[1] == 1 ) {
\r
1768 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1769 convertBuffer( stream_.userBuffer[1],
\r
1770 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1771 stream_.convertInfo[1] );
\r
1773 else { // copy to user buffer
\r
1774 memcpy( stream_.userBuffer[1],
\r
1775 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1776 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1779 else { // read from multiple streams
\r
1780 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1781 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1783 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1784 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1785 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1786 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1787 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1790 else { // read from multiple multi-channel streams
\r
1791 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1792 Float32 *out, *in;
\r
1794 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1795 UInt32 outChannels = stream_.nUserChannels[1];
\r
1796 if ( stream_.doConvertBuffer[1] ) {
\r
1797 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1798 outChannels = stream_.nDeviceChannels[1];
\r
1801 if ( outInterleaved ) outOffset = 1;
\r
1802 else outOffset = stream_.bufferSize;
\r
1804 channelsLeft = outChannels;
\r
1805 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1807 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1808 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1811 // Account for possible channel offset in first stream
\r
1812 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1813 streamChannels -= stream_.channelOffset[1];
\r
1814 inJump = stream_.channelOffset[1];
\r
1818 // Account for possible unread channels at end of the last stream
\r
1819 if ( streamChannels > channelsLeft ) {
\r
1820 inJump = streamChannels - channelsLeft;
\r
1821 streamChannels = channelsLeft;
\r
1824 // Determine output buffer offsets and skips
\r
1825 if ( outInterleaved ) {
\r
1826 outJump = outChannels;
\r
1827 out += outChannels - channelsLeft;
\r
1831 out += (outChannels - channelsLeft) * outOffset;
\r
1834 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1835 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1836 out[j*outOffset] = *in++;
\r
1841 channelsLeft -= streamChannels;
\r
1845 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1846 convertBuffer( stream_.userBuffer[1],
\r
1847 stream_.deviceBuffer,
\r
1848 stream_.convertInfo[1] );
\r
1854 //MUTEX_UNLOCK( &stream_.mutex );
\r
1856 RtApi::tickStreamTime();
\r
1860 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1864 case kAudioHardwareNotRunningError:
\r
1865 return "kAudioHardwareNotRunningError";
\r
1867 case kAudioHardwareUnspecifiedError:
\r
1868 return "kAudioHardwareUnspecifiedError";
\r
1870 case kAudioHardwareUnknownPropertyError:
\r
1871 return "kAudioHardwareUnknownPropertyError";
\r
1873 case kAudioHardwareBadPropertySizeError:
\r
1874 return "kAudioHardwareBadPropertySizeError";
\r
1876 case kAudioHardwareIllegalOperationError:
\r
1877 return "kAudioHardwareIllegalOperationError";
\r
1879 case kAudioHardwareBadObjectError:
\r
1880 return "kAudioHardwareBadObjectError";
\r
1882 case kAudioHardwareBadDeviceError:
\r
1883 return "kAudioHardwareBadDeviceError";
\r
1885 case kAudioHardwareBadStreamError:
\r
1886 return "kAudioHardwareBadStreamError";
\r
1888 case kAudioHardwareUnsupportedOperationError:
\r
1889 return "kAudioHardwareUnsupportedOperationError";
\r
1891 case kAudioDeviceUnsupportedFormatError:
\r
1892 return "kAudioDeviceUnsupportedFormatError";
\r
1894 case kAudioDevicePermissionsError:
\r
1895 return "kAudioDevicePermissionsError";
\r
1898 return "CoreAudio unknown error";
\r
1902 //******************** End of __MACOSX_CORE__ *********************//
\r
1905 #if defined(__UNIX_JACK__)
\r
1907 // JACK is a low-latency audio server, originally written for the
\r
1908 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1909 // connect a number of different applications to an audio device, as
\r
1910 // well as allowing them to share audio between themselves.
\r
1912 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1913 // have ports connected to the server. The JACK server is typically
\r
1914 // started in a terminal as follows:
\r
1916 // jackd -d alsa -d hw:0
\r
1918 // or through an interface program such as qjackctl. Many of the
\r
1919 // parameters normally set for a stream are fixed by the JACK server
\r
1920 // and can be specified when the JACK server is started. In
\r
1923 // jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1925 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1926 // frames, and number of buffers = 4. Once the server is running, it
\r
1927 // is not possible to override these values. If the values are not
\r
1928 // specified in the command-line, the JACK server uses default values.
\r
1930 // The JACK server does not have to be running when an instance of
\r
1931 // RtApiJack is created, though the function getDeviceCount() will
\r
1932 // report 0 devices found until JACK has been started. When no
\r
1933 // devices are available (i.e., the JACK server is not running), a
\r
1934 // stream cannot be opened.
\r
1936 #include <jack/jack.h>
\r
1937 #include <unistd.h>
\r
1940 // A structure to hold various information related to the Jack API
\r
1941 // implementation.
\r
1942 struct JackHandle {
\r
1943 jack_client_t *client;
\r
1944 jack_port_t **ports[2];
\r
1945 std::string deviceName[2];
\r
1947 pthread_cond_t condition;
\r
1948 int drainCounter; // Tracks callback counts when draining
\r
1949 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1952 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op error sink installed via jack_set_error_function() so Jack's
// internal error chatter does not reach stderr in non-debug builds.
// (Stray ';' after the body removed — it formed an empty declaration.)
static void jackSilentError( const char * ) {}
1957 RtApiJack :: RtApiJack()
\r
1959 // Nothing to do here.
\r
1960 #if !defined(__RTAUDIO_DEBUG__)
\r
1961 // Turn off Jack's internal error reporting.
\r
1962 jack_set_error_function( &jackSilentError );
\r
1966 RtApiJack :: ~RtApiJack()
\r
1968 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1971 unsigned int RtApiJack :: getDeviceCount( void )
\r
1973 // See if we can become a jack client.
\r
1974 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1975 jack_status_t *status = NULL;
\r
1976 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1977 if ( client == 0 ) return 0;
\r
1979 const char **ports;
\r
1980 std::string port, previousPort;
\r
1981 unsigned int nChannels = 0, nDevices = 0;
\r
1982 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1984 // Parse the port names up to the first colon (:).
\r
1985 size_t iColon = 0;
\r
1987 port = (char *) ports[ nChannels ];
\r
1988 iColon = port.find(":");
\r
1989 if ( iColon != std::string::npos ) {
\r
1990 port = port.substr( 0, iColon + 1 );
\r
1991 if ( port != previousPort ) {
\r
1993 previousPort = port;
\r
1996 } while ( ports[++nChannels] );
\r
2000 jack_client_close( client );
\r
2004 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
2006 RtAudio::DeviceInfo info;
\r
2007 info.probed = false;
\r
2009 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
2010 jack_status_t *status = NULL;
\r
2011 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
2012 if ( client == 0 ) {
\r
2013 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
2014 error( RtAudioError::WARNING );
\r
2018 const char **ports;
\r
2019 std::string port, previousPort;
\r
2020 unsigned int nPorts = 0, nDevices = 0;
\r
2021 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2023 // Parse the port names up to the first colon (:).
\r
2024 size_t iColon = 0;
\r
2026 port = (char *) ports[ nPorts ];
\r
2027 iColon = port.find(":");
\r
2028 if ( iColon != std::string::npos ) {
\r
2029 port = port.substr( 0, iColon );
\r
2030 if ( port != previousPort ) {
\r
2031 if ( nDevices == device ) info.name = port;
\r
2033 previousPort = port;
\r
2036 } while ( ports[++nPorts] );
\r
2040 if ( device >= nDevices ) {
\r
2041 jack_client_close( client );
\r
2042 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
2043 error( RtAudioError::INVALID_USE );
\r
2047 // Get the current jack server sample rate.
\r
2048 info.sampleRates.clear();
\r
2050 info.preferredSampleRate = jack_get_sample_rate( client );
\r
2051 info.sampleRates.push_back( info.preferredSampleRate );
\r
2053 // Count the available ports containing the client name as device
\r
2054 // channels. Jack "input ports" equal RtAudio output channels.
\r
2055 unsigned int nChannels = 0;
\r
2056 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
2058 while ( ports[ nChannels ] ) nChannels++;
\r
2060 info.outputChannels = nChannels;
\r
2063 // Jack "output ports" equal RtAudio input channels.
\r
2065 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
2067 while ( ports[ nChannels ] ) nChannels++;
\r
2069 info.inputChannels = nChannels;
\r
2072 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2073 jack_client_close(client);
\r
2074 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2075 error( RtAudioError::WARNING );
\r
2079 // If device opens for both playback and capture, we determine the channels.
\r
2080 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2081 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2083 // Jack always uses 32-bit floats.
\r
2084 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2086 // Jack doesn't provide default devices so we'll use the first available one.
\r
2087 if ( device == 0 && info.outputChannels > 0 )
\r
2088 info.isDefaultOutput = true;
\r
2089 if ( device == 0 && info.inputChannels > 0 )
\r
2090 info.isDefaultInput = true;
\r
2092 jack_client_close(client);
\r
2093 info.probed = true;
\r
2097 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2099 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2101 RtApiJack *object = (RtApiJack *) info->object;
\r
2102 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2107 // This function will be called by a spawned thread when the Jack
\r
2108 // server signals that it is shutting down. It is necessary to handle
\r
2109 // it this way because the jackShutdown() function must return before
\r
2110 // the jack_deactivate() function (in closeStream()) will return.
\r
2111 static void *jackCloseStream( void *ptr )
\r
2113 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2114 RtApiJack *object = (RtApiJack *) info->object;
\r
2116 object->closeStream();
\r
2118 pthread_exit( NULL );
\r
2120 static void jackShutdown( void *infoPointer )
\r
2122 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2123 RtApiJack *object = (RtApiJack *) info->object;
\r
2125 // Check current stream state. If stopped, then we'll assume this
\r
2126 // was called as a result of a call to RtApiJack::stopStream (the
\r
2127 // deactivation of a client handle causes this function to be called).
\r
2128 // If not, we'll assume the Jack server is shutting down or some
\r
2129 // other problem occurred and we should close the stream.
\r
2130 if ( object->isStreamRunning() == false ) return;
\r
2132 ThreadHandle threadId;
\r
2133 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2134 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2137 static int jackXrun( void *infoPointer )
\r
2139 JackHandle *handle = (JackHandle *) infoPointer;
\r
2141 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2142 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2147 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2148 unsigned int firstChannel, unsigned int sampleRate,
\r
2149 RtAudioFormat format, unsigned int *bufferSize,
\r
2150 RtAudio::StreamOptions *options )
\r
2152 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2154 // Look for jack server and try to become a client (only do once per stream).
\r
2155 jack_client_t *client = 0;
\r
2156 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2157 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2158 jack_status_t *status = NULL;
\r
2159 if ( options && !options->streamName.empty() )
\r
2160 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2162 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2163 if ( client == 0 ) {
\r
2164 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2165 error( RtAudioError::WARNING );
\r
2170 // The handle must have been created on an earlier pass.
\r
2171 client = handle->client;
\r
2174 const char **ports;
\r
2175 std::string port, previousPort, deviceName;
\r
2176 unsigned int nPorts = 0, nDevices = 0;
\r
2177 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2179 // Parse the port names up to the first colon (:).
\r
2180 size_t iColon = 0;
\r
2182 port = (char *) ports[ nPorts ];
\r
2183 iColon = port.find(":");
\r
2184 if ( iColon != std::string::npos ) {
\r
2185 port = port.substr( 0, iColon );
\r
2186 if ( port != previousPort ) {
\r
2187 if ( nDevices == device ) deviceName = port;
\r
2189 previousPort = port;
\r
2192 } while ( ports[++nPorts] );
\r
2196 if ( device >= nDevices ) {
\r
2197 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2201 // Count the available ports containing the client name as device
\r
2202 // channels. Jack "input ports" equal RtAudio output channels.
\r
2203 unsigned int nChannels = 0;
\r
2204 unsigned long flag = JackPortIsInput;
\r
2205 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2206 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2208 while ( ports[ nChannels ] ) nChannels++;
\r
2212 // Compare the jack ports for specified client to the requested number of channels.
\r
2213 if ( nChannels < (channels + firstChannel) ) {
\r
2214 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2215 errorText_ = errorStream_.str();
\r
2219 // Check the jack server sample rate.
\r
2220 unsigned int jackRate = jack_get_sample_rate( client );
\r
2221 if ( sampleRate != jackRate ) {
\r
2222 jack_client_close( client );
\r
2223 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2224 errorText_ = errorStream_.str();
\r
2227 stream_.sampleRate = jackRate;
\r
2229 // Get the latency of the JACK port.
\r
2230 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2231 if ( ports[ firstChannel ] ) {
\r
2232 // Added by Ge Wang
\r
2233 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2234 // the range (usually the min and max are equal)
\r
2235 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2236 // get the latency range
\r
2237 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2238 // be optimistic, use the min!
\r
2239 stream_.latency[mode] = latrange.min;
\r
2240 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2244 // The jack server always uses 32-bit floating-point data.
\r
2245 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2246 stream_.userFormat = format;
\r
2248 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2249 else stream_.userInterleaved = true;
\r
2251 // Jack always uses non-interleaved buffers.
\r
2252 stream_.deviceInterleaved[mode] = false;
\r
2254 // Jack always provides host byte-ordered data.
\r
2255 stream_.doByteSwap[mode] = false;
\r
2257 // Get the buffer size. The buffer size and number of buffers
\r
2258 // (periods) is set when the jack server is started.
\r
2259 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2260 *bufferSize = stream_.bufferSize;
\r
2262 stream_.nDeviceChannels[mode] = channels;
\r
2263 stream_.nUserChannels[mode] = channels;
\r
2265 // Set flags for buffer conversion.
\r
2266 stream_.doConvertBuffer[mode] = false;
\r
2267 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2268 stream_.doConvertBuffer[mode] = true;
\r
2269 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2270 stream_.nUserChannels[mode] > 1 )
\r
2271 stream_.doConvertBuffer[mode] = true;
\r
2273 // Allocate our JackHandle structure for the stream.
\r
2274 if ( handle == 0 ) {
\r
2276 handle = new JackHandle;
\r
2278 catch ( std::bad_alloc& ) {
\r
2279 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2283 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2284 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2287 stream_.apiHandle = (void *) handle;
\r
2288 handle->client = client;
\r
2290 handle->deviceName[mode] = deviceName;
\r
2292 // Allocate necessary internal buffers.
\r
2293 unsigned long bufferBytes;
\r
2294 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2295 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2296 if ( stream_.userBuffer[mode] == NULL ) {
\r
2297 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2301 if ( stream_.doConvertBuffer[mode] ) {
\r
2303 bool makeBuffer = true;
\r
2304 if ( mode == OUTPUT )
\r
2305 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2306 else { // mode == INPUT
\r
2307 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2308 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2309 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2310 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2314 if ( makeBuffer ) {
\r
2315 bufferBytes *= *bufferSize;
\r
2316 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2317 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2318 if ( stream_.deviceBuffer == NULL ) {
\r
2319 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2325 // Allocate memory for the Jack ports (channels) identifiers.
\r
2326 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2327 if ( handle->ports[mode] == NULL ) {
\r
2328 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2332 stream_.device[mode] = device;
\r
2333 stream_.channelOffset[mode] = firstChannel;
\r
2334 stream_.state = STREAM_STOPPED;
\r
2335 stream_.callbackInfo.object = (void *) this;
\r
2337 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2338 // We had already set up the stream for output.
\r
2339 stream_.mode = DUPLEX;
\r
2341 stream_.mode = mode;
\r
2342 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2343 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2344 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2347 // Register our ports.
\r
2349 if ( mode == OUTPUT ) {
\r
2350 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2351 snprintf( label, 64, "outport %d", i );
\r
2352 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2353 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2357 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2358 snprintf( label, 64, "inport %d", i );
\r
2359 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2360 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2364 // Setup the buffer conversion information structure. We don't use
\r
2365 // buffers to do channel offsets, so we override that parameter
\r
2367 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2373 pthread_cond_destroy( &handle->condition );
\r
2374 jack_client_close( handle->client );
\r
2376 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2377 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2380 stream_.apiHandle = 0;
\r
2383 for ( int i=0; i<2; i++ ) {
\r
2384 if ( stream_.userBuffer[i] ) {
\r
2385 free( stream_.userBuffer[i] );
\r
2386 stream_.userBuffer[i] = 0;
\r
2390 if ( stream_.deviceBuffer ) {
\r
2391 free( stream_.deviceBuffer );
\r
2392 stream_.deviceBuffer = 0;
\r
2398 void RtApiJack :: closeStream( void )
\r
2400 if ( stream_.state == STREAM_CLOSED ) {
\r
2401 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2402 error( RtAudioError::WARNING );
\r
2406 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2409 if ( stream_.state == STREAM_RUNNING )
\r
2410 jack_deactivate( handle->client );
\r
2412 jack_client_close( handle->client );
\r
2416 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2417 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2418 pthread_cond_destroy( &handle->condition );
\r
2420 stream_.apiHandle = 0;
\r
2423 for ( int i=0; i<2; i++ ) {
\r
2424 if ( stream_.userBuffer[i] ) {
\r
2425 free( stream_.userBuffer[i] );
\r
2426 stream_.userBuffer[i] = 0;
\r
2430 if ( stream_.deviceBuffer ) {
\r
2431 free( stream_.deviceBuffer );
\r
2432 stream_.deviceBuffer = 0;
\r
2435 stream_.mode = UNINITIALIZED;
\r
2436 stream_.state = STREAM_CLOSED;
\r
2439 void RtApiJack :: startStream( void )
\r
2442 RtApi::startStream();
\r
2443 if ( stream_.state == STREAM_RUNNING ) {
\r
2444 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2445 error( RtAudioError::WARNING );
\r
2449 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2450 int result = jack_activate( handle->client );
\r
2452 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2456 const char **ports;
\r
2458 // Get the list of available ports.
\r
2459 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2461 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2462 if ( ports == NULL) {
\r
2463 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2467 // Now make the port connections. Since RtAudio wasn't designed to
\r
2468 // allow the user to select particular channels of a device, we'll
\r
2469 // just open the first "nChannels" ports with offset.
\r
2470 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2472 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2473 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2476 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2483 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2485 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2486 if ( ports == NULL) {
\r
2487 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2491 // Now make the port connections. See note above.
\r
2492 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2494 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2495 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2498 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2505 handle->drainCounter = 0;
\r
2506 handle->internalDrain = false;
\r
2507 stream_.state = STREAM_RUNNING;
\r
2510 if ( result == 0 ) return;
\r
2511 error( RtAudioError::SYSTEM_ERROR );
\r
2514 void RtApiJack :: stopStream( void )
\r
2517 if ( stream_.state == STREAM_STOPPED ) {
\r
2518 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2519 error( RtAudioError::WARNING );
\r
2523 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2524 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2526 if ( handle->drainCounter == 0 ) {
\r
2527 handle->drainCounter = 2;
\r
2528 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2532 jack_deactivate( handle->client );
\r
2533 stream_.state = STREAM_STOPPED;
\r
2536 void RtApiJack :: abortStream( void )
\r
2539 if ( stream_.state == STREAM_STOPPED ) {
\r
2540 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2541 error( RtAudioError::WARNING );
\r
2545 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2546 handle->drainCounter = 2;
\r
2551 // This function will be called by a spawned thread when the user
\r
2552 // callback function signals that the stream should be stopped or
\r
2553 // aborted. It is necessary to handle it this way because the
\r
2554 // callbackEvent() function must return before the jack_deactivate()
\r
2555 // function will return.
\r
2556 static void *jackStopStream( void *ptr )
\r
2558 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2559 RtApiJack *object = (RtApiJack *) info->object;
\r
2561 object->stopStream();
\r
2562 pthread_exit( NULL );
\r
2565 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2567 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2568 if ( stream_.state == STREAM_CLOSED ) {
\r
2569 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2570 error( RtAudioError::WARNING );
\r
2573 if ( stream_.bufferSize != nframes ) {
\r
2574 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2575 error( RtAudioError::WARNING );
\r
2579 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2580 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2582 // Check if we were draining the stream and signal is finished.
\r
2583 if ( handle->drainCounter > 3 ) {
\r
2584 ThreadHandle threadId;
\r
2586 stream_.state = STREAM_STOPPING;
\r
2587 if ( handle->internalDrain == true )
\r
2588 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2590 pthread_cond_signal( &handle->condition );
\r
2594 // Invoke user callback first, to get fresh output data.
\r
2595 if ( handle->drainCounter == 0 ) {
\r
2596 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2597 double streamTime = getStreamTime();
\r
2598 RtAudioStreamStatus status = 0;
\r
2599 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2600 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2601 handle->xrun[0] = false;
\r
2603 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2604 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2605 handle->xrun[1] = false;
\r
2607 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2608 stream_.bufferSize, streamTime, status, info->userData );
\r
2609 if ( cbReturnValue == 2 ) {
\r
2610 stream_.state = STREAM_STOPPING;
\r
2611 handle->drainCounter = 2;
\r
2613 pthread_create( &id, NULL, jackStopStream, info );
\r
2616 else if ( cbReturnValue == 1 ) {
\r
2617 handle->drainCounter = 1;
\r
2618 handle->internalDrain = true;
\r
2622 jack_default_audio_sample_t *jackbuffer;
\r
2623 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2624 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2626 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2628 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2629 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2630 memset( jackbuffer, 0, bufferBytes );
\r
2634 else if ( stream_.doConvertBuffer[0] ) {
\r
2636 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2638 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2639 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2640 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2643 else { // no buffer conversion
\r
2644 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2645 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2646 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2651 // Don't bother draining input
\r
2652 if ( handle->drainCounter ) {
\r
2653 handle->drainCounter++;
\r
2657 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2659 if ( stream_.doConvertBuffer[1] ) {
\r
2660 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2661 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2662 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2664 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2666 else { // no buffer conversion
\r
2667 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2668 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2669 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2675 RtApi::tickStreamTime();
\r
2678 //******************** End of __UNIX_JACK__ *********************//
\r
2681 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2683 // The ASIO API is designed around a callback scheme, so this
\r
2684 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2685 // Jack. The primary constraint with ASIO is that it only allows
\r
2686 // access to a single driver at a time. Thus, it is not possible to
\r
2687 // have more than one simultaneous RtAudio stream.
\r
2689 // This implementation also requires a number of external ASIO files
\r
2690 // and a few global variables. The ASIO callback scheme does not
\r
2691 // allow for the passing of user data, so we must create a global
\r
2692 // pointer to our callbackInfo structure.
\r
2694 // On unix systems, we make use of a pthread condition variable.
\r
2695 // Since there is no equivalent in Windows, I hacked something based
\r
2696 // on information found in
\r
2697 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2699 #include "asiosys.h"
\r
2701 #include "iasiothiscallresolver.h"
\r
2702 #include "asiodrivers.h"
\r
2705 static AsioDrivers drivers;
\r
2706 static ASIOCallbacks asioCallbacks;
\r
2707 static ASIODriverInfo driverInfo;
\r
2708 static CallbackInfo *asioCallbackInfo;
\r
2709 static bool asioXRun;
\r
2711 struct AsioHandle {
\r
2712 int drainCounter; // Tracks callback counts when draining
\r
2713 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2714 ASIOBufferInfo *bufferInfos;
\r
2718 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2721 // Function declarations (definitions at end of section)
\r
2722 static const char* getAsioErrorString( ASIOError result );
\r
2723 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2724 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2726 RtApiAsio :: RtApiAsio()
\r
2728 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2729 // CoInitialize beforehand, but it must be for appartment threading
\r
2730 // (in which case, CoInitilialize will return S_FALSE here).
\r
2731 coInitialized_ = false;
\r
2732 HRESULT hr = CoInitialize( NULL );
\r
2733 if ( FAILED(hr) ) {
\r
2734 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2735 error( RtAudioError::WARNING );
\r
2737 coInitialized_ = true;
\r
2739 drivers.removeCurrentDriver();
\r
2740 driverInfo.asioVersion = 2;
\r
2742 // See note in DirectSound implementation about GetDesktopWindow().
\r
2743 driverInfo.sysRef = GetForegroundWindow();
\r
2746 RtApiAsio :: ~RtApiAsio()
\r
2748 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2749 if ( coInitialized_ ) CoUninitialize();
\r
2752 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2754 return (unsigned int) drivers.asioGetNumDev();
\r
2757 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2759 RtAudio::DeviceInfo info;
\r
2760 info.probed = false;
\r
2763 unsigned int nDevices = getDeviceCount();
\r
2764 if ( nDevices == 0 ) {
\r
2765 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2766 error( RtAudioError::INVALID_USE );
\r
2770 if ( device >= nDevices ) {
\r
2771 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2772 error( RtAudioError::INVALID_USE );
\r
2776 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2777 if ( stream_.state != STREAM_CLOSED ) {
\r
2778 if ( device >= devices_.size() ) {
\r
2779 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2780 error( RtAudioError::WARNING );
\r
2783 return devices_[ device ];
\r
2786 char driverName[32];
\r
2787 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2788 if ( result != ASE_OK ) {
\r
2789 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2790 errorText_ = errorStream_.str();
\r
2791 error( RtAudioError::WARNING );
\r
2795 info.name = driverName;
\r
2797 if ( !drivers.loadDriver( driverName ) ) {
\r
2798 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2799 errorText_ = errorStream_.str();
\r
2800 error( RtAudioError::WARNING );
\r
2804 result = ASIOInit( &driverInfo );
\r
2805 if ( result != ASE_OK ) {
\r
2806 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2807 errorText_ = errorStream_.str();
\r
2808 error( RtAudioError::WARNING );
\r
2812 // Determine the device channel information.
\r
2813 long inputChannels, outputChannels;
\r
2814 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2815 if ( result != ASE_OK ) {
\r
2816 drivers.removeCurrentDriver();
\r
2817 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2818 errorText_ = errorStream_.str();
\r
2819 error( RtAudioError::WARNING );
\r
2823 info.outputChannels = outputChannels;
\r
2824 info.inputChannels = inputChannels;
\r
2825 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2826 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2828 // Determine the supported sample rates.
\r
2829 info.sampleRates.clear();
\r
2830 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2831 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2832 if ( result == ASE_OK ) {
\r
2833 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2835 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
2836 info.preferredSampleRate = SAMPLE_RATES[i];
\r
2840 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2841 ASIOChannelInfo channelInfo;
\r
2842 channelInfo.channel = 0;
\r
2843 channelInfo.isInput = true;
\r
2844 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2845 result = ASIOGetChannelInfo( &channelInfo );
\r
2846 if ( result != ASE_OK ) {
\r
2847 drivers.removeCurrentDriver();
\r
2848 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2849 errorText_ = errorStream_.str();
\r
2850 error( RtAudioError::WARNING );
\r
2854 info.nativeFormats = 0;
\r
2855 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2856 info.nativeFormats |= RTAUDIO_SINT16;
\r
2857 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2858 info.nativeFormats |= RTAUDIO_SINT32;
\r
2859 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2860 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2861 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2862 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2863 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2864 info.nativeFormats |= RTAUDIO_SINT24;
\r
2866 if ( info.outputChannels > 0 )
\r
2867 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2868 if ( info.inputChannels > 0 )
\r
2869 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2871 info.probed = true;
\r
2872 drivers.removeCurrentDriver();
\r
2876 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2878 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2879 object->callbackEvent( index );
\r
2882 void RtApiAsio :: saveDeviceInfo( void )
\r
2886 unsigned int nDevices = getDeviceCount();
\r
2887 devices_.resize( nDevices );
\r
2888 for ( unsigned int i=0; i<nDevices; i++ )
\r
2889 devices_[i] = getDeviceInfo( i );
\r
2892 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2893 unsigned int firstChannel, unsigned int sampleRate,
\r
2894 RtAudioFormat format, unsigned int *bufferSize,
\r
2895 RtAudio::StreamOptions *options )
\r
2896 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
2898 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
\r
2900 // For ASIO, a duplex stream MUST use the same driver.
\r
2901 if ( isDuplexInput && stream_.device[0] != device ) {
\r
2902 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2906 char driverName[32];
\r
2907 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2908 if ( result != ASE_OK ) {
\r
2909 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2910 errorText_ = errorStream_.str();
\r
2914 // Only load the driver once for duplex stream.
\r
2915 if ( !isDuplexInput ) {
\r
2916 // The getDeviceInfo() function will not work when a stream is open
\r
2917 // because ASIO does not allow multiple devices to run at the same
\r
2918 // time. Thus, we'll probe the system before opening a stream and
\r
2919 // save the results for use by getDeviceInfo().
\r
2920 this->saveDeviceInfo();
\r
2922 if ( !drivers.loadDriver( driverName ) ) {
\r
2923 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2924 errorText_ = errorStream_.str();
\r
2928 result = ASIOInit( &driverInfo );
\r
2929 if ( result != ASE_OK ) {
\r
2930 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2931 errorText_ = errorStream_.str();
\r
2936 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
\r
2937 bool buffersAllocated = false;
\r
2938 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2939 unsigned int nChannels;
\r
2942 // Check the device channel count.
\r
2943 long inputChannels, outputChannels;
\r
2944 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2945 if ( result != ASE_OK ) {
\r
2946 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2947 errorText_ = errorStream_.str();
\r
2951 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2952 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2953 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2954 errorText_ = errorStream_.str();
\r
2957 stream_.nDeviceChannels[mode] = channels;
\r
2958 stream_.nUserChannels[mode] = channels;
\r
2959 stream_.channelOffset[mode] = firstChannel;
\r
2961 // Verify the sample rate is supported.
\r
2962 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2963 if ( result != ASE_OK ) {
\r
2964 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2965 errorText_ = errorStream_.str();
\r
2969 // Get the current sample rate
\r
2970 ASIOSampleRate currentRate;
\r
2971 result = ASIOGetSampleRate( ¤tRate );
\r
2972 if ( result != ASE_OK ) {
\r
2973 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2974 errorText_ = errorStream_.str();
\r
2978 // Set the sample rate only if necessary
\r
2979 if ( currentRate != sampleRate ) {
\r
2980 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2981 if ( result != ASE_OK ) {
\r
2982 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2983 errorText_ = errorStream_.str();
\r
2988 // Determine the driver data type.
\r
2989 ASIOChannelInfo channelInfo;
\r
2990 channelInfo.channel = 0;
\r
2991 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2992 else channelInfo.isInput = true;
\r
2993 result = ASIOGetChannelInfo( &channelInfo );
\r
2994 if ( result != ASE_OK ) {
\r
2995 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2996 errorText_ = errorStream_.str();
\r
3000 // Assuming WINDOWS host is always little-endian.
\r
3001 stream_.doByteSwap[mode] = false;
\r
3002 stream_.userFormat = format;
\r
3003 stream_.deviceFormat[mode] = 0;
\r
3004 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
3005 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
3006 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
3008 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
3009 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
3010 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
3012 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
3013 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
3014 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
3016 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
3017 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
3018 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
3020 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
3021 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
3022 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
3025 if ( stream_.deviceFormat[mode] == 0 ) {
\r
3026 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
3027 errorText_ = errorStream_.str();
\r
3031 // Set the buffer size. For a duplex stream, this will end up
\r
3032 // setting the buffer size based on the input constraints, which
\r
3034 long minSize, maxSize, preferSize, granularity;
\r
3035 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
3036 if ( result != ASE_OK ) {
\r
3037 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
3038 errorText_ = errorStream_.str();
\r
3042 if ( isDuplexInput ) {
\r
3043 // When this is the duplex input (output was opened before), then we have to use the same
\r
3044 // buffersize as the output, because it might use the preferred buffer size, which most
\r
3045 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
\r
3046 // So instead of throwing an error, make them equal. The caller uses the reference
\r
3047 // to the "bufferSize" param as usual to set up processing buffers.
\r
3049 *bufferSize = stream_.bufferSize;
\r
3052 if ( *bufferSize == 0 ) *bufferSize = preferSize;
\r
3053 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3054 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3055 else if ( granularity == -1 ) {
\r
3056 // Make sure bufferSize is a power of two.
\r
3057 int log2_of_min_size = 0;
\r
3058 int log2_of_max_size = 0;
\r
3060 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
3061 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
3062 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
3065 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
3066 int min_delta_num = log2_of_min_size;
\r
3068 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
3069 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
3070 if (current_delta < min_delta) {
\r
3071 min_delta = current_delta;
\r
3072 min_delta_num = i;
\r
3076 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
3077 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3078 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3080 else if ( granularity != 0 ) {
\r
3081 // Set to an even multiple of granularity, rounding up.
\r
3082 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
3087 // we don't use it anymore, see above!
\r
3088 // Just left it here for the case...
\r
3089 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
\r
3090 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3095 stream_.bufferSize = *bufferSize;
\r
3096 stream_.nBuffers = 2;
\r
3098 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3099 else stream_.userInterleaved = true;
\r
3101 // ASIO always uses non-interleaved buffers.
\r
3102 stream_.deviceInterleaved[mode] = false;
\r
3104 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3105 if ( handle == 0 ) {
\r
3107 handle = new AsioHandle;
\r
3109 catch ( std::bad_alloc& ) {
\r
3110 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3113 handle->bufferInfos = 0;
\r
3115 // Create a manual-reset event.
\r
3116 handle->condition = CreateEvent( NULL, // no security
\r
3117 TRUE, // manual-reset
\r
3118 FALSE, // non-signaled initially
\r
3119 NULL ); // unnamed
\r
3120 stream_.apiHandle = (void *) handle;
\r
3123 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3124 // and output separately, we'll have to dispose of previously
\r
3125 // created output buffers for a duplex stream.
\r
3126 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3127 ASIODisposeBuffers();
\r
3128 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3131 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3133 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3134 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3135 if ( handle->bufferInfos == NULL ) {
\r
3136 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3137 errorText_ = errorStream_.str();
\r
3141 ASIOBufferInfo *infos;
\r
3142 infos = handle->bufferInfos;
\r
3143 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3144 infos->isInput = ASIOFalse;
\r
3145 infos->channelNum = i + stream_.channelOffset[0];
\r
3146 infos->buffers[0] = infos->buffers[1] = 0;
\r
3148 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3149 infos->isInput = ASIOTrue;
\r
3150 infos->channelNum = i + stream_.channelOffset[1];
\r
3151 infos->buffers[0] = infos->buffers[1] = 0;
\r
3154 // prepare for callbacks
\r
3155 stream_.sampleRate = sampleRate;
\r
3156 stream_.device[mode] = device;
\r
3157 stream_.mode = isDuplexInput ? DUPLEX : mode;
\r
3159 // store this class instance before registering callbacks, that are going to use it
\r
3160 asioCallbackInfo = &stream_.callbackInfo;
\r
3161 stream_.callbackInfo.object = (void *) this;
\r
3163 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3164 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3165 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3166 asioCallbacks.asioMessage = &asioMessages;
\r
3167 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3168 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3169 if ( result != ASE_OK ) {
\r
3170 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
\r
3171 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
\r
3172 // in that case, let's be naïve and try that instead
\r
3173 *bufferSize = preferSize;
\r
3174 stream_.bufferSize = *bufferSize;
\r
3175 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3178 if ( result != ASE_OK ) {
\r
3179 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3180 errorText_ = errorStream_.str();
\r
3183 buffersAllocated = true;
\r
3184 stream_.state = STREAM_STOPPED;
\r
3186 // Set flags for buffer conversion.
\r
3187 stream_.doConvertBuffer[mode] = false;
\r
3188 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3189 stream_.doConvertBuffer[mode] = true;
\r
3190 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3191 stream_.nUserChannels[mode] > 1 )
\r
3192 stream_.doConvertBuffer[mode] = true;
\r
3194 // Allocate necessary internal buffers
\r
3195 unsigned long bufferBytes;
\r
3196 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3197 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3198 if ( stream_.userBuffer[mode] == NULL ) {
\r
3199 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3203 if ( stream_.doConvertBuffer[mode] ) {
\r
3205 bool makeBuffer = true;
\r
3206 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3207 if ( isDuplexInput && stream_.deviceBuffer ) {
\r
3208 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3209 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3212 if ( makeBuffer ) {
\r
3213 bufferBytes *= *bufferSize;
\r
3214 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3215 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3216 if ( stream_.deviceBuffer == NULL ) {
\r
3217 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3223 // Determine device latencies
\r
3224 long inputLatency, outputLatency;
\r
3225 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3226 if ( result != ASE_OK ) {
\r
3227 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3228 errorText_ = errorStream_.str();
\r
3229 error( RtAudioError::WARNING); // warn but don't fail
\r
3232 stream_.latency[0] = outputLatency;
\r
3233 stream_.latency[1] = inputLatency;
\r
3236 // Setup the buffer conversion information structure. We don't use
\r
3237 // buffers to do channel offsets, so we override that parameter
\r
3239 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3244 if ( !isDuplexInput ) {
\r
3245 // the cleanup for error in the duplex input, is done by RtApi::openStream
\r
3246 // So we clean up for single channel only
\r
3248 if ( buffersAllocated )
\r
3249 ASIODisposeBuffers();
\r
3251 drivers.removeCurrentDriver();
\r
3254 CloseHandle( handle->condition );
\r
3255 if ( handle->bufferInfos )
\r
3256 free( handle->bufferInfos );
\r
3259 stream_.apiHandle = 0;
\r
3263 if ( stream_.userBuffer[mode] ) {
\r
3264 free( stream_.userBuffer[mode] );
\r
3265 stream_.userBuffer[mode] = 0;
\r
3268 if ( stream_.deviceBuffer ) {
\r
3269 free( stream_.deviceBuffer );
\r
3270 stream_.deviceBuffer = 0;
\r
3275 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
3277 void RtApiAsio :: closeStream()
\r
3279 if ( stream_.state == STREAM_CLOSED ) {
\r
3280 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3281 error( RtAudioError::WARNING );
\r
3285 if ( stream_.state == STREAM_RUNNING ) {
\r
3286 stream_.state = STREAM_STOPPED;
\r
3289 ASIODisposeBuffers();
\r
3290 drivers.removeCurrentDriver();
\r
3292 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3294 CloseHandle( handle->condition );
\r
3295 if ( handle->bufferInfos )
\r
3296 free( handle->bufferInfos );
\r
3298 stream_.apiHandle = 0;
\r
3301 for ( int i=0; i<2; i++ ) {
\r
3302 if ( stream_.userBuffer[i] ) {
\r
3303 free( stream_.userBuffer[i] );
\r
3304 stream_.userBuffer[i] = 0;
\r
3308 if ( stream_.deviceBuffer ) {
\r
3309 free( stream_.deviceBuffer );
\r
3310 stream_.deviceBuffer = 0;
\r
3313 stream_.mode = UNINITIALIZED;
\r
3314 stream_.state = STREAM_CLOSED;
\r
3317 bool stopThreadCalled = false;
\r
3319 void RtApiAsio :: startStream()
\r
3322 RtApi::startStream();
\r
3323 if ( stream_.state == STREAM_RUNNING ) {
\r
3324 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3325 error( RtAudioError::WARNING );
\r
3329 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3330 ASIOError result = ASIOStart();
\r
3331 if ( result != ASE_OK ) {
\r
3332 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3333 errorText_ = errorStream_.str();
\r
3337 handle->drainCounter = 0;
\r
3338 handle->internalDrain = false;
\r
3339 ResetEvent( handle->condition );
\r
3340 stream_.state = STREAM_RUNNING;
\r
3344 stopThreadCalled = false;
\r
3346 if ( result == ASE_OK ) return;
\r
3347 error( RtAudioError::SYSTEM_ERROR );
\r
3350 void RtApiAsio :: stopStream()
\r
3353 if ( stream_.state == STREAM_STOPPED ) {
\r
3354 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3355 error( RtAudioError::WARNING );
\r
3359 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3360 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3361 if ( handle->drainCounter == 0 ) {
\r
3362 handle->drainCounter = 2;
\r
3363 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3367 stream_.state = STREAM_STOPPED;
\r
3369 ASIOError result = ASIOStop();
\r
3370 if ( result != ASE_OK ) {
\r
3371 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3372 errorText_ = errorStream_.str();
\r
3375 if ( result == ASE_OK ) return;
\r
3376 error( RtAudioError::SYSTEM_ERROR );
\r
3379 void RtApiAsio :: abortStream()
\r
3382 if ( stream_.state == STREAM_STOPPED ) {
\r
3383 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3384 error( RtAudioError::WARNING );
\r
3388 // The following lines were commented-out because some behavior was
\r
3389 // noted where the device buffers need to be zeroed to avoid
\r
3390 // continuing sound, even when the device buffers are completely
\r
3391 // disposed. So now, calling abort is the same as calling stop.
\r
3392 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3393 // handle->drainCounter = 2;
\r
3397 // This function will be called by a spawned thread when the user
\r
3398 // callback function signals that the stream should be stopped or
\r
3399 // aborted. It is necessary to handle it this way because the
\r
3400 // callbackEvent() function must return before the ASIOStop()
\r
3401 // function will return.
\r
3402 static unsigned __stdcall asioStopStream( void *ptr )
\r
3404 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3405 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3407 object->stopStream();
\r
3408 _endthreadex( 0 );
\r
3412 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3414 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3415 if ( stream_.state == STREAM_CLOSED ) {
\r
3416 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3417 error( RtAudioError::WARNING );
\r
3421 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3422 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3424 // Check if we were draining the stream and signal if finished.
\r
3425 if ( handle->drainCounter > 3 ) {
\r
3427 stream_.state = STREAM_STOPPING;
\r
3428 if ( handle->internalDrain == false )
\r
3429 SetEvent( handle->condition );
\r
3430 else { // spawn a thread to stop the stream
\r
3431 unsigned threadId;
\r
3432 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3433 &stream_.callbackInfo, 0, &threadId );
\r
3438 // Invoke user callback to get fresh output data UNLESS we are
\r
3439 // draining stream.
\r
3440 if ( handle->drainCounter == 0 ) {
\r
3441 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3442 double streamTime = getStreamTime();
\r
3443 RtAudioStreamStatus status = 0;
\r
3444 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3445 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3448 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3449 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3452 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3453 stream_.bufferSize, streamTime, status, info->userData );
\r
3454 if ( cbReturnValue == 2 ) {
\r
3455 stream_.state = STREAM_STOPPING;
\r
3456 handle->drainCounter = 2;
\r
3457 unsigned threadId;
\r
3458 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3459 &stream_.callbackInfo, 0, &threadId );
\r
3462 else if ( cbReturnValue == 1 ) {
\r
3463 handle->drainCounter = 1;
\r
3464 handle->internalDrain = true;
\r
3468 unsigned int nChannels, bufferBytes, i, j;
\r
3469 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3470 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3472 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3474 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3476 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3477 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3478 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3482 else if ( stream_.doConvertBuffer[0] ) {
\r
3484 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3485 if ( stream_.doByteSwap[0] )
\r
3486 byteSwapBuffer( stream_.deviceBuffer,
\r
3487 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3488 stream_.deviceFormat[0] );
\r
3490 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3491 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3492 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3493 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3499 if ( stream_.doByteSwap[0] )
\r
3500 byteSwapBuffer( stream_.userBuffer[0],
\r
3501 stream_.bufferSize * stream_.nUserChannels[0],
\r
3502 stream_.userFormat );
\r
3504 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3505 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3506 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3507 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3513 // Don't bother draining input
\r
3514 if ( handle->drainCounter ) {
\r
3515 handle->drainCounter++;
\r
3519 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3521 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3523 if (stream_.doConvertBuffer[1]) {
\r
3525 // Always interleave ASIO input data.
\r
3526 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3527 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3528 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3529 handle->bufferInfos[i].buffers[bufferIndex],
\r
3533 if ( stream_.doByteSwap[1] )
\r
3534 byteSwapBuffer( stream_.deviceBuffer,
\r
3535 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3536 stream_.deviceFormat[1] );
\r
3537 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3541 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3542 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3543 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3544 handle->bufferInfos[i].buffers[bufferIndex],
\r
3549 if ( stream_.doByteSwap[1] )
\r
3550 byteSwapBuffer( stream_.userBuffer[1],
\r
3551 stream_.bufferSize * stream_.nUserChannels[1],
\r
3552 stream_.userFormat );
\r
3557 // The following call was suggested by Malte Clasen. While the API
\r
3558 // documentation indicates it should not be required, some device
\r
3559 // drivers apparently do not function correctly without it.
\r
3560 ASIOOutputReady();
\r
3562 RtApi::tickStreamTime();
\r
3566 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3568 // The ASIO documentation says that this usually only happens during
\r
3569 // external sync. Audio processing is not stopped by the driver,
\r
3570 // actual sample rate might not have even changed, maybe only the
\r
3571 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3574 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3576 object->stopStream();
\r
3578 catch ( RtAudioError &exception ) {
\r
3579 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3583 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3586 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3590 switch( selector ) {
\r
3591 case kAsioSelectorSupported:
\r
3592 if ( value == kAsioResetRequest
\r
3593 || value == kAsioEngineVersion
\r
3594 || value == kAsioResyncRequest
\r
3595 || value == kAsioLatenciesChanged
\r
3596 // The following three were added for ASIO 2.0, you don't
\r
3597 // necessarily have to support them.
\r
3598 || value == kAsioSupportsTimeInfo
\r
3599 || value == kAsioSupportsTimeCode
\r
3600 || value == kAsioSupportsInputMonitor)
\r
3603 case kAsioResetRequest:
\r
3604 // Defer the task and perform the reset of the driver during the
\r
3605 // next "safe" situation. You cannot reset the driver right now,
\r
3606 // as this code is called from the driver. Reset the driver is
\r
3607 // done by completely destruct is. I.e. ASIOStop(),
\r
3608 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3610 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3613 case kAsioResyncRequest:
\r
3614 // This informs the application that the driver encountered some
\r
3615 // non-fatal data loss. It is used for synchronization purposes
\r
3616 // of different media. Added mainly to work around the Win16Mutex
\r
3617 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3618 // which could lose data because the Mutex was held too long by
\r
3619 // another thread. However a driver can issue it in other
\r
3620 // situations, too.
\r
3621 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3625 case kAsioLatenciesChanged:
\r
3626 // This will inform the host application that the drivers were
\r
3627 // latencies changed. Beware, it this does not mean that the
\r
3628 // buffer sizes have changed! You might need to update internal
\r
3630 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3633 case kAsioEngineVersion:
\r
3634 // Return the supported ASIO version of the host application. If
\r
3635 // a host application does not implement this selector, ASIO 1.0
\r
3636 // is assumed by the driver.
\r
3639 case kAsioSupportsTimeInfo:
\r
3640 // Informs the driver whether the
\r
3641 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3642 // For compatibility with ASIO 1.0 drivers the host application
\r
3643 // should always support the "old" bufferSwitch method, too.
\r
3646 case kAsioSupportsTimeCode:
\r
3647 // Informs the driver whether application is interested in time
\r
3648 // code info. If an application does not need to know about time
\r
3649 // code, the driver has less work to do.
\r
3656 static const char* getAsioErrorString( ASIOError result )
\r
3661 const char*message;
\r
3664 static const Messages m[] =
\r
3666 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3667 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3668 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3669 { ASE_InvalidMode, "Invalid mode." },
\r
3670 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3671 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3672 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3675 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3676 if ( m[i].value == result ) return m[i].message;
\r
3678 return "Unknown error.";
\r
3681 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3685 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3687 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3688 // - Introduces support for the Windows WASAPI API
\r
3689 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3690 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3691 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3696 #include <audioclient.h>
\r
3698 #include <mmdeviceapi.h>
\r
3699 #include <FunctionDiscoveryKeys_devpkey.h>
\r
3701 //=============================================================================
\r
3703 #define SAFE_RELEASE( objectPtr )\
\r
3706 objectPtr->Release();\
\r
3707 objectPtr = NULL;\
\r
3710 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3712 //-----------------------------------------------------------------------------
\r
3714 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3715 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3716 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3717 // provide intermediate storage for read / write synchronization.
\r
3718 class WasapiBuffer
\r
3722 : buffer_( NULL ),
\r
3731 // sets the length of the internal ring buffer
\r
3732 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3735 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3737 bufferSize_ = bufferSize;
\r
3742 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3743 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3745 if ( !buffer || // incoming buffer is NULL
\r
3746 bufferSize == 0 || // incoming buffer has no data
\r
3747 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3752 unsigned int relOutIndex = outIndex_;
\r
3753 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3754 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3755 relOutIndex += bufferSize_;
\r
3758 // "in" index can end on the "out" index but cannot begin at it
\r
3759 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3760 return false; // not enough space between "in" index and "out" index
\r
3763 // copy buffer from external to internal
\r
3764 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3765 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3766 int fromInSize = bufferSize - fromZeroSize;
\r
3770 case RTAUDIO_SINT8:
\r
3771 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3772 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3774 case RTAUDIO_SINT16:
\r
3775 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3776 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3778 case RTAUDIO_SINT24:
\r
3779 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3780 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3782 case RTAUDIO_SINT32:
\r
3783 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3784 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3786 case RTAUDIO_FLOAT32:
\r
3787 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3788 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3790 case RTAUDIO_FLOAT64:
\r
3791 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3792 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3796 // update "in" index
\r
3797 inIndex_ += bufferSize;
\r
3798 inIndex_ %= bufferSize_;
\r
3803 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3804 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3806 if ( !buffer || // incoming buffer is NULL
\r
3807 bufferSize == 0 || // incoming buffer has no data
\r
3808 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3813 unsigned int relInIndex = inIndex_;
\r
3814 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3815 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3816 relInIndex += bufferSize_;
\r
3819 // "out" index can begin at and end on the "in" index
\r
3820 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3821 return false; // not enough space between "out" index and "in" index
\r
3824 // copy buffer from internal to external
\r
3825 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3826 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3827 int fromOutSize = bufferSize - fromZeroSize;
\r
3831 case RTAUDIO_SINT8:
\r
3832 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3833 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3835 case RTAUDIO_SINT16:
\r
3836 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3837 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3839 case RTAUDIO_SINT24:
\r
3840 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3841 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3843 case RTAUDIO_SINT32:
\r
3844 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3845 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3847 case RTAUDIO_FLOAT32:
\r
3848 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3849 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3851 case RTAUDIO_FLOAT64:
\r
3852 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3853 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3857 // update "out" index
\r
3858 outIndex_ += bufferSize;
\r
3859 outIndex_ %= bufferSize_;
\r
3866 unsigned int bufferSize_;
\r
3867 unsigned int inIndex_;
\r
3868 unsigned int outIndex_;
\r
3871 //-----------------------------------------------------------------------------
\r
3873 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3874 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3875 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3876 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3877 // one rate and its multiple.
\r
3878 void convertBufferWasapi( char* outBuffer,
\r
3879 const char* inBuffer,
\r
3880 const unsigned int& channelCount,
\r
3881 const unsigned int& inSampleRate,
\r
3882 const unsigned int& outSampleRate,
\r
3883 const unsigned int& inSampleCount,
\r
3884 unsigned int& outSampleCount,
\r
3885 const RtAudioFormat& format )
\r
3887 // calculate the new outSampleCount and relative sampleStep
\r
3888 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3889 float sampleStep = 1.0f / sampleRatio;
\r
3890 float inSampleFraction = 0.0f;
\r
3892 outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );
\r
3894 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3895 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3897 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3901 case RTAUDIO_SINT8:
\r
3902 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3904 case RTAUDIO_SINT16:
\r
3905 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3907 case RTAUDIO_SINT24:
\r
3908 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3910 case RTAUDIO_SINT32:
\r
3911 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3913 case RTAUDIO_FLOAT32:
\r
3914 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3916 case RTAUDIO_FLOAT64:
\r
3917 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3921 // jump to next in sample
\r
3922 inSampleFraction += sampleStep;
\r
3926 //-----------------------------------------------------------------------------
\r
3928 // A structure to hold various information related to the WASAPI implementation.
\r
3929 struct WasapiHandle
\r
3931 IAudioClient* captureAudioClient;
\r
3932 IAudioClient* renderAudioClient;
\r
3933 IAudioCaptureClient* captureClient;
\r
3934 IAudioRenderClient* renderClient;
\r
3935 HANDLE captureEvent;
\r
3936 HANDLE renderEvent;
\r
3939 : captureAudioClient( NULL ),
\r
3940 renderAudioClient( NULL ),
\r
3941 captureClient( NULL ),
\r
3942 renderClient( NULL ),
\r
3943 captureEvent( NULL ),
\r
3944 renderEvent( NULL ) {}
\r
3947 //=============================================================================
\r
3949 RtApiWasapi::RtApiWasapi()
\r
3950 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3952 // WASAPI can run either apartment or multi-threaded
\r
3953 HRESULT hr = CoInitialize( NULL );
\r
3954 if ( !FAILED( hr ) )
\r
3955 coInitialized_ = true;
\r
3957 // Instantiate device enumerator
\r
3958 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3959 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3960 ( void** ) &deviceEnumerator_ );
\r
3962 if ( FAILED( hr ) ) {
\r
3963 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3964 error( RtAudioError::DRIVER_ERROR );
\r
3968 //-----------------------------------------------------------------------------
\r
3970 RtApiWasapi::~RtApiWasapi()
\r
3972 if ( stream_.state != STREAM_CLOSED )
\r
3975 SAFE_RELEASE( deviceEnumerator_ );
\r
3977 // If this object previously called CoInitialize()
\r
3978 if ( coInitialized_ )
\r
3982 //=============================================================================
\r
3984 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3986 unsigned int captureDeviceCount = 0;
\r
3987 unsigned int renderDeviceCount = 0;
\r
3989 IMMDeviceCollection* captureDevices = NULL;
\r
3990 IMMDeviceCollection* renderDevices = NULL;
\r
3992 // Count capture devices
\r
3993 errorText_.clear();
\r
3994 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3995 if ( FAILED( hr ) ) {
\r
3996 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
4000 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4001 if ( FAILED( hr ) ) {
\r
4002 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
4006 // Count render devices
\r
4007 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4008 if ( FAILED( hr ) ) {
\r
4009 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
4013 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4014 if ( FAILED( hr ) ) {
\r
4015 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
4020 // release all references
\r
4021 SAFE_RELEASE( captureDevices );
\r
4022 SAFE_RELEASE( renderDevices );
\r
4024 if ( errorText_.empty() )
\r
4025 return captureDeviceCount + renderDeviceCount;
\r
4027 error( RtAudioError::DRIVER_ERROR );
\r
4031 //-----------------------------------------------------------------------------
\r
4033 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
4035 RtAudio::DeviceInfo info;
\r
4036 unsigned int captureDeviceCount = 0;
\r
4037 unsigned int renderDeviceCount = 0;
\r
4038 std::string defaultDeviceName;
\r
4039 bool isCaptureDevice = false;
\r
4041 PROPVARIANT deviceNameProp;
\r
4042 PROPVARIANT defaultDeviceNameProp;
\r
4044 IMMDeviceCollection* captureDevices = NULL;
\r
4045 IMMDeviceCollection* renderDevices = NULL;
\r
4046 IMMDevice* devicePtr = NULL;
\r
4047 IMMDevice* defaultDevicePtr = NULL;
\r
4048 IAudioClient* audioClient = NULL;
\r
4049 IPropertyStore* devicePropStore = NULL;
\r
4050 IPropertyStore* defaultDevicePropStore = NULL;
\r
4052 WAVEFORMATEX* deviceFormat = NULL;
\r
4053 WAVEFORMATEX* closestMatchFormat = NULL;
\r
4056 info.probed = false;
\r
4058 // Count capture devices
\r
4059 errorText_.clear();
\r
4060 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4061 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4062 if ( FAILED( hr ) ) {
\r
4063 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
4067 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4068 if ( FAILED( hr ) ) {
\r
4069 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
4073 // Count render devices
\r
4074 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4075 if ( FAILED( hr ) ) {
\r
4076 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
4080 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4081 if ( FAILED( hr ) ) {
\r
4082 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
4086 // validate device index
\r
4087 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4088 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
4089 errorType = RtAudioError::INVALID_USE;
\r
4093 // determine whether index falls within capture or render devices
\r
4094 if ( device >= renderDeviceCount ) {
\r
4095 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4096 if ( FAILED( hr ) ) {
\r
4097 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4100 isCaptureDevice = true;
\r
4103 hr = renderDevices->Item( device, &devicePtr );
\r
4104 if ( FAILED( hr ) ) {
\r
4105 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4108 isCaptureDevice = false;
\r
4111 // get default device name
\r
4112 if ( isCaptureDevice ) {
\r
4113 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4114 if ( FAILED( hr ) ) {
\r
4115 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4120 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4121 if ( FAILED( hr ) ) {
\r
4122 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4127 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4128 if ( FAILED( hr ) ) {
\r
4129 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4132 PropVariantInit( &defaultDeviceNameProp );
\r
4134 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4135 if ( FAILED( hr ) ) {
\r
4136 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4140 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
\r
4143 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4144 if ( FAILED( hr ) ) {
\r
4145 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4149 PropVariantInit( &deviceNameProp );
\r
4151 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4152 if ( FAILED( hr ) ) {
\r
4153 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4157 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
\r
4160 if ( isCaptureDevice ) {
\r
4161 info.isDefaultInput = info.name == defaultDeviceName;
\r
4162 info.isDefaultOutput = false;
\r
4165 info.isDefaultInput = false;
\r
4166 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4170 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4171 if ( FAILED( hr ) ) {
\r
4172 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4176 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4177 if ( FAILED( hr ) ) {
\r
4178 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4182 if ( isCaptureDevice ) {
\r
4183 info.inputChannels = deviceFormat->nChannels;
\r
4184 info.outputChannels = 0;
\r
4185 info.duplexChannels = 0;
\r
4188 info.inputChannels = 0;
\r
4189 info.outputChannels = deviceFormat->nChannels;
\r
4190 info.duplexChannels = 0;
\r
4194 info.sampleRates.clear();
\r
4196 // allow support for all sample rates as we have a built-in sample rate converter
\r
4197 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4198 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4200 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
\r
4203 info.nativeFormats = 0;
\r
4205 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4206 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4207 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4209 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4210 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4212 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4213 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4216 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4217 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4218 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4220 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4221 info.nativeFormats |= RTAUDIO_SINT8;
\r
4223 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4224 info.nativeFormats |= RTAUDIO_SINT16;
\r
4226 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4227 info.nativeFormats |= RTAUDIO_SINT24;
\r
4229 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4230 info.nativeFormats |= RTAUDIO_SINT32;
\r
4235 info.probed = true;
\r
4238 // release all references
\r
4239 PropVariantClear( &deviceNameProp );
\r
4240 PropVariantClear( &defaultDeviceNameProp );
\r
4242 SAFE_RELEASE( captureDevices );
\r
4243 SAFE_RELEASE( renderDevices );
\r
4244 SAFE_RELEASE( devicePtr );
\r
4245 SAFE_RELEASE( defaultDevicePtr );
\r
4246 SAFE_RELEASE( audioClient );
\r
4247 SAFE_RELEASE( devicePropStore );
\r
4248 SAFE_RELEASE( defaultDevicePropStore );
\r
4250 CoTaskMemFree( deviceFormat );
\r
4251 CoTaskMemFree( closestMatchFormat );
\r
4253 if ( !errorText_.empty() )
\r
4254 error( errorType );
\r
4258 //-----------------------------------------------------------------------------
\r
4260 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4262 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4263 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4271 //-----------------------------------------------------------------------------
\r
4273 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4275 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4276 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4284 //-----------------------------------------------------------------------------
\r
4286 void RtApiWasapi::closeStream( void )
\r
4288 if ( stream_.state == STREAM_CLOSED ) {
\r
4289 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4290 error( RtAudioError::WARNING );
\r
4294 if ( stream_.state != STREAM_STOPPED )
\r
4297 // clean up stream memory
\r
4298 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4299 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4301 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4302 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4304 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4305 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4307 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4308 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4310 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4311 stream_.apiHandle = NULL;
\r
4313 for ( int i = 0; i < 2; i++ ) {
\r
4314 if ( stream_.userBuffer[i] ) {
\r
4315 free( stream_.userBuffer[i] );
\r
4316 stream_.userBuffer[i] = 0;
\r
4320 if ( stream_.deviceBuffer ) {
\r
4321 free( stream_.deviceBuffer );
\r
4322 stream_.deviceBuffer = 0;
\r
4325 // update stream state
\r
4326 stream_.state = STREAM_CLOSED;
\r
4329 //-----------------------------------------------------------------------------
\r
4331 void RtApiWasapi::startStream( void )
\r
4334 RtApi::startStream();
\r
4336 if ( stream_.state == STREAM_RUNNING ) {
\r
4337 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4338 error( RtAudioError::WARNING );
\r
4342 // update stream state
\r
4343 stream_.state = STREAM_RUNNING;
\r
4345 // create WASAPI stream thread
\r
4346 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4348 if ( !stream_.callbackInfo.thread ) {
\r
4349 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4350 error( RtAudioError::THREAD_ERROR );
\r
4353 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4354 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4358 //-----------------------------------------------------------------------------
\r
4360 void RtApiWasapi::stopStream( void )
\r
4364 if ( stream_.state == STREAM_STOPPED ) {
\r
4365 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4366 error( RtAudioError::WARNING );
\r
4370 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4371 stream_.state = STREAM_STOPPING;
\r
4373 // wait until stream thread is stopped
\r
4374 while( stream_.state != STREAM_STOPPED ) {
\r
4378 // Wait for the last buffer to play before stopping.
\r
4379 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4381 // stop capture client if applicable
\r
4382 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4383 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4384 if ( FAILED( hr ) ) {
\r
4385 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4386 error( RtAudioError::DRIVER_ERROR );
\r
4391 // stop render client if applicable
\r
4392 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4393 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4394 if ( FAILED( hr ) ) {
\r
4395 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4396 error( RtAudioError::DRIVER_ERROR );
\r
4401 // close thread handle
\r
4402 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4403 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4404 error( RtAudioError::THREAD_ERROR );
\r
4408 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4411 //-----------------------------------------------------------------------------
\r
4413 void RtApiWasapi::abortStream( void )
\r
4417 if ( stream_.state == STREAM_STOPPED ) {
\r
4418 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4419 error( RtAudioError::WARNING );
\r
4423 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4424 stream_.state = STREAM_STOPPING;
\r
4426 // wait until stream thread is stopped
\r
4427 while ( stream_.state != STREAM_STOPPED ) {
\r
4431 // stop capture client if applicable
\r
4432 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4433 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4434 if ( FAILED( hr ) ) {
\r
4435 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4436 error( RtAudioError::DRIVER_ERROR );
\r
4441 // stop render client if applicable
\r
4442 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4443 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4444 if ( FAILED( hr ) ) {
\r
4445 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4446 error( RtAudioError::DRIVER_ERROR );
\r
4451 // close thread handle
\r
4452 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4453 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4454 error( RtAudioError::THREAD_ERROR );
\r
4458 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4461 //-----------------------------------------------------------------------------
\r
// Configure one direction (INPUT or OUTPUT) of a WASAPI stream on endpoint
// index `device`.  Creates the shared WasapiHandle on first use, activates an
// IAudioClient for the chosen endpoint, caches the device mix-format channel
// count and latency, then fills in the stream_ bookkeeping (mode, rates,
// conversion flags) and allocates the user-side buffer.  Returns
// SUCCESS/FAILURE; on FAILURE the partially opened stream is closed and the
// recorded errorType is reported.
4463 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4464 unsigned int firstChannel, unsigned int sampleRate,
4465 RtAudioFormat format, unsigned int* bufferSize,
4466 RtAudio::StreamOptions* options )
4468 bool methodResult = FAILURE;
4469 unsigned int captureDeviceCount = 0;
4470 unsigned int renderDeviceCount = 0;
4472 IMMDeviceCollection* captureDevices = NULL;
4473 IMMDeviceCollection* renderDevices = NULL;
4474 IMMDevice* devicePtr = NULL;
4475 WAVEFORMATEX* deviceFormat = NULL;
4476 unsigned int bufferBytes;
4477 stream_.state = STREAM_STOPPED;
4479 // create API Handle if not already created
4480 if ( !stream_.apiHandle )
4481 stream_.apiHandle = ( void* ) new WasapiHandle();
4483 // Count capture devices
4484 errorText_.clear();
4485 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4486 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4487 if ( FAILED( hr ) ) {
4488 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4492 hr = captureDevices->GetCount( &captureDeviceCount );
4493 if ( FAILED( hr ) ) {
4494 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4498 // Count render devices
4499 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4500 if ( FAILED( hr ) ) {
4501 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4505 hr = renderDevices->GetCount( &renderDeviceCount );
4506 if ( FAILED( hr ) ) {
4507 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4511 // validate device index
4512 if ( device >= captureDeviceCount + renderDeviceCount ) {
4513 errorType = RtAudioError::INVALID_USE;
4514 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4518 // determine whether index falls within capture or render devices
// Indices [0, renderDeviceCount) are render endpoints; capture endpoints
// follow, offset by renderDeviceCount.
4519 if ( device >= renderDeviceCount ) {
4520 if ( mode != INPUT ) {
4521 errorType = RtAudioError::INVALID_USE;
4522 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4526 // retrieve captureAudioClient from devicePtr
4527 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4529 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4530 if ( FAILED( hr ) ) {
4531 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4535 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4536 NULL, ( void** ) &captureAudioClient );
4537 if ( FAILED( hr ) ) {
4538 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4542 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4543 if ( FAILED( hr ) ) {
4544 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4548 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
// NOTE(review): latency is written through a long long* — confirm
// stream_.latency[] elements are 8 bytes wide, otherwise this overwrites
// the adjacent slot.
4549 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4552 if ( mode != OUTPUT ) {
4553 errorType = RtAudioError::INVALID_USE;
4554 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4558 // retrieve renderAudioClient from devicePtr
4559 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4561 hr = renderDevices->Item( device, &devicePtr );
4562 if ( FAILED( hr ) ) {
4563 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4567 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4568 NULL, ( void** ) &renderAudioClient );
4569 if ( FAILED( hr ) ) {
4570 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4574 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4575 if ( FAILED( hr ) ) {
4576 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4580 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
// NOTE(review): same long long* latency cast as the capture branch above.
4581 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4584 // fill stream data
// Opening the second direction of an already half-open stream promotes the
// stream mode to DUPLEX.
4585 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4586 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4587 stream_.mode = DUPLEX;
4590 stream_.mode = mode;
4593 stream_.device[mode] = device;
4594 stream_.doByteSwap[mode] = false;
4595 stream_.sampleRate = sampleRate;
4596 stream_.bufferSize = *bufferSize;
4597 stream_.nBuffers = 1;
4598 stream_.nUserChannels[mode] = channels;
4599 stream_.channelOffset[mode] = firstChannel;
4600 stream_.userFormat = format;
4601 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4603 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4604 stream_.userInterleaved = false;
4606 stream_.userInterleaved = true;
4607 stream_.deviceInterleaved[mode] = true;
4609 // Set flags for buffer conversion.
4610 stream_.doConvertBuffer[mode] = false;
// NOTE(review): `stream_.nUserChannels != stream_.nDeviceChannels` compares
// the two arrays' addresses (always unequal), so conversion is always
// enabled; the intent was likely nUserChannels[mode] != nDeviceChannels[mode].
4611 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4612 stream_.nUserChannels != stream_.nDeviceChannels )
4613 stream_.doConvertBuffer[mode] = true;
4614 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4615 stream_.nUserChannels[mode] > 1 )
4616 stream_.doConvertBuffer[mode] = true;
4618 if ( stream_.doConvertBuffer[mode] )
4619 setConvertInfo( mode, 0 );
4621 // Allocate necessary internal buffers
4622 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4624 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4625 if ( !stream_.userBuffer[mode] ) {
4626 errorType = RtAudioError::MEMORY_ERROR;
4627 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4631 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4632 stream_.callbackInfo.priority = 15;
4634 stream_.callbackInfo.priority = 0;
4636 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4637 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4639 methodResult = SUCCESS;
// Shared cleanup: release COM objects and the mix-format allocation
// regardless of outcome.
4643 SAFE_RELEASE( captureDevices );
4644 SAFE_RELEASE( renderDevices );
4645 SAFE_RELEASE( devicePtr );
4646 CoTaskMemFree( deviceFormat );
4648 // if method failed, close the stream
4649 if ( methodResult == FAILURE )
4652 if ( !errorText_.empty() )
4653 error( errorType );
4654 return methodResult;
\r
4657 //=============================================================================
\r
// Static thread entry point handed to CreateThread: unpacks the RtApiWasapi
// instance pointer and runs its stream-processing loop.
4659 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4662 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
// Static helper-thread entry point: asks the RtApiWasapi instance to stop
// its stream (used so the processing thread never stops itself directly).
4667 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4670 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
// Static helper-thread entry point: asks the RtApiWasapi instance to abort
// its stream (drop pending buffers rather than draining them).
4675 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4678 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4683 //-----------------------------------------------------------------------------
\r
// Core WASAPI streaming thread.  Initializes and starts the capture and/or
// render IAudioClients, then loops until stream_.state becomes
// STREAM_STOPPING: capture data is pulled through an intermediate ring
// buffer, resampled between the device mix rate and the stream rate with
// convertBufferWasapi(), handed to the user callback, and the callback's
// output is pushed back out the same way.  Callback return values 1/2 spawn
// helper threads that call stopStream()/abortStream().  On exit the mix
// formats and conversion buffer are freed and the state is set to
// STREAM_STOPPED.
4685 void RtApiWasapi::wasapiThread()
4687 // as this is a new thread, we must CoInitialize it
4688 CoInitialize( NULL );
4692 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4693 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4694 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4695 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4696 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4697 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4699 WAVEFORMATEX* captureFormat = NULL;
4700 WAVEFORMATEX* renderFormat = NULL;
4701 float captureSrRatio = 0.0f;
4702 float renderSrRatio = 0.0f;
4703 WasapiBuffer captureBuffer;
4704 WasapiBuffer renderBuffer;
4706 // declare local stream variables
4707 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4708 BYTE* streamBuffer = NULL;
4709 unsigned long captureFlags = 0;
4710 unsigned int bufferFrameCount = 0;
4711 unsigned int numFramesPadding = 0;
4712 unsigned int convBufferSize = 0;
4713 bool callbackPushed = false;
4714 bool callbackPulled = false;
4715 bool callbackStopped = false;
4716 int callbackResult = 0;
4718 // convBuffer is used to store converted buffers between WASAPI and the user
4719 char* convBuffer = NULL;
4720 unsigned int convBuffSize = 0;
4721 unsigned int deviceBuffSize = 0;
4723 errorText_.clear();
4724 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4726 // Attempt to assign "Pro Audio" characteristic to thread
// NOTE(review): neither the LoadLibrary result nor the GetProcAddress result
// is NULL-checked before the call below; a missing AVRT.dll would crash.
// The (LPCTSTR) cast of a narrow literal is also suspect under UNICODE
// builds — confirm against the build configuration.
4727 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4729 DWORD taskIndex = 0;
4730 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4731 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4732 FreeLibrary( AvrtDll );
4735 // start capture stream if applicable
4736 if ( captureAudioClient ) {
4737 hr = captureAudioClient->GetMixFormat( &captureFormat );
4738 if ( FAILED( hr ) ) {
4739 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
// Ratio of device mix rate to the user's requested stream rate; used to
// size buffers and drive resampling below.
4743 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4745 // initialize capture stream according to desired buffer size
4746 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
4747 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
4749 if ( !captureClient ) {
4750 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4751 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4752 desiredBufferPeriod,
4753 desiredBufferPeriod,
4756 if ( FAILED( hr ) ) {
4757 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4761 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4762 ( void** ) &captureClient );
4763 if ( FAILED( hr ) ) {
4764 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4768 // configure captureEvent to trigger on every available capture buffer
4769 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4770 if ( !captureEvent ) {
4771 errorType = RtAudioError::SYSTEM_ERROR;
4772 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4776 hr = captureAudioClient->SetEventHandle( captureEvent );
4777 if ( FAILED( hr ) ) {
4778 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Publish the newly created client/event back into the shared handle so
// other methods (and stream teardown) can reach them.
4782 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4783 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4786 unsigned int inBufferSize = 0;
4787 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4788 if ( FAILED( hr ) ) {
4789 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4793 // scale outBufferSize according to stream->user sample rate ratio
4794 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
4795 inBufferSize *= stream_.nDeviceChannels[INPUT];
4797 // set captureBuffer size
4798 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4800 // reset the capture stream
4801 hr = captureAudioClient->Reset();
4802 if ( FAILED( hr ) ) {
4803 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4807 // start the capture stream
4808 hr = captureAudioClient->Start();
4809 if ( FAILED( hr ) ) {
4810 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4815 // start render stream if applicable
// Mirrors the capture setup above for the render direction.
4816 if ( renderAudioClient ) {
4817 hr = renderAudioClient->GetMixFormat( &renderFormat );
4818 if ( FAILED( hr ) ) {
4819 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4823 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
4825 // initialize render stream according to desired buffer size
4826 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
4827 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
4829 if ( !renderClient ) {
4830 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4831 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4832 desiredBufferPeriod,
4833 desiredBufferPeriod,
4836 if ( FAILED( hr ) ) {
4837 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4841 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4842 ( void** ) &renderClient );
4843 if ( FAILED( hr ) ) {
4844 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4848 // configure renderEvent to trigger on every available render buffer
4849 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4850 if ( !renderEvent ) {
4851 errorType = RtAudioError::SYSTEM_ERROR;
4852 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4856 hr = renderAudioClient->SetEventHandle( renderEvent );
4857 if ( FAILED( hr ) ) {
4858 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4862 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4863 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4866 unsigned int outBufferSize = 0;
4867 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4868 if ( FAILED( hr ) ) {
4869 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4873 // scale inBufferSize according to user->stream sample rate ratio
4874 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
4875 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4877 // set renderBuffer size
4878 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4880 // reset the render stream
4881 hr = renderAudioClient->Reset();
4882 if ( FAILED( hr ) ) {
4883 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4887 // start the render stream
4888 hr = renderAudioClient->Start();
4889 if ( FAILED( hr ) ) {
4890 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
// Size the shared conversion/device buffers for the active direction(s);
// DUPLEX takes the larger of the two requirements.
4895 if ( stream_.mode == INPUT ) {
4896 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4897 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4899 else if ( stream_.mode == OUTPUT ) {
4900 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4901 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4903 else if ( stream_.mode == DUPLEX ) {
4904 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4905 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4906 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4907 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4910 convBuffer = ( char* ) malloc( convBuffSize );
4911 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4912 if ( !convBuffer || !stream_.deviceBuffer ) {
4913 errorType = RtAudioError::MEMORY_ERROR;
4914 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
4918 // stream process loop
4919 while ( stream_.state != STREAM_STOPPING ) {
4920 if ( !callbackPulled ) {
4923 // 1. Pull callback buffer from inputBuffer
4924 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
4925 // Convert callback buffer to user format
4927 if ( captureAudioClient ) {
4928 // Pull callback buffer from inputBuffer
4929 callbackPulled = captureBuffer.pullBuffer( convBuffer,
4930 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
4931 stream_.deviceFormat[INPUT] );
4933 if ( callbackPulled ) {
4934 // Convert callback buffer to user sample rate
4935 convertBufferWasapi( stream_.deviceBuffer,
4937 stream_.nDeviceChannels[INPUT],
4938 captureFormat->nSamplesPerSec,
4939 stream_.sampleRate,
4940 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
4942 stream_.deviceFormat[INPUT] );
4944 if ( stream_.doConvertBuffer[INPUT] ) {
4945 // Convert callback buffer to user format
4946 convertBuffer( stream_.userBuffer[INPUT],
4947 stream_.deviceBuffer,
4948 stream_.convertInfo[INPUT] );
4951 // no further conversion, simple copy deviceBuffer to userBuffer
4952 memcpy( stream_.userBuffer[INPUT],
4953 stream_.deviceBuffer,
4954 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
4959 // if there is no capture stream, set callbackPulled flag
4960 callbackPulled = true;
4963 // Execute Callback
4964 // ================
4965 // 1. Execute user callback method
4966 // 2. Handle return value from callback
4968 // if callback has not requested the stream to stop
4969 if ( callbackPulled && !callbackStopped ) {
4970 // Execute user callback method
4971 callbackResult = callback( stream_.userBuffer[OUTPUT],
4972 stream_.userBuffer[INPUT],
4973 stream_.bufferSize,
4975 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
4976 stream_.callbackInfo.userData );
4978 // Handle return value from callback
// Return value 1 = drain and stop; a helper thread is spawned because the
// processing thread cannot join itself.
4979 if ( callbackResult == 1 ) {
4980 // instantiate a thread to stop this thread
4981 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
4982 if ( !threadHandle ) {
4983 errorType = RtAudioError::THREAD_ERROR;
4984 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
4987 else if ( !CloseHandle( threadHandle ) ) {
4988 errorType = RtAudioError::THREAD_ERROR;
4989 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
4993 callbackStopped = true;
// Return value 2 = abort immediately via a helper thread.
4995 else if ( callbackResult == 2 ) {
4996 // instantiate a thread to stop this thread
4997 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
4998 if ( !threadHandle ) {
4999 errorType = RtAudioError::THREAD_ERROR;
5000 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5003 else if ( !CloseHandle( threadHandle ) ) {
5004 errorType = RtAudioError::THREAD_ERROR;
5005 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5009 callbackStopped = true;
5014 // Callback Output
5015 // ===============
5016 // 1. Convert callback buffer to stream format
5017 // 2. Convert callback buffer to stream sample rate and channel count
5018 // 3. Push callback buffer into outputBuffer
5020 if ( renderAudioClient && callbackPulled ) {
5021 if ( stream_.doConvertBuffer[OUTPUT] ) {
5022 // Convert callback buffer to stream format
5023 convertBuffer( stream_.deviceBuffer,
5024 stream_.userBuffer[OUTPUT],
5025 stream_.convertInfo[OUTPUT] );
5029 // Convert callback buffer to stream sample rate
5030 convertBufferWasapi( convBuffer,
5031 stream_.deviceBuffer,
5032 stream_.nDeviceChannels[OUTPUT],
5033 stream_.sampleRate,
5034 renderFormat->nSamplesPerSec,
5035 stream_.bufferSize,
5037 stream_.deviceFormat[OUTPUT] );
5039 // Push callback buffer into outputBuffer
5040 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5041 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5042 stream_.deviceFormat[OUTPUT] );
5045 // if there is no render stream, set callbackPushed flag
5046 callbackPushed = true;
5051 // 1. Get capture buffer from stream
5052 // 2. Push capture buffer into inputBuffer
5053 // 3. If 2. was successful: Release capture buffer
5055 if ( captureAudioClient ) {
5056 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
// Blocks indefinitely on the capture event here; relies on WASAPI
// signaling it for every device period.
5057 if ( !callbackPulled ) {
5058 WaitForSingleObject( captureEvent, INFINITE );
5061 // Get capture buffer from stream
5062 hr = captureClient->GetBuffer( &streamBuffer,
5063 &bufferFrameCount,
5064 &captureFlags, NULL, NULL );
5065 if ( FAILED( hr ) ) {
5066 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5070 if ( bufferFrameCount != 0 ) {
5071 // Push capture buffer into inputBuffer
5072 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5073 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5074 stream_.deviceFormat[INPUT] ) )
5076 // Release capture buffer
5077 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5078 if ( FAILED( hr ) ) {
5079 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5085 // Inform WASAPI that capture was unsuccessful
5086 hr = captureClient->ReleaseBuffer( 0 );
5087 if ( FAILED( hr ) ) {
5088 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5095 // Inform WASAPI that capture was unsuccessful
5096 hr = captureClient->ReleaseBuffer( 0 );
5097 if ( FAILED( hr ) ) {
5098 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5106 // 1. Get render buffer from stream
5107 // 2. Pull next buffer from outputBuffer
5108 // 3. If 2. was successful: Fill render buffer with next buffer
5109 // Release render buffer
5111 if ( renderAudioClient ) {
5112 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5113 if ( callbackPulled && !callbackPushed ) {
5114 WaitForSingleObject( renderEvent, INFINITE );
5117 // Get render buffer from stream
5118 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5119 if ( FAILED( hr ) ) {
5120 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5124 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5125 if ( FAILED( hr ) ) {
5126 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable frames = total buffer minus frames still queued for playback.
5130 bufferFrameCount -= numFramesPadding;
5132 if ( bufferFrameCount != 0 ) {
5133 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5134 if ( FAILED( hr ) ) {
5135 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5139 // Pull next buffer from outputBuffer
5140 // Fill render buffer with next buffer
5141 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5142 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5143 stream_.deviceFormat[OUTPUT] ) )
5145 // Release render buffer
5146 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5147 if ( FAILED( hr ) ) {
5148 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5154 // Inform WASAPI that render was unsuccessful
5155 hr = renderClient->ReleaseBuffer( 0, 0 );
5156 if ( FAILED( hr ) ) {
5157 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5164 // Inform WASAPI that render was unsuccessful
5165 hr = renderClient->ReleaseBuffer( 0, 0 );
5166 if ( FAILED( hr ) ) {
5167 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5173 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5174 if ( callbackPushed ) {
5175 callbackPulled = false;
5176 // tick stream time
5177 RtApi::tickStreamTime();
// Exit path: free the mix formats and conversion buffer, mark the stream
// stopped, and report any recorded error.
5184 CoTaskMemFree( captureFormat );
5185 CoTaskMemFree( renderFormat );
5187 free ( convBuffer );
5191 // update stream state
5192 stream_.state = STREAM_STOPPED;
5194 if ( errorText_.empty() )
5197 error( errorType );
\r
5200 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5204 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5206 // Modified by Robin Davies, October 2005
\r
5207 // - Improvements to DirectX pointer chasing.
\r
5208 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5209 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5210 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5211 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5213 #include <dsound.h>
\r
5214 #include <assert.h>
\r
5215 #include <algorithm>
\r
5217 #if defined(__MINGW32__)
\r
5218 // missing from latest mingw winapi
\r
5219 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5220 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5221 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5222 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5225 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5227 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5228 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
// Returns nonzero when `pointer` lies in the half-open circular interval
// [earlierPointer, laterPointer) on a ring buffer of `bufferSize` bytes.
// The positions are first normalized (unwrapped past the buffer end) so the
// linear comparison below is valid across a wrap-around.
5231 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5233 if ( pointer > bufferSize ) pointer -= bufferSize;
5234 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5235 if ( pointer < earlierPointer ) pointer += bufferSize;
5236 return pointer >= earlierPointer && pointer < laterPointer;
\r
5239 // A structure to hold various information related to the DirectSound
5240 // API implementation.
5242 unsigned int drainCounter; // Tracks callback counts when draining
5243 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Per-direction state arrays — presumably [0]=playback, [1]=capture; TODO
// confirm the index convention against the rest of the DS code.
5247 UINT bufferPointer[2];
5248 DWORD dsBufferSize[2];
5249 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero the drain counters and clear every
// per-direction slot (ids, buffers, xrun flags, pointers).
5253 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5256 // Declarations for utility functions, callbacks, and structures
5257 // specific to the DirectSound implementation.
// Enumeration callback handed to DirectSound(Capture)Enumerate below.
5258 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5259 LPCTSTR description,
5261 LPVOID lpContext );
// Maps a DirectSound HRESULT code to a human-readable string.
5263 static const char* getErrorString( int code );
// Audio-processing thread entry point for the DirectSound backend.
5265 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice default state: not yet (re)found; neither GUID slot valid.
5274 : found(false) { validId[0] = false; validId[1] = false; }
// Context record passed through lpContext to deviceQueryCallback.
5277 struct DsProbeData {
5279 std::vector<struct DsDevice>* dsDevices;
\r
// Constructor: initialize COM for this thread and remember whether we own
// the balancing CoUninitialize call (performed in the destructor).
5282 RtApiDs :: RtApiDs()
5284 // Dsound will run both-threaded. If CoInitialize fails, then just
5285 // accept whatever the mainline chose for a threading model.
5286 coInitialized_ = false;
5287 HRESULT hr = CoInitialize( NULL );
5288 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
// Destructor: balance the constructor's CoInitialize and make sure any open
// stream is closed before the API object disappears.
5291 RtApiDs :: ~RtApiDs()
5293 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5294 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5297 // The DirectSound default output is always the first device.
5298 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5303 // The DirectSound default input is always the first input device,
5304 // which is the first capture device enumerated.
5305 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
// Re-enumerate DirectSound output and capture devices, prune entries that
// have disappeared since the last query, and return the resulting count.
// Enumeration failures are reported as warnings and do not abort the query.
5310 unsigned int RtApiDs :: getDeviceCount( void )
5312 // Set query flag for previously found devices to false, so that we
5313 // can check for any devices that have disappeared.
5314 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5315 dsDevices[i].found = false;
5317 // Query DirectSound devices.
5318 struct DsProbeData probeInfo;
5319 probeInfo.isInput = false;
5320 probeInfo.dsDevices = &dsDevices;
5321 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5322 if ( FAILED( result ) ) {
5323 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5324 errorText_ = errorStream_.str();
5325 error( RtAudioError::WARNING );
5328 // Query DirectSoundCapture devices.
5329 probeInfo.isInput = true;
5330 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5331 if ( FAILED( result ) ) {
5332 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5333 errorText_ = errorStream_.str();
5334 error( RtAudioError::WARNING );
5337 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// Note: the index is only advanced when no element is erased, so the loop
// increment lives inside the (elided) body rather than the for header.
5338 for ( unsigned int i=0; i<dsDevices.size(); ) {
5339 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5343 return static_cast<unsigned int>(dsDevices.size());
\r
5346 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5348 RtAudio::DeviceInfo info;
\r
5349 info.probed = false;
\r
5351 if ( dsDevices.size() == 0 ) {
\r
5352 // Force a query of all devices
\r
5354 if ( dsDevices.size() == 0 ) {
\r
5355 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5356 error( RtAudioError::INVALID_USE );
\r
5361 if ( device >= dsDevices.size() ) {
\r
5362 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5363 error( RtAudioError::INVALID_USE );
\r
5368 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5370 LPDIRECTSOUND output;
\r
5372 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5373 if ( FAILED( result ) ) {
\r
5374 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5375 errorText_ = errorStream_.str();
\r
5376 error( RtAudioError::WARNING );
\r
5380 outCaps.dwSize = sizeof( outCaps );
\r
5381 result = output->GetCaps( &outCaps );
\r
5382 if ( FAILED( result ) ) {
\r
5383 output->Release();
\r
5384 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5385 errorText_ = errorStream_.str();
\r
5386 error( RtAudioError::WARNING );
\r
5390 // Get output channel information.
\r
5391 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5393 // Get sample rate information.
\r
5394 info.sampleRates.clear();
\r
5395 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5396 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5397 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
\r
5398 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5400 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
5401 info.preferredSampleRate = SAMPLE_RATES[k];
\r
5405 // Get format information.
\r
5406 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5407 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5409 output->Release();
\r
5411 if ( getDefaultOutputDevice() == device )
\r
5412 info.isDefaultOutput = true;
\r
5414 if ( dsDevices[ device ].validId[1] == false ) {
\r
5415 info.name = dsDevices[ device ].name;
\r
5416 info.probed = true;
\r
5422 LPDIRECTSOUNDCAPTURE input;
\r
5423 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5424 if ( FAILED( result ) ) {
\r
5425 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5426 errorText_ = errorStream_.str();
\r
5427 error( RtAudioError::WARNING );
\r
5432 inCaps.dwSize = sizeof( inCaps );
\r
5433 result = input->GetCaps( &inCaps );
\r
5434 if ( FAILED( result ) ) {
\r
5436 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5437 errorText_ = errorStream_.str();
\r
5438 error( RtAudioError::WARNING );
\r
5442 // Get input channel information.
\r
5443 info.inputChannels = inCaps.dwChannels;
\r
5445 // Get sample rate and format information.
\r
5446 std::vector<unsigned int> rates;
\r
5447 if ( inCaps.dwChannels >= 2 ) {
\r
5448 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5449 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5450 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5451 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5452 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5453 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5454 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5455 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5457 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5458 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5459 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5460 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5461 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5463 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5464 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5465 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5466 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5467 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5470 else if ( inCaps.dwChannels == 1 ) {
\r
5471 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5472 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5473 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5474 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5475 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5476 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5477 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5478 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5480 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5481 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5482 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5483 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5484 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5486 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5487 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5488 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5489 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5490 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5493 else info.inputChannels = 0; // technically, this would be an error
\r
5497 if ( info.inputChannels == 0 ) return info;
\r
5499 // Copy the supported rates to the info structure but avoid duplication.
\r
5501 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5503 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5504 if ( rates[i] == info.sampleRates[j] ) {
\r
5509 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5511 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5513 // If device opens for both playback and capture, we determine the channels.
\r
5514 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5515 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5517 if ( device == 0 ) info.isDefaultInput = true;
\r
5519 // Copy name and return.
\r
5520 info.name = dsDevices[ device ].name;
\r
5521 info.probed = true;
\r
// Open the requested DirectSound device for OUTPUT or INPUT: validate the
// device/channel request, negotiate the sample format (16-bit preferred over
// 8-bit), create and zero the DS playback (primary + secondary) or capture
// buffer, fill in the stream_ bookkeeping, allocate user/device conversion
// buffers and the DsHandle, and spin up the callback thread. Returns true on
// success; error paths set errorText_ and fall through to the cleanup code at
// the bottom of the function.
// NOTE(review): this extracted view is missing interleaved source lines
// (braces, `return FAILURE;` statements, declarations such as `HRESULT result`,
// `int nBuffers`, `DSCAPS outCaps`, `DSCAPS inCaps`, `DSBCAPS dsbcaps`,
// `LPVOID audioPtr`, `DWORD dataLen`, a `try {` line, and the `error:` goto
// label) — confirm against the canonical RtAudio.cpp before editing.
5525 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

5526 unsigned int firstChannel, unsigned int sampleRate,

5527 RtAudioFormat format, unsigned int *bufferSize,

5528 RtAudio::StreamOptions *options )

// DirectSound exposes at most stereo per device, so channels + offset > 2 fails.
5530 if ( channels + firstChannel > 2 ) {

5531 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";

5535 size_t nDevices = dsDevices.size();

5536 if ( nDevices == 0 ) {

5537 // This should not happen because a check is made before this function is called.

5538 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";

5542 if ( device >= nDevices ) {

5543 // This should not happen because a check is made before this function is called.

5544 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";

// Verify the device supports the requested direction (validId[0] = output, [1] = input).
5548 if ( mode == OUTPUT ) {

5549 if ( dsDevices[ device ].validId[0] == false ) {

5550 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";

5551 errorText_ = errorStream_.str();

5555 else { // mode == INPUT

5556 if ( dsDevices[ device ].validId[1] == false ) {

5557 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";

5558 errorText_ = errorStream_.str();

5563 // According to a note in PortAudio, using GetDesktopWindow()

5564 // instead of GetForegroundWindow() is supposed to avoid problems

5565 // that occur when the application's window is not the foreground

5566 // window. Also, if the application window closes before the

5567 // DirectSound buffer, DirectSound can crash. In the past, I had

5568 // problems when using GetDesktopWindow() but it seems fine now

5569 // (January 2010). I'll leave it commented here.

5570 // HWND hWnd = GetForegroundWindow();

5571 HWND hWnd = GetDesktopWindow();

5573 // Check the numberOfBuffers parameter and limit the lowest value to

5574 // two. This is a judgement call and a value of two is probably too

5575 // low for capture, but it should work for playback.

5577 if ( options ) nBuffers = options->numberOfBuffers;

5578 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;

5579 if ( nBuffers < 2 ) nBuffers = 3;

5581 // Check the lower range of the user-specified buffer size and set

5582 // (arbitrarily) to a lower bound of 32.

5583 if ( *bufferSize < 32 ) *bufferSize = 32;

5585 // Create the wave format structure. The data format setting will

5586 // be determined later.

5587 WAVEFORMATEX waveFormat;

5588 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );

5589 waveFormat.wFormatTag = WAVE_FORMAT_PCM;

5590 waveFormat.nChannels = channels + firstChannel;

5591 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;

5593 // Determine the device buffer size. By default, we'll use the value

5594 // defined above (32K), but we will grow it to make allowances for

5595 // very large software buffer sizes.

5596 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;

5597 DWORD dsPointerLeadTime = 0;

// Opaque handles to the DS object and its buffer; stashed into DsHandle below.
5599 void *ohandle = 0, *bhandle = 0;

// ---------------- Playback (OUTPUT) setup ----------------
5601 if ( mode == OUTPUT ) {

5603 LPDIRECTSOUND output;

5604 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

5605 if ( FAILED( result ) ) {

5606 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

5607 errorText_ = errorStream_.str();

5612 outCaps.dwSize = sizeof( outCaps );

5613 result = output->GetCaps( &outCaps );

5614 if ( FAILED( result ) ) {

5615 output->Release();

5616 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";

5617 errorText_ = errorStream_.str();

5621 // Check channel information.

// NOTE(review): the message below says "RtApiDs::getDeviceInfo" although we
// are in probeDeviceOpen — looks like a copy-paste slip; confirm and fix upstream.
5622 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {

5623 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";

5624 errorText_ = errorStream_.str();

5628 // Check format information. Use 16-bit format unless not

5629 // supported or user requests 8-bit.

5630 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&

5631 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {

5632 waveFormat.wBitsPerSample = 16;

5633 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5636 waveFormat.wBitsPerSample = 8;

5637 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5639 stream_.userFormat = format;

5641 // Update wave format structure and buffer information.

5642 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

5643 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

5644 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

5646 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

5647 while ( dsPointerLeadTime * 2U > dsBufferSize )

5648 dsBufferSize *= 2;

5650 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.

5651 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );

5652 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.

5653 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );

5654 if ( FAILED( result ) ) {

5655 output->Release();

5656 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";

5657 errorText_ = errorStream_.str();

5661 // Even though we will write to the secondary buffer, we need to

5662 // access the primary buffer to set the correct output format

5663 // (since the default is 8-bit, 22 kHz!). Setup the DS primary

5664 // buffer description.

5665 DSBUFFERDESC bufferDescription;

5666 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

5667 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

5668 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;

5670 // Obtain the primary buffer

5671 LPDIRECTSOUNDBUFFER buffer;

5672 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5673 if ( FAILED( result ) ) {

5674 output->Release();

5675 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";

5676 errorText_ = errorStream_.str();

5680 // Set the primary DS buffer sound format.

5681 result = buffer->SetFormat( &waveFormat );

5682 if ( FAILED( result ) ) {

5683 output->Release();

5684 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";

5685 errorText_ = errorStream_.str();

5689 // Setup the secondary DS buffer description.

5690 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

5691 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

5692 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

5693 DSBCAPS_GLOBALFOCUS |

5694 DSBCAPS_GETCURRENTPOSITION2 |

5695 DSBCAPS_LOCHARDWARE ); // Force hardware mixing

5696 bufferDescription.dwBufferBytes = dsBufferSize;

5697 bufferDescription.lpwfxFormat = &waveFormat;

5699 // Try to create the secondary DS buffer. If that doesn't work,

5700 // try to use software mixing. Otherwise, there's a problem.

5701 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5702 if ( FAILED( result ) ) {

5703 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

5704 DSBCAPS_GLOBALFOCUS |

5705 DSBCAPS_GETCURRENTPOSITION2 |

5706 DSBCAPS_LOCSOFTWARE ); // Force software mixing

5707 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5708 if ( FAILED( result ) ) {

5709 output->Release();

5710 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";

5711 errorText_ = errorStream_.str();

5716 // Get the buffer size ... might be different from what we specified.

5718 dsbcaps.dwSize = sizeof( DSBCAPS );

5719 result = buffer->GetCaps( &dsbcaps );

5720 if ( FAILED( result ) ) {

5721 output->Release();

5722 buffer->Release();

5723 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

5724 errorText_ = errorStream_.str();

5728 dsBufferSize = dsbcaps.dwBufferBytes;

5730 // Lock the DS buffer

5733 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

5734 if ( FAILED( result ) ) {

5735 output->Release();

5736 buffer->Release();

5737 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";

5738 errorText_ = errorStream_.str();

5742 // Zero the DS buffer

5743 ZeroMemory( audioPtr, dataLen );

5745 // Unlock the DS buffer

5746 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5747 if ( FAILED( result ) ) {

5748 output->Release();

5749 buffer->Release();

5750 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";

5751 errorText_ = errorStream_.str();

// Success: keep the DS object and secondary buffer as opaque handles.
5755 ohandle = (void *) output;

5756 bhandle = (void *) buffer;

// ---------------- Capture (INPUT) setup ----------------
5759 if ( mode == INPUT ) {

5761 LPDIRECTSOUNDCAPTURE input;

5762 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

5763 if ( FAILED( result ) ) {

5764 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

5765 errorText_ = errorStream_.str();

5770 inCaps.dwSize = sizeof( inCaps );

5771 result = input->GetCaps( &inCaps );

5772 if ( FAILED( result ) ) {

5774 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";

5775 errorText_ = errorStream_.str();

5779 // Check channel information.

// NOTE(review): message below also says "RtApiDs::getDeviceInfo" rather than
// probeDeviceOpen — same copy-paste slip as the stereo-playback message above.
5780 if ( inCaps.dwChannels < channels + firstChannel ) {

5781 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";

5785 // Check format information. Use 16-bit format unless user

5786 // requests 8-bit.

5787 DWORD deviceFormats;

5788 if ( channels + firstChannel == 2 ) {

5789 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;

5790 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

5791 waveFormat.wBitsPerSample = 8;

5792 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5794 else { // assume 16-bit is supported

5795 waveFormat.wBitsPerSample = 16;

5796 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5799 else { // channel == 1

5800 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;

5801 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

5802 waveFormat.wBitsPerSample = 8;

5803 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5805 else { // assume 16-bit is supported

5806 waveFormat.wBitsPerSample = 16;

5807 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5810 stream_.userFormat = format;

5812 // Update wave format structure and buffer information.

5813 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

5814 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

5815 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

5817 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

5818 while ( dsPointerLeadTime * 2U > dsBufferSize )

5819 dsBufferSize *= 2;

5821 // Setup the secondary DS buffer description.

5822 DSCBUFFERDESC bufferDescription;

5823 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );

5824 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );

5825 bufferDescription.dwFlags = 0;

5826 bufferDescription.dwReserved = 0;

5827 bufferDescription.dwBufferBytes = dsBufferSize;

5828 bufferDescription.lpwfxFormat = &waveFormat;

5830 // Create the capture buffer.

5831 LPDIRECTSOUNDCAPTUREBUFFER buffer;

5832 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );

5833 if ( FAILED( result ) ) {

5835 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";

5836 errorText_ = errorStream_.str();

5840 // Get the buffer size ... might be different from what we specified.

5841 DSCBCAPS dscbcaps;

5842 dscbcaps.dwSize = sizeof( DSCBCAPS );

5843 result = buffer->GetCaps( &dscbcaps );

5844 if ( FAILED( result ) ) {

5846 buffer->Release();

5847 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

5848 errorText_ = errorStream_.str();

5852 dsBufferSize = dscbcaps.dwBufferBytes;

5854 // NOTE: We could have a problem here if this is a duplex stream

5855 // and the play and capture hardware buffer sizes are different

5856 // (I'm actually not sure if that is a problem or not).

5857 // Currently, we are not verifying that.

5859 // Lock the capture buffer

5862 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

5863 if ( FAILED( result ) ) {

5865 buffer->Release();

5866 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";

5867 errorText_ = errorStream_.str();

5871 // Zero the buffer

5872 ZeroMemory( audioPtr, dataLen );

5874 // Unlock the buffer

5875 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5876 if ( FAILED( result ) ) {

5878 buffer->Release();

5879 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";

5880 errorText_ = errorStream_.str();

// Success: keep the capture object and capture buffer as opaque handles.
5884 ohandle = (void *) input;

5885 bhandle = (void *) buffer;

5888 // Set various stream parameters

5889 DsHandle *handle = 0;

5890 stream_.nDeviceChannels[mode] = channels + firstChannel;

5891 stream_.nUserChannels[mode] = channels;

5892 stream_.bufferSize = *bufferSize;

5893 stream_.channelOffset[mode] = firstChannel;

5894 stream_.deviceInterleaved[mode] = true;

5895 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

5896 else stream_.userInterleaved = true;

5898 // Set flag for buffer conversion

5899 stream_.doConvertBuffer[mode] = false;

5900 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])

5901 stream_.doConvertBuffer[mode] = true;

5902 if (stream_.userFormat != stream_.deviceFormat[mode])

5903 stream_.doConvertBuffer[mode] = true;

5904 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

5905 stream_.nUserChannels[mode] > 1 )

5906 stream_.doConvertBuffer[mode] = true;

5908 // Allocate necessary internal buffers

5909 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

5910 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

5911 if ( stream_.userBuffer[mode] == NULL ) {

5912 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";

5916 if ( stream_.doConvertBuffer[mode] ) {

5918 bool makeBuffer = true;

5919 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

5920 if ( mode == INPUT ) {

// Reuse the existing output-side device buffer if it is already big enough.
5921 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

5922 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

5923 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;

5927 if ( makeBuffer ) {

5928 bufferBytes *= *bufferSize;

5929 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

5930 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

5931 if ( stream_.deviceBuffer == NULL ) {

5932 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";

5938 // Allocate our DsHandle structures for the stream.

5939 if ( stream_.apiHandle == 0 ) {

5941 handle = new DsHandle;

// NOTE(review): message says "AsioHandle" but a DsHandle is being allocated —
// appears to be a copy-paste from the ASIO backend; confirm and fix upstream.
5943 catch ( std::bad_alloc& ) {

5944 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";

5948 // Create a manual-reset event.

5949 handle->condition = CreateEvent( NULL, // no security

5950 TRUE, // manual-reset

5951 FALSE, // non-signaled initially

5952 NULL ); // unnamed

5953 stream_.apiHandle = (void *) handle;

5956 handle = (DsHandle *) stream_.apiHandle;

5957 handle->id[mode] = ohandle;

5958 handle->buffer[mode] = bhandle;

5959 handle->dsBufferSize[mode] = dsBufferSize;

5960 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;

5962 stream_.device[mode] = device;

5963 stream_.state = STREAM_STOPPED;

5964 if ( stream_.mode == OUTPUT && mode == INPUT )

5965 // We had already set up an output stream.

5966 stream_.mode = DUPLEX;

5968 stream_.mode = mode;

5969 stream_.nBuffers = nBuffers;

5970 stream_.sampleRate = sampleRate;

5972 // Setup the buffer conversion information structure.

5973 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

5975 // Setup the callback thread.

5976 if ( stream_.callbackInfo.isRunning == false ) {

5977 unsigned threadId;

5978 stream_.callbackInfo.isRunning = true;

5979 stream_.callbackInfo.object = (void *) this;

5980 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,

5981 &stream_.callbackInfo, 0, &threadId );

5982 if ( stream_.callbackInfo.thread == 0 ) {

5983 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";

5987 // Boost DS thread priority

5988 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );

// ---------------- Error cleanup ----------------
// In the canonical source this section sits under an `error:` goto label (not
// visible in this extraction): release any DS objects/buffers already created,
// close the condition event, free internal buffers and mark the stream closed.
5994 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

5995 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

5996 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

5997 if ( buffer ) buffer->Release();

5998 object->Release();

6000 if ( handle->buffer[1] ) {

6001 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

6002 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6003 if ( buffer ) buffer->Release();

6004 object->Release();

6006 CloseHandle( handle->condition );

6008 stream_.apiHandle = 0;

6011 for ( int i=0; i<2; i++ ) {

6012 if ( stream_.userBuffer[i] ) {

6013 free( stream_.userBuffer[i] );

6014 stream_.userBuffer[i] = 0;

6018 if ( stream_.deviceBuffer ) {

6019 free( stream_.deviceBuffer );

6020 stream_.deviceBuffer = 0;

6023 stream_.state = STREAM_CLOSED;
\r
// Tear down an open stream: signal and join the callback thread, release the
// DirectSound playback/capture objects and buffers, close the condition event,
// free the internal user/device buffers, and reset the stream state.
// Warns (does not throw) if no stream is open.
// NOTE(review): this extracted view is missing interleaved lines (braces, an
// early `return`, and — per the canonical source — a `buffer->Stop()` call
// before each Release) — confirm against the canonical RtAudio.cpp.
6027 void RtApiDs :: closeStream()

6029 if ( stream_.state == STREAM_CLOSED ) {

6030 errorText_ = "RtApiDs::closeStream(): no open stream to close!";

6031 error( RtAudioError::WARNING );

6035 // Stop the callback thread.

// isRunning is polled by the callback thread; clearing it asks the thread to exit.
6036 stream_.callbackInfo.isRunning = false;

6037 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );

6038 CloseHandle( (HANDLE) stream_.callbackInfo.thread );

6040 DsHandle *handle = (DsHandle *) stream_.apiHandle;

// Release playback-side DS buffer and object, if they were created.
6042 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

6043 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

6044 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6047 buffer->Release();

6049 object->Release();

// Release capture-side DS buffer and object, if they were created.
6051 if ( handle->buffer[1] ) {

6052 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

6053 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6056 buffer->Release();

6058 object->Release();

6060 CloseHandle( handle->condition );

6062 stream_.apiHandle = 0;

// Free the per-direction user buffers and the shared device conversion buffer.
6065 for ( int i=0; i<2; i++ ) {

6066 if ( stream_.userBuffer[i] ) {

6067 free( stream_.userBuffer[i] );

6068 stream_.userBuffer[i] = 0;

6072 if ( stream_.deviceBuffer ) {

6073 free( stream_.deviceBuffer );

6074 stream_.deviceBuffer = 0;

6077 stream_.mode = UNINITIALIZED;

6078 stream_.state = STREAM_CLOSED;
\r
// Start a stopped stream: raise the OS timer resolution, reset the duplex
// synchronization state, start the DS playback buffer (looping) and/or the
// capture buffer (looping), clear the drain bookkeeping and mark the stream
// running. Warns if already running; raises SYSTEM_ERROR if a DS call failed.
// NOTE(review): interleaved lines (braces, `goto unlock;` on the error paths
// and the `unlock:` label in the canonical source) are missing from this
// extracted view — confirm against the canonical RtAudio.cpp.
6081 void RtApiDs :: startStream()

6084 RtApi::startStream();

6086 if ( stream_.state == STREAM_RUNNING ) {

6087 errorText_ = "RtApiDs::startStream(): the stream is already running!";

6088 error( RtAudioError::WARNING );

6092 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6094 // Increase scheduler frequency on lesser windows (a side-effect of

6095 // increasing timer accuracy). On greater windows (Win2K or later),

6096 // this is already in effect.

6097 timeBeginPeriod( 1 );

// Reset the duplex-synchronization state used by callbackEvent().
6099 buffersRolling = false;

6100 duplexPrerollBytes = 0;

6102 if ( stream_.mode == DUPLEX ) {

6103 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.

6104 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );

6107 HRESULT result = 0;

// Start the playback buffer in looping mode.
6108 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

6110 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6111 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );

6112 if ( FAILED( result ) ) {

6113 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";

6114 errorText_ = errorStream_.str();

// Start the capture buffer in looping mode.
6119 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6121 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6122 result = buffer->Start( DSCBSTART_LOOPING );

6123 if ( FAILED( result ) ) {

6124 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";

6125 errorText_ = errorStream_.str();

// Clear drain bookkeeping and the stop-condition event, then mark running.
6130 handle->drainCounter = 0;

6131 handle->internalDrain = false;

6132 ResetEvent( handle->condition );

6133 stream_.state = STREAM_RUNNING;

6136 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Stop a running stream. For output, optionally waits (via the manual-reset
// condition event) for the callback to drain pending audio, then stops the DS
// buffer and zeroes it so a restart does not replay stale data. For input,
// does the same with the capture buffer. Restores the OS timer resolution and
// raises SYSTEM_ERROR if any DS call failed.
// NOTE(review): interleaved lines (braces, `goto unlock;` statements, the
// `unlock:` label, and declarations such as `LPVOID audioPtr` / `DWORD dataLen`
// in the canonical source) are missing from this extracted view — confirm
// against the canonical RtAudio.cpp.
6139 void RtApiDs :: stopStream()

6142 if ( stream_.state == STREAM_STOPPED ) {

6143 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";

6144 error( RtAudioError::WARNING );

6148 HRESULT result = 0;

6151 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6152 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// Graceful stop: ask the callback to drain, then wait for it to signal us.
6153 if ( handle->drainCounter == 0 ) {

6154 handle->drainCounter = 2;

6155 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled

6158 stream_.state = STREAM_STOPPED;

6160 MUTEX_LOCK( &stream_.mutex );

6162 // Stop the buffer and clear memory

6163 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6164 result = buffer->Stop();

6165 if ( FAILED( result ) ) {

6166 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";

6167 errorText_ = errorStream_.str();

6171 // Lock the buffer and clear it so that if we start to play again,

6172 // we won't have old data playing.

6173 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );

6174 if ( FAILED( result ) ) {

6175 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";

6176 errorText_ = errorStream_.str();

6180 // Zero the DS buffer

6181 ZeroMemory( audioPtr, dataLen );

6183 // Unlock the DS buffer

6184 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6185 if ( FAILED( result ) ) {

6186 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";

6187 errorText_ = errorStream_.str();

6191 // If we start playing again, we must begin at beginning of buffer.

6192 handle->bufferPointer[0] = 0;

6195 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6196 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6200 stream_.state = STREAM_STOPPED;

// In DUPLEX mode the mutex was already taken in the output branch above.
6202 if ( stream_.mode != DUPLEX )

6203 MUTEX_LOCK( &stream_.mutex );

6205 result = buffer->Stop();

6206 if ( FAILED( result ) ) {

6207 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";

6208 errorText_ = errorStream_.str();

6212 // Lock the buffer and clear it so that if we start to play again,

6213 // we won't have old data playing.

6214 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );

6215 if ( FAILED( result ) ) {

6216 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";

6217 errorText_ = errorStream_.str();

6221 // Zero the DS buffer

6222 ZeroMemory( audioPtr, dataLen );

6224 // Unlock the DS buffer

6225 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6226 if ( FAILED( result ) ) {

6227 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";

6228 errorText_ = errorStream_.str();

6232 // If we start recording again, we must begin at beginning of buffer.

6233 handle->bufferPointer[1] = 0;

6237 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.

6238 MUTEX_UNLOCK( &stream_.mutex );

6240 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Abort a running stream immediately: set drainCounter to 2 so stopStream()
// skips the graceful drain-and-wait path, then stop. Warns if already stopped.
// NOTE(review): the tail of this function (the canonical `stopStream();` call,
// braces and `return`) is missing from this extracted view — confirm against
// the canonical RtAudio.cpp.
6243 void RtApiDs :: abortStream()

6246 if ( stream_.state == STREAM_STOPPED ) {

6247 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";

6248 error( RtAudioError::WARNING );

6252 DsHandle *handle = (DsHandle *) stream_.apiHandle;

// A nonzero drainCounter tells stopStream() not to wait on the condition event.
6253 handle->drainCounter = 2;
\r
6258 void RtApiDs :: callbackEvent()
\r
6260 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6261 Sleep( 50 ); // sleep 50 milliseconds
\r
6265 if ( stream_.state == STREAM_CLOSED ) {
\r
6266 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6267 error( RtAudioError::WARNING );
\r
6271 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6272 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6274 // Check if we were draining the stream and signal is finished.
\r
6275 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6277 stream_.state = STREAM_STOPPING;
\r
6278 if ( handle->internalDrain == false )
\r
6279 SetEvent( handle->condition );
\r
6285 // Invoke user callback to get fresh output data UNLESS we are
\r
6286 // draining stream.
\r
6287 if ( handle->drainCounter == 0 ) {
\r
6288 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6289 double streamTime = getStreamTime();
\r
6290 RtAudioStreamStatus status = 0;
\r
6291 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6292 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6293 handle->xrun[0] = false;
\r
6295 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6296 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6297 handle->xrun[1] = false;
\r
6299 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6300 stream_.bufferSize, streamTime, status, info->userData );
\r
6301 if ( cbReturnValue == 2 ) {
\r
6302 stream_.state = STREAM_STOPPING;
\r
6303 handle->drainCounter = 2;
\r
6307 else if ( cbReturnValue == 1 ) {
\r
6308 handle->drainCounter = 1;
\r
6309 handle->internalDrain = true;
\r
6314 DWORD currentWritePointer, safeWritePointer;
\r
6315 DWORD currentReadPointer, safeReadPointer;
\r
6316 UINT nextWritePointer;
\r
6318 LPVOID buffer1 = NULL;
\r
6319 LPVOID buffer2 = NULL;
\r
6320 DWORD bufferSize1 = 0;
\r
6321 DWORD bufferSize2 = 0;
\r
6326 MUTEX_LOCK( &stream_.mutex );
\r
6327 if ( stream_.state == STREAM_STOPPED ) {
\r
6328 MUTEX_UNLOCK( &stream_.mutex );
\r
6332 if ( buffersRolling == false ) {
\r
6333 if ( stream_.mode == DUPLEX ) {
\r
6334 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6336 // It takes a while for the devices to get rolling. As a result,
\r
6337 // there's no guarantee that the capture and write device pointers
\r
6338 // will move in lockstep. Wait here for both devices to start
\r
6339 // rolling, and then set our buffer pointers accordingly.
\r
6340 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6341 // bytes later than the write buffer.
\r
6343 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6344 // take place between the two GetCurrentPosition calls... but I'm
\r
6345 // really not sure how to solve the problem. Temporarily boost to
\r
6346 // Realtime priority, maybe; but I'm not sure what priority the
\r
6347 // DirectSound service threads run at. We *should* be roughly
\r
6348 // within a ms or so of correct.
\r
6350 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6351 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6353 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6355 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6356 if ( FAILED( result ) ) {
\r
6357 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6358 errorText_ = errorStream_.str();
\r
6359 MUTEX_UNLOCK( &stream_.mutex );
\r
6360 error( RtAudioError::SYSTEM_ERROR );
\r
6363 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6364 if ( FAILED( result ) ) {
\r
6365 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6366 errorText_ = errorStream_.str();
\r
6367 MUTEX_UNLOCK( &stream_.mutex );
\r
6368 error( RtAudioError::SYSTEM_ERROR );
\r
6372 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6373 if ( FAILED( result ) ) {
\r
6374 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6375 errorText_ = errorStream_.str();
\r
6376 MUTEX_UNLOCK( &stream_.mutex );
\r
6377 error( RtAudioError::SYSTEM_ERROR );
\r
6380 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6381 if ( FAILED( result ) ) {
\r
6382 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6383 errorText_ = errorStream_.str();
\r
6384 MUTEX_UNLOCK( &stream_.mutex );
\r
6385 error( RtAudioError::SYSTEM_ERROR );
\r
6388 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6392 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6394 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6395 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6396 handle->bufferPointer[1] = safeReadPointer;
\r
6398 else if ( stream_.mode == OUTPUT ) {
\r
6400 // Set the proper nextWritePosition after initial startup.
\r
6401 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6402 result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
\r
6403 if ( FAILED( result ) ) {
\r
6404 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6405 errorText_ = errorStream_.str();
\r
6406 MUTEX_UNLOCK( &stream_.mutex );
\r
6407 error( RtAudioError::SYSTEM_ERROR );
\r
6410 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6411 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6414 buffersRolling = true;
\r
6417 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6419 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6421 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6422 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6423 bufferBytes *= formatBytes( stream_.userFormat );
\r
6424 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6427 // Setup parameters and do buffer conversion if necessary.
\r
6428 if ( stream_.doConvertBuffer[0] ) {
\r
6429 buffer = stream_.deviceBuffer;
\r
6430 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6431 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6432 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6435 buffer = stream_.userBuffer[0];
\r
6436 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6437 bufferBytes *= formatBytes( stream_.userFormat );
\r
6440 // No byte swapping necessary in DirectSound implementation.
\r
6442 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6443 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6445 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6446 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6448 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6449 nextWritePointer = handle->bufferPointer[0];
\r
6451 DWORD endWrite, leadPointer;
\r
6453 // Find out where the read and "safe write" pointers are.
\r
6454 result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
\r
6455 if ( FAILED( result ) ) {
\r
6456 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6457 errorText_ = errorStream_.str();
\r
6458 MUTEX_UNLOCK( &stream_.mutex );
\r
6459 error( RtAudioError::SYSTEM_ERROR );
\r
6463 // We will copy our output buffer into the region between
\r
6464 // safeWritePointer and leadPointer. If leadPointer is not
\r
6465 // beyond the next endWrite position, wait until it is.
\r
6466 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6467 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6468 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6469 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6470 endWrite = nextWritePointer + bufferBytes;
\r
6472 // Check whether the entire write region is behind the play pointer.
\r
6473 if ( leadPointer >= endWrite ) break;
\r
6475 // If we are here, then we must wait until the leadPointer advances
\r
6476 // beyond the end of our next write region. We use the
\r
6477 // Sleep() function to suspend operation until that happens.
\r
6478 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6479 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6480 if ( millis < 1.0 ) millis = 1.0;
\r
6481 Sleep( (DWORD) millis );
\r
6484 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6485 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6486 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6487 handle->xrun[0] = true;
\r
6488 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6489 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6490 handle->bufferPointer[0] = nextWritePointer;
\r
6491 endWrite = nextWritePointer + bufferBytes;
\r
6494 // Lock free space in the buffer
\r
6495 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6496 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6497 if ( FAILED( result ) ) {
\r
6498 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6499 errorText_ = errorStream_.str();
\r
6500 MUTEX_UNLOCK( &stream_.mutex );
\r
6501 error( RtAudioError::SYSTEM_ERROR );
\r
6505 // Copy our buffer into the DS buffer
\r
6506 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6507 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6509 // Update our buffer offset and unlock sound buffer
\r
6510 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6511 if ( FAILED( result ) ) {
\r
6512 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6513 errorText_ = errorStream_.str();
\r
6514 MUTEX_UNLOCK( &stream_.mutex );
\r
6515 error( RtAudioError::SYSTEM_ERROR );
\r
6518 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6519 handle->bufferPointer[0] = nextWritePointer;
\r
6522 // Don't bother draining input
\r
6523 if ( handle->drainCounter ) {
\r
6524 handle->drainCounter++;
\r
6528 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6530 // Setup parameters.
\r
6531 if ( stream_.doConvertBuffer[1] ) {
\r
6532 buffer = stream_.deviceBuffer;
\r
6533 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6534 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6537 buffer = stream_.userBuffer[1];
\r
6538 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6539 bufferBytes *= formatBytes( stream_.userFormat );
\r
6542 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6543 long nextReadPointer = handle->bufferPointer[1];
\r
6544 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6546 // Find out where the write and "safe read" pointers are.
\r
6547 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6548 if ( FAILED( result ) ) {
\r
6549 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6550 errorText_ = errorStream_.str();
\r
6551 MUTEX_UNLOCK( &stream_.mutex );
\r
6552 error( RtAudioError::SYSTEM_ERROR );
\r
6556 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6557 DWORD endRead = nextReadPointer + bufferBytes;
\r
6559 // Handling depends on whether we are INPUT or DUPLEX.
\r
6560 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6561 // then a wait here will drag the write pointers into the forbidden zone.
\r
6563 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6564 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6565 // practical way to sync up the read and write pointers reliably, given the
\r
6566 // very complex relationship between phase and increment of the read and write
\r
6569 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6570 // provide a pre-roll period of 0.5 seconds in which we return
\r
6571 // zeros from the read buffer while the pointers sync up.
\r
6573 if ( stream_.mode == DUPLEX ) {
\r
6574 if ( safeReadPointer < endRead ) {
\r
6575 if ( duplexPrerollBytes <= 0 ) {
\r
6576 // Pre-roll time over. Be more aggressive.
\r
6577 int adjustment = endRead-safeReadPointer;
\r
6579 handle->xrun[1] = true;
\r
6581 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6582 // and perform fine adjustments later.
\r
6583 // - small adjustments: back off by twice as much.
\r
6584 if ( adjustment >= 2*bufferBytes )
\r
6585 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6587 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6589 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6593 // In pre-roll time. Just do it.
\r
6594 nextReadPointer = safeReadPointer - bufferBytes;
\r
6595 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6597 endRead = nextReadPointer + bufferBytes;
\r
6600 else { // mode == INPUT
\r
6601 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6602 // See comments for playback.
\r
6603 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6604 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6605 if ( millis < 1.0 ) millis = 1.0;
\r
6606 Sleep( (DWORD) millis );
\r
6608 // Wake up and find out where we are now.
\r
6609 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
\r
6610 if ( FAILED( result ) ) {
\r
6611 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6612 errorText_ = errorStream_.str();
\r
6613 MUTEX_UNLOCK( &stream_.mutex );
\r
6614 error( RtAudioError::SYSTEM_ERROR );
\r
6618 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6622 // Lock free space in the buffer
\r
6623 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6624 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6625 if ( FAILED( result ) ) {
\r
6626 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6627 errorText_ = errorStream_.str();
\r
6628 MUTEX_UNLOCK( &stream_.mutex );
\r
6629 error( RtAudioError::SYSTEM_ERROR );
\r
6633 if ( duplexPrerollBytes <= 0 ) {
\r
6634 // Copy our buffer into the DS buffer
\r
6635 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6636 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6639 memset( buffer, 0, bufferSize1 );
\r
6640 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6641 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6644 // Update our buffer offset and unlock sound buffer
\r
6645 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6646 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6647 if ( FAILED( result ) ) {
\r
6648 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6649 errorText_ = errorStream_.str();
\r
6650 MUTEX_UNLOCK( &stream_.mutex );
\r
6651 error( RtAudioError::SYSTEM_ERROR );
\r
6654 handle->bufferPointer[1] = nextReadPointer;
\r
6656 // No byte swapping necessary in DirectSound implementation.
\r
6658 // If necessary, convert 8-bit data from unsigned to signed.
\r
6659 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6660 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6662 // Do buffer conversion if necessary.
\r
6663 if ( stream_.doConvertBuffer[1] )
\r
6664 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6668 MUTEX_UNLOCK( &stream_.mutex );
\r
6669 RtApi::tickStreamTime();
\r
6672 // Definitions for utility functions and callbacks
\r
6673 // specific to the DirectSound implementation.
\r
6675 static unsigned __stdcall callbackHandler( void *ptr )
\r
6677 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6678 RtApiDs *object = (RtApiDs *) info->object;
\r
6679 bool* isRunning = &info->isRunning;
\r
6681 while ( *isRunning == true ) {
\r
6682 object->callbackEvent();
\r
6685 _endthreadex( 0 );
\r
6689 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6690 LPCTSTR description,
\r
6691 LPCTSTR /*module*/,
\r
6692 LPVOID lpContext )
\r
6694 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6695 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6698 bool validDevice = false;
\r
6699 if ( probeInfo.isInput == true ) {
\r
6701 LPDIRECTSOUNDCAPTURE object;
\r
6703 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6704 if ( hr != DS_OK ) return TRUE;
\r
6706 caps.dwSize = sizeof(caps);
\r
6707 hr = object->GetCaps( &caps );
\r
6708 if ( hr == DS_OK ) {
\r
6709 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6710 validDevice = true;
\r
6712 object->Release();
\r
6716 LPDIRECTSOUND object;
\r
6717 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6718 if ( hr != DS_OK ) return TRUE;
\r
6720 caps.dwSize = sizeof(caps);
\r
6721 hr = object->GetCaps( &caps );
\r
6722 if ( hr == DS_OK ) {
\r
6723 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6724 validDevice = true;
\r
6726 object->Release();
\r
6729 // If good device, then save its name and guid.
\r
6730 std::string name = convertCharPointerToStdString( description );
\r
6731 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6732 if ( lpguid == NULL )
\r
6733 name = "Default Device";
\r
6734 if ( validDevice ) {
\r
6735 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6736 if ( dsDevices[i].name == name ) {
\r
6737 dsDevices[i].found = true;
\r
6738 if ( probeInfo.isInput ) {
\r
6739 dsDevices[i].id[1] = lpguid;
\r
6740 dsDevices[i].validId[1] = true;
\r
6743 dsDevices[i].id[0] = lpguid;
\r
6744 dsDevices[i].validId[0] = true;
\r
6751 device.name = name;
\r
6752 device.found = true;
\r
6753 if ( probeInfo.isInput ) {
\r
6754 device.id[1] = lpguid;
\r
6755 device.validId[1] = true;
\r
6758 device.id[0] = lpguid;
\r
6759 device.validId[0] = true;
\r
6761 dsDevices.push_back( device );
\r
6767 static const char* getErrorString( int code )
\r
6771 case DSERR_ALLOCATED:
\r
6772 return "Already allocated";
\r
6774 case DSERR_CONTROLUNAVAIL:
\r
6775 return "Control unavailable";
\r
6777 case DSERR_INVALIDPARAM:
\r
6778 return "Invalid parameter";
\r
6780 case DSERR_INVALIDCALL:
\r
6781 return "Invalid call";
\r
6783 case DSERR_GENERIC:
\r
6784 return "Generic error";
\r
6786 case DSERR_PRIOLEVELNEEDED:
\r
6787 return "Priority level needed";
\r
6789 case DSERR_OUTOFMEMORY:
\r
6790 return "Out of memory";
\r
6792 case DSERR_BADFORMAT:
\r
6793 return "The sample rate or the channel format is not supported";
\r
6795 case DSERR_UNSUPPORTED:
\r
6796 return "Not supported";
\r
6798 case DSERR_NODRIVER:
\r
6799 return "No driver";
\r
6801 case DSERR_ALREADYINITIALIZED:
\r
6802 return "Already initialized";
\r
6804 case DSERR_NOAGGREGATION:
\r
6805 return "No aggregation";
\r
6807 case DSERR_BUFFERLOST:
\r
6808 return "Buffer lost";
\r
6810 case DSERR_OTHERAPPHASPRIO:
\r
6811 return "Another application already has priority";
\r
6813 case DSERR_UNINITIALIZED:
\r
6814 return "Uninitialized";
\r
6817 return "DirectSound unknown error";
\r
6820 //******************** End of __WINDOWS_DS__ *********************//
\r
6824 #if defined(__LINUX_ALSA__)
\r
6826 #include <alsa/asoundlib.h>
\r
6827 #include <unistd.h>
\r
6829 // A structure to hold various information related to the ALSA API
\r
6830 // implementation.
\r
6831 struct AlsaHandle {
\r
6832 snd_pcm_t *handles[2];
\r
6833 bool synchronized;
\r
6835 pthread_cond_t runnable_cv;
\r
6839 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6842 static void *alsaCallbackHandler( void * ptr );
\r
6844 RtApiAlsa :: RtApiAlsa()
\r
6846 // Nothing to do here.
\r
6849 RtApiAlsa :: ~RtApiAlsa()
\r
6851 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6854 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6856 unsigned nDevices = 0;
\r
6857 int result, subdevice, card;
\r
6859 snd_ctl_t *handle;
\r
6861 // Count cards and devices
\r
6863 snd_card_next( &card );
\r
6864 while ( card >= 0 ) {
\r
6865 sprintf( name, "hw:%d", card );
\r
6866 result = snd_ctl_open( &handle, name, 0 );
\r
6867 if ( result < 0 ) {
\r
6868 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6869 errorText_ = errorStream_.str();
\r
6870 error( RtAudioError::WARNING );
\r
6875 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6876 if ( result < 0 ) {
\r
6877 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6878 errorText_ = errorStream_.str();
\r
6879 error( RtAudioError::WARNING );
\r
6882 if ( subdevice < 0 )
\r
6887 snd_ctl_close( handle );
\r
6888 snd_card_next( &card );
\r
6891 result = snd_ctl_open( &handle, "default", 0 );
\r
6892 if (result == 0) {
\r
6894 snd_ctl_close( handle );
\r
6900 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6902 RtAudio::DeviceInfo info;
\r
6903 info.probed = false;
\r
6905 unsigned nDevices = 0;
\r
6906 int result, subdevice, card;
\r
6908 snd_ctl_t *chandle;
\r
6910 // Count cards and devices
\r
6913 snd_card_next( &card );
\r
6914 while ( card >= 0 ) {
\r
6915 sprintf( name, "hw:%d", card );
\r
6916 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6917 if ( result < 0 ) {
\r
6918 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6919 errorText_ = errorStream_.str();
\r
6920 error( RtAudioError::WARNING );
\r
6925 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6926 if ( result < 0 ) {
\r
6927 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6928 errorText_ = errorStream_.str();
\r
6929 error( RtAudioError::WARNING );
\r
6932 if ( subdevice < 0 ) break;
\r
6933 if ( nDevices == device ) {
\r
6934 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6940 snd_ctl_close( chandle );
\r
6941 snd_card_next( &card );
\r
6944 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6945 if ( result == 0 ) {
\r
6946 if ( nDevices == device ) {
\r
6947 strcpy( name, "default" );
\r
6953 if ( nDevices == 0 ) {
\r
6954 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6955 error( RtAudioError::INVALID_USE );
\r
6959 if ( device >= nDevices ) {
\r
6960 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6961 error( RtAudioError::INVALID_USE );
\r
6967 // If a stream is already open, we cannot probe the stream devices.
\r
6968 // Thus, use the saved results.
\r
6969 if ( stream_.state != STREAM_CLOSED &&
\r
6970 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6971 snd_ctl_close( chandle );
\r
6972 if ( device >= devices_.size() ) {
\r
6973 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6974 error( RtAudioError::WARNING );
\r
6977 return devices_[ device ];
\r
6980 int openMode = SND_PCM_ASYNC;
\r
6981 snd_pcm_stream_t stream;
\r
6982 snd_pcm_info_t *pcminfo;
\r
6983 snd_pcm_info_alloca( &pcminfo );
\r
6984 snd_pcm_t *phandle;
\r
6985 snd_pcm_hw_params_t *params;
\r
6986 snd_pcm_hw_params_alloca( ¶ms );
\r
6988 // First try for playback unless default device (which has subdev -1)
\r
6989 stream = SND_PCM_STREAM_PLAYBACK;
\r
6990 snd_pcm_info_set_stream( pcminfo, stream );
\r
6991 if ( subdevice != -1 ) {
\r
6992 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6993 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6995 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6996 if ( result < 0 ) {
\r
6997 // Device probably doesn't support playback.
\r
6998 goto captureProbe;
\r
7002 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
7003 if ( result < 0 ) {
\r
7004 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7005 errorText_ = errorStream_.str();
\r
7006 error( RtAudioError::WARNING );
\r
7007 goto captureProbe;
\r
7010 // The device is open ... fill the parameter structure.
\r
7011 result = snd_pcm_hw_params_any( phandle, params );
\r
7012 if ( result < 0 ) {
\r
7013 snd_pcm_close( phandle );
\r
7014 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7015 errorText_ = errorStream_.str();
\r
7016 error( RtAudioError::WARNING );
\r
7017 goto captureProbe;
\r
7020 // Get output channel information.
\r
7021 unsigned int value;
\r
7022 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7023 if ( result < 0 ) {
\r
7024 snd_pcm_close( phandle );
\r
7025 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
7026 errorText_ = errorStream_.str();
\r
7027 error( RtAudioError::WARNING );
\r
7028 goto captureProbe;
\r
7030 info.outputChannels = value;
\r
7031 snd_pcm_close( phandle );
\r
7034 stream = SND_PCM_STREAM_CAPTURE;
\r
7035 snd_pcm_info_set_stream( pcminfo, stream );
\r
7037 // Now try for capture unless default device (with subdev = -1)
\r
7038 if ( subdevice != -1 ) {
\r
7039 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
7040 snd_ctl_close( chandle );
\r
7041 if ( result < 0 ) {
\r
7042 // Device probably doesn't support capture.
\r
7043 if ( info.outputChannels == 0 ) return info;
\r
7044 goto probeParameters;
\r
7048 snd_ctl_close( chandle );
\r
7050 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7051 if ( result < 0 ) {
\r
7052 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7053 errorText_ = errorStream_.str();
\r
7054 error( RtAudioError::WARNING );
\r
7055 if ( info.outputChannels == 0 ) return info;
\r
7056 goto probeParameters;
\r
7059 // The device is open ... fill the parameter structure.
\r
7060 result = snd_pcm_hw_params_any( phandle, params );
\r
7061 if ( result < 0 ) {
\r
7062 snd_pcm_close( phandle );
\r
7063 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7064 errorText_ = errorStream_.str();
\r
7065 error( RtAudioError::WARNING );
\r
7066 if ( info.outputChannels == 0 ) return info;
\r
7067 goto probeParameters;
\r
7070 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7071 if ( result < 0 ) {
\r
7072 snd_pcm_close( phandle );
\r
7073 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
7074 errorText_ = errorStream_.str();
\r
7075 error( RtAudioError::WARNING );
\r
7076 if ( info.outputChannels == 0 ) return info;
\r
7077 goto probeParameters;
\r
7079 info.inputChannels = value;
\r
7080 snd_pcm_close( phandle );
\r
7082 // If device opens for both playback and capture, we determine the channels.
\r
7083 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
7084 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
7086 // ALSA doesn't provide default devices so we'll use the first available one.
\r
7087 if ( device == 0 && info.outputChannels > 0 )
\r
7088 info.isDefaultOutput = true;
\r
7089 if ( device == 0 && info.inputChannels > 0 )
\r
7090 info.isDefaultInput = true;
\r
7093 // At this point, we just need to figure out the supported data
\r
7094 // formats and sample rates. We'll proceed by opening the device in
\r
7095 // the direction with the maximum number of channels, or playback if
\r
7096 // they are equal. This might limit our sample rate options, but so
\r
7099 if ( info.outputChannels >= info.inputChannels )
\r
7100 stream = SND_PCM_STREAM_PLAYBACK;
\r
7102 stream = SND_PCM_STREAM_CAPTURE;
\r
7103 snd_pcm_info_set_stream( pcminfo, stream );
\r
7105 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7106 if ( result < 0 ) {
\r
7107 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7108 errorText_ = errorStream_.str();
\r
7109 error( RtAudioError::WARNING );
\r
7113 // The device is open ... fill the parameter structure.
\r
7114 result = snd_pcm_hw_params_any( phandle, params );
\r
7115 if ( result < 0 ) {
\r
7116 snd_pcm_close( phandle );
\r
7117 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7118 errorText_ = errorStream_.str();
\r
7119 error( RtAudioError::WARNING );
\r
7123 // Test our discrete set of sample rate values.
\r
7124 info.sampleRates.clear();
\r
7125 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7126 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
\r
7127 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7129 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
7130 info.preferredSampleRate = SAMPLE_RATES[i];
\r
7133 if ( info.sampleRates.size() == 0 ) {
\r
7134 snd_pcm_close( phandle );
\r
7135 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7136 errorText_ = errorStream_.str();
\r
7137 error( RtAudioError::WARNING );
\r
7141 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7142 snd_pcm_format_t format;
\r
7143 info.nativeFormats = 0;
\r
7144 format = SND_PCM_FORMAT_S8;
\r
7145 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7146 info.nativeFormats |= RTAUDIO_SINT8;
\r
7147 format = SND_PCM_FORMAT_S16;
\r
7148 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7149 info.nativeFormats |= RTAUDIO_SINT16;
\r
7150 format = SND_PCM_FORMAT_S24;
\r
7151 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7152 info.nativeFormats |= RTAUDIO_SINT24;
\r
7153 format = SND_PCM_FORMAT_S32;
\r
7154 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7155 info.nativeFormats |= RTAUDIO_SINT32;
\r
7156 format = SND_PCM_FORMAT_FLOAT;
\r
7157 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7158 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7159 format = SND_PCM_FORMAT_FLOAT64;
\r
7160 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7161 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7163 // Check that we have at least one supported format
\r
7164 if ( info.nativeFormats == 0 ) {
\r
7165 snd_pcm_close( phandle );
\r
7166 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7167 errorText_ = errorStream_.str();
\r
7168 error( RtAudioError::WARNING );
\r
7172 // Get the device name
\r
7174 result = snd_card_get_name( card, &cardname );
\r
7175 if ( result >= 0 ) {
\r
7176 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7181 // That's all ... close the device and return
\r
7182 snd_pcm_close( phandle );
\r
7183 info.probed = true;
\r
7187 void RtApiAlsa :: saveDeviceInfo( void )
\r
7191 unsigned int nDevices = getDeviceCount();
\r
7192 devices_.resize( nDevices );
\r
7193 for ( unsigned int i=0; i<nDevices; i++ )
\r
7194 devices_[i] = getDeviceInfo( i );
\r
// Open the ALSA PCM device with the given index/mode and configure it:
// resolve the "hw:card,subdevice" name, set access mode (interleaved or
// not), sample format (with fallback search), rate, channel count, period
// size and count, install sw params (silence-fill on xrun), allocate the
// user/device buffers, and finally create the callback thread.
// Returns SUCCESS/FAILURE per the RtApi convention.
// NOTE(review): this paste is missing several physical lines (closing
// braces, else branches, goto labels) — embedded numbers show the gaps.
7197 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7198 unsigned int firstChannel, unsigned int sampleRate,
\r
7199 RtAudioFormat format, unsigned int *bufferSize,
\r
7200 RtAudio::StreamOptions *options )
\r
7203 #if defined(__RTAUDIO_DEBUG__)
\r
7204 snd_output_t *out;
\r
7205 snd_output_stdio_attach(&out, stderr, 0);
\r
7208 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7210 unsigned nDevices = 0;
\r
7211 int result, subdevice, card;
\r
7213 snd_ctl_t *chandle;
\r
// Honor the user's request to open the "default" PCM instead of hw:N,M.
7215 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7216 snprintf(name, sizeof(name), "%s", "default");
\r
7218 // Count cards and devices
\r
// Walk all cards/subdevices to translate the flat RtAudio device index
// into an ALSA "hw:card,subdevice" name.
7220 snd_card_next( &card );
\r
7221 while ( card >= 0 ) {
\r
7222 sprintf( name, "hw:%d", card );
\r
7223 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7224 if ( result < 0 ) {
\r
7225 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7226 errorText_ = errorStream_.str();
\r
7231 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7232 if ( result < 0 ) break;
\r
7233 if ( subdevice < 0 ) break;
\r
7234 if ( nDevices == device ) {
\r
7235 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7236 snd_ctl_close( chandle );
\r
7241 snd_ctl_close( chandle );
\r
7242 snd_card_next( &card );
\r
// The "default" device is counted as one extra device after all cards.
7245 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7246 if ( result == 0 ) {
\r
7247 if ( nDevices == device ) {
\r
7248 strcpy( name, "default" );
\r
7254 if ( nDevices == 0 ) {
\r
7255 // This should not happen because a check is made before this function is called.
\r
7256 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7260 if ( device >= nDevices ) {
\r
7261 // This should not happen because a check is made before this function is called.
\r
7262 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7269 // The getDeviceInfo() function will not work for a device that is
\r
7270 // already open. Thus, we'll probe the system before opening a
\r
7271 // stream and save the results for use by getDeviceInfo().
\r
7272 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7273 this->saveDeviceInfo();
\r
7275 snd_pcm_stream_t stream;
\r
7276 if ( mode == OUTPUT )
\r
7277 stream = SND_PCM_STREAM_PLAYBACK;
\r
7279 stream = SND_PCM_STREAM_CAPTURE;
\r
7281 snd_pcm_t *phandle;
\r
7282 int openMode = SND_PCM_ASYNC;
\r
7283 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7284 if ( result < 0 ) {
\r
7285 if ( mode == OUTPUT )
\r
7286 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7288 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7289 errorText_ = errorStream_.str();
\r
7293 // Fill the parameter structure.
\r
7294 snd_pcm_hw_params_t *hw_params;
\r
7295 snd_pcm_hw_params_alloca( &hw_params );
\r
7296 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7297 if ( result < 0 ) {
\r
7298 snd_pcm_close( phandle );
\r
7299 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7300 errorText_ = errorStream_.str();
\r
7304 #if defined(__RTAUDIO_DEBUG__)
\r
7305 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7306 snd_pcm_hw_params_dump( hw_params, out );
\r
7309 // Set access ... check user preference.
\r
// Try the preferred access mode first; if the device refuses it, fall
// back to the other mode and record deviceInterleaved[] accordingly so
// the callback knows whether a buffer conversion is required.
7310 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7311 stream_.userInterleaved = false;
\r
7312 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7313 if ( result < 0 ) {
\r
7314 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7315 stream_.deviceInterleaved[mode] = true;
\r
7318 stream_.deviceInterleaved[mode] = false;
\r
7321 stream_.userInterleaved = true;
\r
7322 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7323 if ( result < 0 ) {
\r
7324 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7325 stream_.deviceInterleaved[mode] = false;
\r
7328 stream_.deviceInterleaved[mode] = true;
\r
7331 if ( result < 0 ) {
\r
7332 snd_pcm_close( phandle );
\r
7333 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7334 errorText_ = errorStream_.str();
\r
7338 // Determine how to set the device format.
\r
7339 stream_.userFormat = format;
\r
7340 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7342 if ( format == RTAUDIO_SINT8 )
\r
7343 deviceFormat = SND_PCM_FORMAT_S8;
\r
7344 else if ( format == RTAUDIO_SINT16 )
\r
7345 deviceFormat = SND_PCM_FORMAT_S16;
\r
7346 else if ( format == RTAUDIO_SINT24 )
\r
7347 deviceFormat = SND_PCM_FORMAT_S24;
\r
7348 else if ( format == RTAUDIO_SINT32 )
\r
7349 deviceFormat = SND_PCM_FORMAT_S32;
\r
7350 else if ( format == RTAUDIO_FLOAT32 )
\r
7351 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7352 else if ( format == RTAUDIO_FLOAT64 )
\r
7353 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7355 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7356 stream_.deviceFormat[mode] = format;
\r
7360 // The user requested format is not natively supported by the device.
\r
// Search for an alternate native format, widest first, and let the
// buffer-conversion machinery translate to the user's format.
7361 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7362 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7363 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7367 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7368 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7369 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7373 deviceFormat = SND_PCM_FORMAT_S32;
\r
7374 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7375 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7379 deviceFormat = SND_PCM_FORMAT_S24;
\r
7380 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7381 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7385 deviceFormat = SND_PCM_FORMAT_S16;
\r
7386 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7387 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7391 deviceFormat = SND_PCM_FORMAT_S8;
\r
7392 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7393 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7397 // If we get here, no supported format was found.
\r
7398 snd_pcm_close( phandle );
\r
7399 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7400 errorText_ = errorStream_.str();
\r
7404 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7405 if ( result < 0 ) {
\r
7406 snd_pcm_close( phandle );
\r
7407 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7408 errorText_ = errorStream_.str();
\r
7412 // Determine whether byte-swapping is necessary.
\r
7413 stream_.doByteSwap[mode] = false;
\r
7414 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
// snd_pcm_format_cpu_endian() returns 1 for CPU endian, 0 for opposite
// endian, negative on error.
7415 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7416 if ( result == 0 )
\r
7417 stream_.doByteSwap[mode] = true;
\r
7418 else if (result < 0) {
\r
7419 snd_pcm_close( phandle );
\r
7420 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7421 errorText_ = errorStream_.str();
\r
7426 // Set the sample rate.
\r
7427 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7428 if ( result < 0 ) {
\r
7429 snd_pcm_close( phandle );
\r
7430 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7431 errorText_ = errorStream_.str();
\r
7435 // Determine the number of channels for this device. We support a possible
\r
7436 // minimum device channel number > than the value requested by the user.
\r
7437 stream_.nUserChannels[mode] = channels;
\r
7438 unsigned int value;
\r
7439 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7440 unsigned int deviceChannels = value;
\r
7441 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7442 snd_pcm_close( phandle );
\r
7443 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7444 errorText_ = errorStream_.str();
\r
7448 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7449 if ( result < 0 ) {
\r
7450 snd_pcm_close( phandle );
\r
7451 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7452 errorText_ = errorStream_.str();
\r
7455 deviceChannels = value;
\r
7456 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7457 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7459 // Set the device channels.
\r
7460 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7461 if ( result < 0 ) {
\r
7462 snd_pcm_close( phandle );
\r
7463 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7464 errorText_ = errorStream_.str();
\r
7468 // Set the buffer (or period) size.
\r
7470 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7471 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7472 if ( result < 0 ) {
\r
7473 snd_pcm_close( phandle );
\r
7474 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7475 errorText_ = errorStream_.str();
\r
// Report the actual (possibly adjusted) period size back to the caller.
7478 *bufferSize = periodSize;
\r
7480 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7481 unsigned int periods = 0;
\r
7482 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7483 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7484 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7485 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7486 if ( result < 0 ) {
\r
7487 snd_pcm_close( phandle );
\r
7488 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7489 errorText_ = errorStream_.str();
\r
7493 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7494 // MUST be the same in both directions!
\r
7495 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7496 snd_pcm_close( phandle );
\r
7497 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7498 errorText_ = errorStream_.str();
\r
7502 stream_.bufferSize = *bufferSize;
\r
7504 // Install the hardware configuration
\r
7505 result = snd_pcm_hw_params( phandle, hw_params );
\r
7506 if ( result < 0 ) {
\r
7507 snd_pcm_close( phandle );
\r
7508 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7509 errorText_ = errorStream_.str();
\r
7513 #if defined(__RTAUDIO_DEBUG__)
\r
7514 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7515 snd_pcm_hw_params_dump( hw_params, out );
\r
7518 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7519 snd_pcm_sw_params_t *sw_params = NULL;
\r
7520 snd_pcm_sw_params_alloca( &sw_params );
\r
7521 snd_pcm_sw_params_current( phandle, sw_params );
\r
7522 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
// ULONG_MAX stop threshold effectively disables automatic stop on xrun.
7523 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7524 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7526 // The following two settings were suggested by Theo Veenker
\r
7527 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7528 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7530 // here are two options for a fix
\r
7531 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7532 snd_pcm_uframes_t val;
\r
7533 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7534 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7536 result = snd_pcm_sw_params( phandle, sw_params );
\r
7537 if ( result < 0 ) {
\r
7538 snd_pcm_close( phandle );
\r
7539 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7540 errorText_ = errorStream_.str();
\r
7544 #if defined(__RTAUDIO_DEBUG__)
\r
7545 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7546 snd_pcm_sw_params_dump( sw_params, out );
\r
7549 // Set flags for buffer conversion
\r
7550 stream_.doConvertBuffer[mode] = false;
\r
7551 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7552 stream_.doConvertBuffer[mode] = true;
\r
7553 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7554 stream_.doConvertBuffer[mode] = true;
\r
7555 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7556 stream_.nUserChannels[mode] > 1 )
\r
7557 stream_.doConvertBuffer[mode] = true;
\r
7559 // Allocate the ApiHandle if necessary and then save.
\r
7560 AlsaHandle *apiInfo = 0;
\r
7561 if ( stream_.apiHandle == 0 ) {
\r
7563 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7565 catch ( std::bad_alloc& ) {
\r
7566 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7570 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7571 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7575 stream_.apiHandle = (void *) apiInfo;
\r
7576 apiInfo->handles[0] = 0;
\r
7577 apiInfo->handles[1] = 0;
\r
7580 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7582 apiInfo->handles[mode] = phandle;
\r
7585 // Allocate necessary internal buffers.
\r
7586 unsigned long bufferBytes;
\r
7587 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7588 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7589 if ( stream_.userBuffer[mode] == NULL ) {
\r
7590 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7594 if ( stream_.doConvertBuffer[mode] ) {
\r
7596 bool makeBuffer = true;
\r
7597 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
// For duplex streams, reuse the output device buffer for input when it
// is already large enough.
7598 if ( mode == INPUT ) {
\r
7599 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7600 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7601 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7605 if ( makeBuffer ) {
\r
7606 bufferBytes *= *bufferSize;
\r
7607 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7608 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7609 if ( stream_.deviceBuffer == NULL ) {
\r
7610 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7616 stream_.sampleRate = sampleRate;
\r
7617 stream_.nBuffers = periods;
\r
7618 stream_.device[mode] = device;
\r
7619 stream_.state = STREAM_STOPPED;
\r
7621 // Setup the buffer conversion information structure.
\r
7622 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7624 // Setup thread if necessary.
\r
7625 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7626 // We had already set up an output stream.
\r
7627 stream_.mode = DUPLEX;
\r
7628 // Link the streams if possible.
\r
7629 apiInfo->synchronized = false;
\r
7630 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7631 apiInfo->synchronized = true;
\r
7633 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7634 error( RtAudioError::WARNING );
\r
7638 stream_.mode = mode;
\r
7640 // Setup callback thread.
\r
7641 stream_.callbackInfo.object = (void *) this;
\r
7643 // Set the thread attributes for joinable and realtime scheduling
\r
7644 // priority (optional). The higher priority will only take effect
\r
7645 // if the program is run as root or suid. Note, under Linux
\r
7646 // processes with CAP_SYS_NICE privilege, a user can change
\r
7647 // scheduling policy and priority (thus need not be root). See
\r
7648 // POSIX "capabilities".
\r
7649 pthread_attr_t attr;
\r
7650 pthread_attr_init( &attr );
\r
7651 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7653 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7654 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7655 // We previously attempted to increase the audio callback priority
\r
7656 // to SCHED_RR here via the attributes. However, while no errors
\r
7657 // were reported in doing so, it did not work. So, now this is
\r
7658 // done in the alsaCallbackHandler function.
\r
7659 stream_.callbackInfo.doRealtime = true;
\r
7660 int priority = options->priority;
\r
7661 int min = sched_get_priority_min( SCHED_RR );
\r
7662 int max = sched_get_priority_max( SCHED_RR );
\r
// Clamp the requested priority into the valid SCHED_RR range.
7663 if ( priority < min ) priority = min;
\r
7664 else if ( priority > max ) priority = max;
\r
7665 stream_.callbackInfo.priority = priority;
\r
7669 stream_.callbackInfo.isRunning = true;
\r
7670 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7671 pthread_attr_destroy( &attr );
\r
7673 stream_.callbackInfo.isRunning = false;
\r
7674 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
// Error cleanup path: release handles, buffers, and reset stream state.
7683 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7684 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7685 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7687 stream_.apiHandle = 0;
\r
7690 if ( phandle) snd_pcm_close( phandle );
\r
7692 for ( int i=0; i<2; i++ ) {
\r
7693 if ( stream_.userBuffer[i] ) {
\r
7694 free( stream_.userBuffer[i] );
\r
7695 stream_.userBuffer[i] = 0;
\r
7699 if ( stream_.deviceBuffer ) {
\r
7700 free( stream_.deviceBuffer );
\r
7701 stream_.deviceBuffer = 0;
\r
7704 stream_.state = STREAM_CLOSED;
\r
// Stop the callback thread, drop any active PCM data, close both PCM
// handles, free the AlsaHandle and all internal buffers, and mark the
// stream closed.  Warns (does not throw) if no stream is open.
7708 void RtApiAlsa :: closeStream()
\r
7710 if ( stream_.state == STREAM_CLOSED ) {
\r
7711 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7712 error( RtAudioError::WARNING );
\r
7716 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7717 stream_.callbackInfo.isRunning = false;
\r
7718 MUTEX_LOCK( &stream_.mutex );
\r
// Wake the callback thread if it is blocked waiting on runnable_cv so
// it can observe isRunning == false and exit.
7719 if ( stream_.state == STREAM_STOPPED ) {
\r
7720 apiInfo->runnable = true;
\r
7721 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7723 MUTEX_UNLOCK( &stream_.mutex );
\r
7724 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7726 if ( stream_.state == STREAM_RUNNING ) {
\r
7727 stream_.state = STREAM_STOPPED;
\r
7728 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7729 snd_pcm_drop( apiInfo->handles[0] );
\r
7730 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7731 snd_pcm_drop( apiInfo->handles[1] );
\r
7735 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7736 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7737 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7739 stream_.apiHandle = 0;
\r
7742 for ( int i=0; i<2; i++ ) {
\r
7743 if ( stream_.userBuffer[i] ) {
\r
7744 free( stream_.userBuffer[i] );
\r
7745 stream_.userBuffer[i] = 0;
\r
7749 if ( stream_.deviceBuffer ) {
\r
7750 free( stream_.deviceBuffer );
\r
7751 stream_.deviceBuffer = 0;
\r
7754 stream_.mode = UNINITIALIZED;
\r
7755 stream_.state = STREAM_CLOSED;
\r
// Prepare the PCM device(s) if needed, mark the stream RUNNING and wake
// the blocked callback thread.  Warns if the stream is already running.
7758 void RtApiAlsa :: startStream()
\r
7760 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7763 RtApi::startStream();
\r
7764 if ( stream_.state == STREAM_RUNNING ) {
\r
7765 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7766 error( RtAudioError::WARNING );
\r
7770 MUTEX_LOCK( &stream_.mutex );
\r
7773 snd_pcm_state_t state;
\r
7774 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7775 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7776 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7777 state = snd_pcm_state( handle[0] );
\r
7778 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7779 result = snd_pcm_prepare( handle[0] );
\r
7780 if ( result < 0 ) {
\r
7781 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7782 errorText_ = errorStream_.str();
\r
// Input handle is prepared separately only when the two handles are not
// linked (linked handles are driven together by ALSA).
7788 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7789 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7790 state = snd_pcm_state( handle[1] );
\r
7791 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7792 result = snd_pcm_prepare( handle[1] );
\r
7793 if ( result < 0 ) {
\r
7794 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7795 errorText_ = errorStream_.str();
\r
7801 stream_.state = STREAM_RUNNING;
\r
// Signal the callback thread, parked in callbackEvent(), to resume.
7804 apiInfo->runnable = true;
\r
7805 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7806 MUTEX_UNLOCK( &stream_.mutex );
\r
7808 if ( result >= 0 ) return;
\r
7809 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream, draining (or dropping, if synchronized) pending output
// samples and dropping pending input.  Warns if already stopped.
7812 void RtApiAlsa :: stopStream()
\r
7815 if ( stream_.state == STREAM_STOPPED ) {
\r
7816 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7817 error( RtAudioError::WARNING );
\r
7821 stream_.state = STREAM_STOPPED;
\r
7822 MUTEX_LOCK( &stream_.mutex );
\r
7825 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7826 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7827 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// Linked (synchronized) handles must be dropped, not drained, because a
// drain on one side of a linked pair can block.
7828 if ( apiInfo->synchronized )
\r
7829 result = snd_pcm_drop( handle[0] );
\r
7831 result = snd_pcm_drain( handle[0] );
\r
7832 if ( result < 0 ) {
\r
7833 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7834 errorText_ = errorStream_.str();
\r
7839 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7840 result = snd_pcm_drop( handle[1] );
\r
7841 if ( result < 0 ) {
\r
7842 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7843 errorText_ = errorStream_.str();
\r
7849 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7850 MUTEX_UNLOCK( &stream_.mutex );
\r
7852 if ( result >= 0 ) return;
\r
7853 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream immediately, dropping (never draining) pending samples
// in both directions.  Warns if already stopped.
7856 void RtApiAlsa :: abortStream()
\r
7859 if ( stream_.state == STREAM_STOPPED ) {
\r
7860 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7861 error( RtAudioError::WARNING );
\r
7865 stream_.state = STREAM_STOPPED;
\r
7866 MUTEX_LOCK( &stream_.mutex );
\r
7869 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7870 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7871 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7872 result = snd_pcm_drop( handle[0] );
\r
7873 if ( result < 0 ) {
\r
7874 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7875 errorText_ = errorStream_.str();
\r
// Input handle is dropped separately only when not linked to the output.
7880 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7881 result = snd_pcm_drop( handle[1] );
\r
7882 if ( result < 0 ) {
\r
7883 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7884 errorText_ = errorStream_.str();
\r
7890 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7891 MUTEX_UNLOCK( &stream_.mutex );
\r
7893 if ( result >= 0 ) return;
\r
7894 error( RtAudioError::SYSTEM_ERROR );
\r
// One iteration of the callback thread: park on runnable_cv while the
// stream is stopped, invoke the user callback with xrun status flags,
// then read input from / write output to the PCM device(s), handling
// interleaved vs. non-interleaved access, byte swapping, buffer
// conversion, and -EPIPE (xrun) recovery via snd_pcm_prepare().
7897 void RtApiAlsa :: callbackEvent()
\r
7899 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7900 if ( stream_.state == STREAM_STOPPED ) {
\r
7901 MUTEX_LOCK( &stream_.mutex );
\r
// Block here until startStream()/closeStream() signals runnable.
7902 while ( !apiInfo->runnable )
\r
7903 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7905 if ( stream_.state != STREAM_RUNNING ) {
\r
7906 MUTEX_UNLOCK( &stream_.mutex );
\r
7909 MUTEX_UNLOCK( &stream_.mutex );
\r
7912 if ( stream_.state == STREAM_CLOSED ) {
\r
7913 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7914 error( RtAudioError::WARNING );
\r
7918 int doStopStream = 0;
\r
7919 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7920 double streamTime = getStreamTime();
\r
7921 RtAudioStreamStatus status = 0;
\r
// Report and clear any xrun flags recorded by the previous iteration.
7922 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7923 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7924 apiInfo->xrun[0] = false;
\r
7926 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7927 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7928 apiInfo->xrun[1] = false;
\r
7930 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7931 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
// Callback return value 2 requests an immediate abort.
7933 if ( doStopStream == 2 ) {
\r
7938 MUTEX_LOCK( &stream_.mutex );
\r
7940 // The state might change while waiting on a mutex.
\r
7941 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7946 snd_pcm_t **handle;
\r
7947 snd_pcm_sframes_t frames;
\r
7948 RtAudioFormat format;
\r
7949 handle = (snd_pcm_t **) apiInfo->handles;
\r
7951 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7953 // Setup parameters.
\r
7954 if ( stream_.doConvertBuffer[1] ) {
\r
7955 buffer = stream_.deviceBuffer;
\r
7956 channels = stream_.nDeviceChannels[1];
\r
7957 format = stream_.deviceFormat[1];
\r
7960 buffer = stream_.userBuffer[1];
\r
7961 channels = stream_.nUserChannels[1];
\r
7962 format = stream_.userFormat;
\r
7965 // Read samples from device in interleaved/non-interleaved format.
\r
7966 if ( stream_.deviceInterleaved[1] )
\r
7967 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
7969 void *bufs[channels];
\r
7970 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7971 for ( int i=0; i<channels; i++ )
\r
7972 bufs[i] = (void *) (buffer + (i * offset));
\r
7973 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
7976 if ( result < (int) stream_.bufferSize ) {
\r
7977 // Either an error or overrun occurred.
\r
7978 if ( result == -EPIPE ) {
\r
7979 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7980 if ( state == SND_PCM_STATE_XRUN ) {
\r
7981 apiInfo->xrun[1] = true;
\r
// Recover from the overrun so the next read can succeed.
7982 result = snd_pcm_prepare( handle[1] );
\r
7983 if ( result < 0 ) {
\r
7984 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7985 errorText_ = errorStream_.str();
\r
7989 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7990 errorText_ = errorStream_.str();
\r
7994 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7995 errorText_ = errorStream_.str();
\r
7997 error( RtAudioError::WARNING );
\r
8001 // Do byte swapping if necessary.
\r
8002 if ( stream_.doByteSwap[1] )
\r
8003 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
8005 // Do buffer conversion if necessary.
\r
8006 if ( stream_.doConvertBuffer[1] )
\r
8007 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
8009 // Check stream latency
\r
8010 result = snd_pcm_delay( handle[1], &frames );
\r
8011 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
8016 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8018 // Setup parameters and do buffer conversion if necessary.
\r
8019 if ( stream_.doConvertBuffer[0] ) {
\r
8020 buffer = stream_.deviceBuffer;
\r
8021 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
8022 channels = stream_.nDeviceChannels[0];
\r
8023 format = stream_.deviceFormat[0];
\r
8026 buffer = stream_.userBuffer[0];
\r
8027 channels = stream_.nUserChannels[0];
\r
8028 format = stream_.userFormat;
\r
8031 // Do byte swapping if necessary.
\r
8032 if ( stream_.doByteSwap[0] )
\r
8033 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
8035 // Write samples to device in interleaved/non-interleaved format.
\r
8036 if ( stream_.deviceInterleaved[0] )
\r
8037 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
8039 void *bufs[channels];
\r
8040 size_t offset = stream_.bufferSize * formatBytes( format );
\r
8041 for ( int i=0; i<channels; i++ )
\r
8042 bufs[i] = (void *) (buffer + (i * offset));
\r
8043 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
8046 if ( result < (int) stream_.bufferSize ) {
\r
8047 // Either an error or underrun occurred.
\r
8048 if ( result == -EPIPE ) {
\r
8049 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
8050 if ( state == SND_PCM_STATE_XRUN ) {
\r
8051 apiInfo->xrun[0] = true;
\r
// Recover from the underrun so the next write can succeed.
8052 result = snd_pcm_prepare( handle[0] );
\r
8053 if ( result < 0 ) {
\r
8054 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
8055 errorText_ = errorStream_.str();
\r
8058 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
\r
8061 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8062 errorText_ = errorStream_.str();
\r
8066 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
8067 errorText_ = errorStream_.str();
\r
8069 error( RtAudioError::WARNING );
\r
8073 // Check stream latency
\r
8074 result = snd_pcm_delay( handle[0], &frames );
\r
8075 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
8079 MUTEX_UNLOCK( &stream_.mutex );
\r
8081 RtApi::tickStreamTime();
\r
// Callback return value 1 requests a drain-then-stop.
8082 if ( doStopStream == 1 ) this->stopStream();
\r
// Entry point of the ALSA callback thread: optionally raise this thread
// to SCHED_RR (the attribute-based approach in probeDeviceOpen did not
// work, so the priority is applied here), then loop calling
// callbackEvent() until closeStream() clears isRunning.
8085 static void *alsaCallbackHandler( void *ptr )
\r
8087 CallbackInfo *info = (CallbackInfo *) ptr;
\r
8088 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
8089 bool *isRunning = &info->isRunning;
\r
8091 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
8092 if ( info->doRealtime ) {
\r
8093 pthread_t tID = pthread_self(); // ID of this thread
\r
8094 sched_param prio = { info->priority }; // scheduling priority of thread
\r
8095 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
8099 while ( *isRunning == true ) {
\r
8100 pthread_testcancel();
\r
8101 object->callbackEvent();
\r
8104 pthread_exit( NULL );
\r
8107 //******************** End of __LINUX_ALSA__ *********************//
\r
8110 #if defined(__LINUX_PULSE__)
\r
8112 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8113 // and Tristan Matthews.
\r
8115 #include <pulse/error.h>
\r
8116 #include <pulse/simple.h>
\r
8117 #include <pulse/pulseaudio.h>
\r
// Sample rates advertised by the PulseAudio backend; zero-terminated.
8120 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
\r
8121 44100, 48000, 96000, 0};
\r
// Maps an RtAudio sample format to the corresponding PulseAudio format.
8123 struct rtaudio_pa_format_mapping_t {
\r
8124 RtAudioFormat rtaudio_format;
\r
8125 pa_sample_format_t pa_format;
\r
// Format pairs supported via PulseAudio; terminated by {0, PA_SAMPLE_INVALID}.
8128 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8129 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8130 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8131 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8132 {0, PA_SAMPLE_INVALID}};
\r
// Per-stream state for the PulseAudio backend: simple-API playback
// connection plus the callback thread's condition variable.
// NOTE(review): member lines 8136-8139 (s_rec, thread, runnable) are
// missing from this paste; the constructor references s_rec and runnable.
8134 struct PulseAudioHandle {
\r
8135 pa_simple *s_play;
\r
8138 pthread_cond_t runnable_cv;
\r
8140 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
// Destructor: close any still-open stream before teardown.
8143 RtApiPulse::~RtApiPulse()
\r
8145 if ( stream_.state != STREAM_CLOSED )
\r
// PulseAudio exposes a single logical device (body missing in this paste).
8149 unsigned int RtApiPulse::getDeviceCount( void )
\r
// pa_context sink-info callback: record the sink's channel count on the
// RtApiPulse instance and wake the thread waiting on the mainloop.
8154 void RtApiPulse::sinkInfoCallback(pa_context*, const pa_sink_info* info, int, void* arg)
\r
8156 RtApiPulse* api = (RtApiPulse *) arg;
\r
8158 api->channels_ = info->sample_spec.channels;
\r
8160 pa_threaded_mainloop_signal(api->mainloop_, 0);
\r
// pa_context state-change callback: wake the waiting thread once the
// context reaches a terminal state (ready, terminated, or failed).
8163 void RtApiPulse::contextStateCallback(pa_context* c, void* arg)
\r
8165 pa_threaded_mainloop* mainloop = (pa_threaded_mainloop*) arg;
\r
8167 switch (pa_context_get_state(c)) {
\r
8168 case PA_CONTEXT_READY:
\r
8169 case PA_CONTEXT_TERMINATED:
\r
8170 case PA_CONTEXT_FAILED:
\r
8171 pa_threaded_mainloop_signal(mainloop, 0);
\r
// Return device info for the single PulseAudio device.  Defaults are
// filled in first; then a threaded mainloop + context are spun up to ask
// the server (via sinkInfoCallback) for the real output channel count.
// On any mainloop/context failure the defaults are returned as-is.
8178 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8180 /* Set up some defaults in case we crash and burn */
\r
8181 RtAudio::DeviceInfo info;
\r
8182 info.probed = true;
\r
8183 info.name = "PulseAudio";
\r
8184 info.outputChannels = 2;
\r
8185 info.inputChannels = 2;
\r
8186 info.duplexChannels = 2;
\r
8187 info.isDefaultOutput = true;
\r
8188 info.isDefaultInput = true;
\r
8190 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8191 info.sampleRates.push_back( *sr );
\r
8193 info.preferredSampleRate = 48000;
\r
8194 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8196 /* Get the number of output channels from pulseaudio. A simple task, you say?
\r
8197 "What is your mainloop?" */
\r
8198 mainloop_ = pa_threaded_mainloop_new();
\r
8203 pa_threaded_mainloop_start(mainloop_);
\r
8204 pa_threaded_mainloop_lock(mainloop_);
\r
8206 /* "And what is your context?" */
\r
8207 pa_context* context = pa_context_new(pa_threaded_mainloop_get_api(mainloop_), "RtAudio");
\r
// Context creation failed: tear the mainloop down and bail out.
8209 pa_threaded_mainloop_unlock(mainloop_);
\r
8210 pa_threaded_mainloop_stop(mainloop_);
\r
8211 pa_threaded_mainloop_free(mainloop_);
\r
8216 pa_context_set_state_callback(context, contextStateCallback, mainloop_);
\r
8218 pa_context_connect(context, 0, (pa_context_flags_t) 0, 0);
\r
8220 /* "And what is your favourite colour?" */
\r
// Wait (releasing the mainloop lock) until the context connects or fails.
8221 int connected = 0;
\r
8222 pa_context_state_t state = pa_context_get_state(context);
\r
8223 for (; !connected; state = pa_context_get_state(context)) {
\r
8225 case PA_CONTEXT_READY:
\r
8228 case PA_CONTEXT_FAILED:
\r
8229 case PA_CONTEXT_TERMINATED:
\r
8230 /* Blue! No, I mean red! */
\r
8231 pa_threaded_mainloop_unlock(mainloop_);
\r
8232 pa_context_disconnect(context);
\r
8233 pa_context_unref(context);
\r
8234 pa_threaded_mainloop_stop(mainloop_);
\r
8235 pa_threaded_mainloop_free(mainloop_);
\r
8239 pa_threaded_mainloop_wait(mainloop_);
\r
// Query sink 0; sinkInfoCallback stores channels_ and signals us.
8244 pa_operation* op = pa_context_get_sink_info_by_index(context, 0, sinkInfoCallback, this);
\r
8247 pa_operation_unref(op);
\r
8250 pa_threaded_mainloop_wait(mainloop_);
\r
8251 pa_threaded_mainloop_unlock(mainloop_);
\r
// Normal teardown of the context and mainloop.
8253 pa_context_disconnect(context);
\r
8254 pa_context_unref(context);
\r
8256 pa_threaded_mainloop_stop(mainloop_);
\r
8257 pa_threaded_mainloop_free(mainloop_);
\r
8260 info.outputChannels = channels_;
\r
8265 static void *pulseaudio_callback( void * user )
\r
8267 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8268 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8269 volatile bool *isRunning = &cbi->isRunning;
\r
8271 while ( *isRunning ) {
\r
8272 pthread_testcancel();
\r
8273 context->callbackEvent();
\r
8276 pthread_exit( NULL );
\r
8279 void RtApiPulse::closeStream( void )
\r
8281 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8283 stream_.callbackInfo.isRunning = false;
\r
8285 MUTEX_LOCK( &stream_.mutex );
\r
8286 if ( stream_.state == STREAM_STOPPED ) {
\r
8287 pah->runnable = true;
\r
8288 pthread_cond_signal( &pah->runnable_cv );
\r
8290 MUTEX_UNLOCK( &stream_.mutex );
\r
8292 pthread_join( pah->thread, 0 );
\r
8293 if ( pah->s_play ) {
\r
8294 pa_simple_flush( pah->s_play, NULL );
\r
8295 pa_simple_free( pah->s_play );
\r
8298 pa_simple_free( pah->s_rec );
\r
8300 pthread_cond_destroy( &pah->runnable_cv );
\r
8302 stream_.apiHandle = 0;
\r
8305 if ( stream_.userBuffer[0] ) {
\r
8306 free( stream_.userBuffer[0] );
\r
8307 stream_.userBuffer[0] = 0;
\r
8309 if ( stream_.userBuffer[1] ) {
\r
8310 free( stream_.userBuffer[1] );
\r
8311 stream_.userBuffer[1] = 0;
\r
8314 stream_.state = STREAM_CLOSED;
\r
8315 stream_.mode = UNINITIALIZED;
\r
8318 void RtApiPulse::callbackEvent( void )
\r
8320 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8322 if ( stream_.state == STREAM_STOPPED ) {
\r
8323 MUTEX_LOCK( &stream_.mutex );
\r
8324 while ( !pah->runnable )
\r
8325 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8327 if ( stream_.state != STREAM_RUNNING ) {
\r
8328 MUTEX_UNLOCK( &stream_.mutex );
\r
8331 MUTEX_UNLOCK( &stream_.mutex );
\r
8334 if ( stream_.state == STREAM_CLOSED ) {
\r
8335 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8336 "this shouldn't happen!";
\r
8337 error( RtAudioError::WARNING );
\r
8341 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8342 double streamTime = getStreamTime();
\r
8343 RtAudioStreamStatus status = 0;
\r
8344 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8345 stream_.bufferSize, streamTime, status,
\r
8346 stream_.callbackInfo.userData );
\r
8348 if ( doStopStream == 2 ) {
\r
8353 MUTEX_LOCK( &stream_.mutex );
\r
8354 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8355 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8357 if ( stream_.state != STREAM_RUNNING )
\r
8362 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8363 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8364 convertBuffer( stream_.deviceBuffer,
\r
8365 stream_.userBuffer[OUTPUT],
\r
8366 stream_.convertInfo[OUTPUT] );
\r
8367 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8368 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8370 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8371 formatBytes( stream_.userFormat );
\r
8373 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8374 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8375 pa_strerror( pa_error ) << ".";
\r
8376 errorText_ = errorStream_.str();
\r
8377 error( RtAudioError::WARNING );
\r
8381 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8382 if ( stream_.doConvertBuffer[INPUT] )
\r
8383 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8384 formatBytes( stream_.deviceFormat[INPUT] );
\r
8386 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8387 formatBytes( stream_.userFormat );
\r
8389 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8390 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8391 pa_strerror( pa_error ) << ".";
\r
8392 errorText_ = errorStream_.str();
\r
8393 error( RtAudioError::WARNING );
\r
8395 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8396 convertBuffer( stream_.userBuffer[INPUT],
\r
8397 stream_.deviceBuffer,
\r
8398 stream_.convertInfo[INPUT] );
\r
8403 MUTEX_UNLOCK( &stream_.mutex );
\r
8404 RtApi::tickStreamTime();
\r
8406 if (pah->s_play) {
\r
8408 pa_usec_t const lat = pa_simple_get_latency(pah->s_play, &e);
\r
8410 stream_.latency[0] = lat * stream_.sampleRate / 1000000;
\r
8414 if ( doStopStream == 1 )
\r
8418 void RtApiPulse::startStream( void )
\r
8420 RtApi::startStream();
\r
8422 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8424 if ( stream_.state == STREAM_CLOSED ) {
\r
8425 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8426 error( RtAudioError::INVALID_USE );
\r
8429 if ( stream_.state == STREAM_RUNNING ) {
\r
8430 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8431 error( RtAudioError::WARNING );
\r
8435 MUTEX_LOCK( &stream_.mutex );
\r
8437 stream_.state = STREAM_RUNNING;
\r
8439 pah->runnable = true;
\r
8440 pthread_cond_signal( &pah->runnable_cv );
\r
8441 MUTEX_UNLOCK( &stream_.mutex );
\r
8444 void RtApiPulse::stopStream( void )
\r
8446 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8448 if ( stream_.state == STREAM_CLOSED ) {
\r
8449 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8450 error( RtAudioError::INVALID_USE );
\r
8453 if ( stream_.state == STREAM_STOPPED ) {
\r
8454 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8455 error( RtAudioError::WARNING );
\r
8459 stream_.state = STREAM_STOPPED;
\r
8460 pah->runnable = false;
\r
8461 MUTEX_LOCK( &stream_.mutex );
\r
8463 if ( pah && pah->s_play ) {
\r
8465 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8466 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8467 pa_strerror( pa_error ) << ".";
\r
8468 errorText_ = errorStream_.str();
\r
8469 MUTEX_UNLOCK( &stream_.mutex );
\r
8470 error( RtAudioError::SYSTEM_ERROR );
\r
8475 stream_.state = STREAM_STOPPED;
\r
8476 MUTEX_UNLOCK( &stream_.mutex );
\r
8479 void RtApiPulse::abortStream( void )
\r
8481 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8483 if ( stream_.state == STREAM_CLOSED ) {
\r
8484 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8485 error( RtAudioError::INVALID_USE );
\r
8488 if ( stream_.state == STREAM_STOPPED ) {
\r
8489 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8490 error( RtAudioError::WARNING );
\r
8494 stream_.state = STREAM_STOPPED;
\r
8495 pah->runnable = false;
\r
8496 MUTEX_LOCK( &stream_.mutex );
\r
8498 if ( pah && pah->s_play ) {
\r
8500 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8501 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8502 pa_strerror( pa_error ) << ".";
\r
8503 errorText_ = errorStream_.str();
\r
8504 MUTEX_UNLOCK( &stream_.mutex );
\r
8505 error( RtAudioError::SYSTEM_ERROR );
\r
8510 stream_.state = STREAM_STOPPED;
\r
8511 MUTEX_UNLOCK( &stream_.mutex );
\r
8514 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8515 unsigned int channels, unsigned int firstChannel,
\r
8516 unsigned int sampleRate, RtAudioFormat format,
\r
8517 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8519 PulseAudioHandle *pah = 0;
\r
8520 unsigned long bufferBytes = 0;
\r
8521 pa_sample_spec ss;
\r
8523 if ( device != 0 ) return false;
\r
8524 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8525 ss.channels = channels;
\r
8527 if ( firstChannel != 0 ) return false;
\r
8529 bool sr_found = false;
\r
8530 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8531 if ( sampleRate == *sr ) {
\r
8533 stream_.sampleRate = sampleRate;
\r
8534 ss.rate = sampleRate;
\r
8538 if ( !sr_found ) {
\r
8539 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8543 bool sf_found = 0;
\r
8544 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8545 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8546 if ( format == sf->rtaudio_format ) {
\r
8548 stream_.userFormat = sf->rtaudio_format;
\r
8549 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8550 ss.format = sf->pa_format;
\r
8554 if ( !sf_found ) { // Use internal data format conversion.
\r
8555 stream_.userFormat = format;
\r
8556 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8557 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8560 // Set other stream parameters.
\r
8561 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8562 else stream_.userInterleaved = true;
\r
8563 stream_.deviceInterleaved[mode] = true;
\r
8564 stream_.nBuffers = 1;
\r
8565 stream_.doByteSwap[mode] = false;
\r
8566 stream_.nUserChannels[mode] = channels;
\r
8567 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8568 stream_.channelOffset[mode] = 0;
\r
8569 std::string streamName = "RtAudio";
\r
8571 // Set flags for buffer conversion.
\r
8572 stream_.doConvertBuffer[mode] = false;
\r
8573 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8574 stream_.doConvertBuffer[mode] = true;
\r
8575 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8576 stream_.doConvertBuffer[mode] = true;
\r
8578 // Allocate necessary internal buffers.
\r
8579 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8580 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8581 if ( stream_.userBuffer[mode] == NULL ) {
\r
8582 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8585 stream_.bufferSize = *bufferSize;
\r
8587 if ( stream_.doConvertBuffer[mode] ) {
\r
8589 bool makeBuffer = true;
\r
8590 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8591 if ( mode == INPUT ) {
\r
8592 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8593 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8594 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8598 if ( makeBuffer ) {
\r
8599 bufferBytes *= *bufferSize;
\r
8600 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8601 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8602 if ( stream_.deviceBuffer == NULL ) {
\r
8603 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8609 stream_.device[mode] = device;
\r
8611 // Setup the buffer conversion information structure.
\r
8612 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8614 if ( !stream_.apiHandle ) {
\r
8615 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8617 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8621 stream_.apiHandle = pah;
\r
8622 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8623 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8627 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8630 if ( options && !options->streamName.empty() ) streamName = options->streamName;
\r
8633 pa_buffer_attr buffer_attr;
\r
8634 buffer_attr.fragsize = bufferBytes;
\r
8635 buffer_attr.maxlength = -1;
\r
8636 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8637 if ( !pah->s_rec ) {
\r
8638 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8643 /* XXX: hard-coded for DCP-o-matic */
\r
8644 pa_channel_map map;
\r
8645 pa_channel_map_init(&map);
\r
8646 /* XXX: need to check 7.1 */
\r
8647 map.channels = channels;
\r
8649 if (channels > 0) {
\r
8650 map.map[0] = PA_CHANNEL_POSITION_FRONT_LEFT;
\r
8652 if (channels > 1) {
\r
8653 map.map[1] = PA_CHANNEL_POSITION_FRONT_RIGHT;
\r
8655 if (channels > 2) {
\r
8656 map.map[2] = PA_CHANNEL_POSITION_FRONT_CENTER;
\r
8658 if (channels > 3) {
\r
8659 map.map[3] = PA_CHANNEL_POSITION_LFE;
\r
8661 if (channels > 4) {
\r
8662 map.map[4] = PA_CHANNEL_POSITION_REAR_LEFT;
\r
8664 if (channels > 5) {
\r
8665 map.map[5] = PA_CHANNEL_POSITION_REAR_RIGHT;
\r
8668 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, &map, NULL, &error );
\r
8669 if ( !pah->s_play ) {
\r
8670 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8678 if ( stream_.mode == UNINITIALIZED )
\r
8679 stream_.mode = mode;
\r
8680 else if ( stream_.mode == mode )
\r
8683 stream_.mode = DUPLEX;
\r
8685 if ( !stream_.callbackInfo.isRunning ) {
\r
8686 stream_.callbackInfo.object = this;
\r
8687 stream_.callbackInfo.isRunning = true;
\r
8688 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8689 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8694 stream_.state = STREAM_STOPPED;
\r
8698 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8699 pthread_cond_destroy( &pah->runnable_cv );
\r
8701 stream_.apiHandle = 0;
\r
8704 for ( int i=0; i<2; i++ ) {
\r
8705 if ( stream_.userBuffer[i] ) {
\r
8706 free( stream_.userBuffer[i] );
\r
8707 stream_.userBuffer[i] = 0;
\r
8711 if ( stream_.deviceBuffer ) {
\r
8712 free( stream_.deviceBuffer );
\r
8713 stream_.deviceBuffer = 0;
\r
8719 //******************** End of __LINUX_PULSE__ *********************//
\r
8722 #if defined(__LINUX_OSS__)
\r
8724 #include <unistd.h>
\r
8725 #include <sys/ioctl.h>
\r
8726 #include <unistd.h>
\r
8727 #include <fcntl.h>
\r
8728 #include <sys/soundcard.h>
\r
8729 #include <errno.h>
\r
8732 static void *ossCallbackHandler(void * ptr);
\r
8734 // A structure to hold various information related to the OSS API
\r
8735 // implementation.
\r
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];               // device ids (playback, capture)
  bool xrun[2];            // over/underrun flags per direction
  bool triggered;          // device trigger state
  pthread_cond_t runnable; // signalled to wake a stopped stream's thread

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
8746 RtApiOss :: RtApiOss()
\r
8748 // Nothing to do here.
\r
8751 RtApiOss :: ~RtApiOss()
\r
8753 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8756 unsigned int RtApiOss :: getDeviceCount( void )
\r
8758 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8759 if ( mixerfd == -1 ) {
\r
8760 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8761 error( RtAudioError::WARNING );
\r
8765 oss_sysinfo sysinfo;
\r
8766 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8768 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8769 error( RtAudioError::WARNING );
\r
8774 return sysinfo.numaudios;
\r
8777 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8779 RtAudio::DeviceInfo info;
\r
8780 info.probed = false;
\r
8782 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8783 if ( mixerfd == -1 ) {
\r
8784 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8785 error( RtAudioError::WARNING );
\r
8789 oss_sysinfo sysinfo;
\r
8790 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8791 if ( result == -1 ) {
\r
8793 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8794 error( RtAudioError::WARNING );
\r
8798 unsigned nDevices = sysinfo.numaudios;
\r
8799 if ( nDevices == 0 ) {
\r
8801 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8802 error( RtAudioError::INVALID_USE );
\r
8806 if ( device >= nDevices ) {
\r
8808 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8809 error( RtAudioError::INVALID_USE );
\r
8813 oss_audioinfo ainfo;
\r
8814 ainfo.dev = device;
\r
8815 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8817 if ( result == -1 ) {
\r
8818 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8819 errorText_ = errorStream_.str();
\r
8820 error( RtAudioError::WARNING );
\r
8825 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8826 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8827 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8828 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8829 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8832 // Probe data formats ... do for input
\r
8833 unsigned long mask = ainfo.iformats;
\r
8834 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8835 info.nativeFormats |= RTAUDIO_SINT16;
\r
8836 if ( mask & AFMT_S8 )
\r
8837 info.nativeFormats |= RTAUDIO_SINT8;
\r
8838 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8839 info.nativeFormats |= RTAUDIO_SINT32;
\r
8840 if ( mask & AFMT_FLOAT )
\r
8841 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8842 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8843 info.nativeFormats |= RTAUDIO_SINT24;
\r
8845 // Check that we have at least one supported format
\r
8846 if ( info.nativeFormats == 0 ) {
\r
8847 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8848 errorText_ = errorStream_.str();
\r
8849 error( RtAudioError::WARNING );
\r
8853 // Probe the supported sample rates.
\r
8854 info.sampleRates.clear();
\r
8855 if ( ainfo.nrates ) {
\r
8856 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8857 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8858 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8859 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8861 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8862 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8870 // Check min and max rate values;
\r
8871 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8872 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
\r
8873 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8875 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8876 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8881 if ( info.sampleRates.size() == 0 ) {
\r
8882 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8883 errorText_ = errorStream_.str();
\r
8884 error( RtAudioError::WARNING );
\r
8887 info.probed = true;
\r
8888 info.name = ainfo.name;
\r
8895 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8896 unsigned int firstChannel, unsigned int sampleRate,
\r
8897 RtAudioFormat format, unsigned int *bufferSize,
\r
8898 RtAudio::StreamOptions *options )
\r
8900 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8901 if ( mixerfd == -1 ) {
\r
8902 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8906 oss_sysinfo sysinfo;
\r
8907 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8908 if ( result == -1 ) {
\r
8910 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8914 unsigned nDevices = sysinfo.numaudios;
\r
8915 if ( nDevices == 0 ) {
\r
8916 // This should not happen because a check is made before this function is called.
\r
8918 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8922 if ( device >= nDevices ) {
\r
8923 // This should not happen because a check is made before this function is called.
\r
8925 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8929 oss_audioinfo ainfo;
\r
8930 ainfo.dev = device;
\r
8931 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8933 if ( result == -1 ) {
\r
8934 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8935 errorText_ = errorStream_.str();
\r
8939 // Check if device supports input or output
\r
8940 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8941 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8942 if ( mode == OUTPUT )
\r
8943 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8945 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8946 errorText_ = errorStream_.str();
\r
8951 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8952 if ( mode == OUTPUT )
\r
8953 flags |= O_WRONLY;
\r
8954 else { // mode == INPUT
\r
8955 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8956 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8957 close( handle->id[0] );
\r
8958 handle->id[0] = 0;
\r
8959 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8960 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8961 errorText_ = errorStream_.str();
\r
8964 // Check that the number previously set channels is the same.
\r
8965 if ( stream_.nUserChannels[0] != channels ) {
\r
8966 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8967 errorText_ = errorStream_.str();
\r
8973 flags |= O_RDONLY;
\r
8976 // Set exclusive access if specified.
\r
8977 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8979 // Try to open the device.
\r
8981 fd = open( ainfo.devnode, flags, 0 );
\r
8983 if ( errno == EBUSY )
\r
8984 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8986 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8987 errorText_ = errorStream_.str();
\r
8991 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8993 if ( flags | O_RDWR ) {
\r
8994 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8995 if ( result == -1) {
\r
8996 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8997 errorText_ = errorStream_.str();
\r
9003 // Check the device channel support.
\r
9004 stream_.nUserChannels[mode] = channels;
\r
9005 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
9007 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
9008 errorText_ = errorStream_.str();
\r
9012 // Set the number of channels.
\r
9013 int deviceChannels = channels + firstChannel;
\r
9014 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
9015 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
9017 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
9018 errorText_ = errorStream_.str();
\r
9021 stream_.nDeviceChannels[mode] = deviceChannels;
\r
9023 // Get the data format mask
\r
9025 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
9026 if ( result == -1 ) {
\r
9028 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
9029 errorText_ = errorStream_.str();
\r
9033 // Determine how to set the device format.
\r
9034 stream_.userFormat = format;
\r
9035 int deviceFormat = -1;
\r
9036 stream_.doByteSwap[mode] = false;
\r
9037 if ( format == RTAUDIO_SINT8 ) {
\r
9038 if ( mask & AFMT_S8 ) {
\r
9039 deviceFormat = AFMT_S8;
\r
9040 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
9043 else if ( format == RTAUDIO_SINT16 ) {
\r
9044 if ( mask & AFMT_S16_NE ) {
\r
9045 deviceFormat = AFMT_S16_NE;
\r
9046 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9048 else if ( mask & AFMT_S16_OE ) {
\r
9049 deviceFormat = AFMT_S16_OE;
\r
9050 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9051 stream_.doByteSwap[mode] = true;
\r
9054 else if ( format == RTAUDIO_SINT24 ) {
\r
9055 if ( mask & AFMT_S24_NE ) {
\r
9056 deviceFormat = AFMT_S24_NE;
\r
9057 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9059 else if ( mask & AFMT_S24_OE ) {
\r
9060 deviceFormat = AFMT_S24_OE;
\r
9061 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9062 stream_.doByteSwap[mode] = true;
\r
9065 else if ( format == RTAUDIO_SINT32 ) {
\r
9066 if ( mask & AFMT_S32_NE ) {
\r
9067 deviceFormat = AFMT_S32_NE;
\r
9068 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9070 else if ( mask & AFMT_S32_OE ) {
\r
9071 deviceFormat = AFMT_S32_OE;
\r
9072 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9073 stream_.doByteSwap[mode] = true;
\r
9077 if ( deviceFormat == -1 ) {
\r
9078 // The user requested format is not natively supported by the device.
\r
9079 if ( mask & AFMT_S16_NE ) {
\r
9080 deviceFormat = AFMT_S16_NE;
\r
9081 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9083 else if ( mask & AFMT_S32_NE ) {
\r
9084 deviceFormat = AFMT_S32_NE;
\r
9085 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9087 else if ( mask & AFMT_S24_NE ) {
\r
9088 deviceFormat = AFMT_S24_NE;
\r
9089 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9091 else if ( mask & AFMT_S16_OE ) {
\r
9092 deviceFormat = AFMT_S16_OE;
\r
9093 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9094 stream_.doByteSwap[mode] = true;
\r
9096 else if ( mask & AFMT_S32_OE ) {
\r
9097 deviceFormat = AFMT_S32_OE;
\r
9098 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9099 stream_.doByteSwap[mode] = true;
\r
9101 else if ( mask & AFMT_S24_OE ) {
\r
9102 deviceFormat = AFMT_S24_OE;
\r
9103 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9104 stream_.doByteSwap[mode] = true;
\r
9106 else if ( mask & AFMT_S8) {
\r
9107 deviceFormat = AFMT_S8;
\r
9108 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
9112 if ( stream_.deviceFormat[mode] == 0 ) {
\r
9113 // This really shouldn't happen ...
\r
9115 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
9116 errorText_ = errorStream_.str();
\r
9120 // Set the data format.
\r
9121 int temp = deviceFormat;
\r
9122 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
9123 if ( result == -1 || deviceFormat != temp ) {
\r
9125 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
9126 errorText_ = errorStream_.str();
\r
9130 // Attempt to set the buffer size. According to OSS, the minimum
\r
9131 // number of buffers is two. The supposed minimum buffer size is 16
\r
9132 // bytes, so that will be our lower bound. The argument to this
\r
9133 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
9134 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
9135 // We'll check the actual value used near the end of the setup
\r
9137 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
9138 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
9140 if ( options ) buffers = options->numberOfBuffers;
\r
9141 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
9142 if ( buffers < 2 ) buffers = 3;
\r
9143 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
9144 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
9145 if ( result == -1 ) {
\r
9147 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
9148 errorText_ = errorStream_.str();
\r
9151 stream_.nBuffers = buffers;
\r
9153 // Save buffer size (in sample frames).
\r
9154 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
9155 stream_.bufferSize = *bufferSize;
\r
9157 // Set the sample rate.
\r
9158 int srate = sampleRate;
\r
9159 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
9160 if ( result == -1 ) {
\r
9162 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
9163 errorText_ = errorStream_.str();
\r
9167 // Verify the sample rate setup worked.
\r
9168 if ( abs( srate - sampleRate ) > 100 ) {
\r
9170 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
9171 errorText_ = errorStream_.str();
\r
9174 stream_.sampleRate = sampleRate;
\r
9176 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
9177 // We're doing duplex setup here.
\r
9178 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
9179 stream_.nDeviceChannels[0] = deviceChannels;
\r
9182 // Set interleaving parameters.
\r
9183 stream_.userInterleaved = true;
\r
9184 stream_.deviceInterleaved[mode] = true;
\r
9185 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
9186 stream_.userInterleaved = false;
\r
9188 // Set flags for buffer conversion
\r
9189 stream_.doConvertBuffer[mode] = false;
\r
9190 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
9191 stream_.doConvertBuffer[mode] = true;
\r
9192 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
9193 stream_.doConvertBuffer[mode] = true;
\r
9194 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
9195 stream_.nUserChannels[mode] > 1 )
\r
9196 stream_.doConvertBuffer[mode] = true;
\r
9198 // Allocate the stream handles if necessary and then save.
\r
9199 if ( stream_.apiHandle == 0 ) {
\r
9201 handle = new OssHandle;
\r
9203 catch ( std::bad_alloc& ) {
\r
9204 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
9208 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
9209 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
9213 stream_.apiHandle = (void *) handle;
\r
9216 handle = (OssHandle *) stream_.apiHandle;
\r
9218 handle->id[mode] = fd;
\r
9220 // Allocate necessary internal buffers.
\r
9221 unsigned long bufferBytes;
\r
9222 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
9223 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
9224 if ( stream_.userBuffer[mode] == NULL ) {
\r
9225 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
9229 if ( stream_.doConvertBuffer[mode] ) {
\r
9231 bool makeBuffer = true;
\r
9232 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
9233 if ( mode == INPUT ) {
\r
9234 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
9235 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
9236 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9240 if ( makeBuffer ) {
\r
9241 bufferBytes *= *bufferSize;
\r
9242 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9243 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9244 if ( stream_.deviceBuffer == NULL ) {
\r
9245 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9251 stream_.device[mode] = device;
\r
9252 stream_.state = STREAM_STOPPED;
\r
9254 // Setup the buffer conversion information structure.
\r
9255 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9257 // Setup thread if necessary.
\r
9258 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9259 // We had already set up an output stream.
\r
9260 stream_.mode = DUPLEX;
\r
9261 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9264 stream_.mode = mode;
\r
9266 // Setup callback thread.
\r
9267 stream_.callbackInfo.object = (void *) this;
\r
9269 // Set the thread attributes for joinable and realtime scheduling
\r
9270 // priority. The higher priority will only take affect if the
\r
9271 // program is run as root or suid.
\r
9272 pthread_attr_t attr;
\r
9273 pthread_attr_init( &attr );
\r
9274 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9275 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9276 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9277 struct sched_param param;
\r
9278 int priority = options->priority;
\r
9279 int min = sched_get_priority_min( SCHED_RR );
\r
9280 int max = sched_get_priority_max( SCHED_RR );
\r
9281 if ( priority < min ) priority = min;
\r
9282 else if ( priority > max ) priority = max;
\r
9283 param.sched_priority = priority;
\r
9284 pthread_attr_setschedparam( &attr, ¶m );
\r
9285 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9288 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9290 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9293 stream_.callbackInfo.isRunning = true;
\r
9294 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9295 pthread_attr_destroy( &attr );
\r
9297 stream_.callbackInfo.isRunning = false;
\r
9298 errorText_ = "RtApiOss::error creating callback thread!";
\r
9307 pthread_cond_destroy( &handle->runnable );
\r
9308 if ( handle->id[0] ) close( handle->id[0] );
\r
9309 if ( handle->id[1] ) close( handle->id[1] );
\r
9311 stream_.apiHandle = 0;
\r
9314 for ( int i=0; i<2; i++ ) {
\r
9315 if ( stream_.userBuffer[i] ) {
\r
9316 free( stream_.userBuffer[i] );
\r
9317 stream_.userBuffer[i] = 0;
\r
9321 if ( stream_.deviceBuffer ) {
\r
9322 free( stream_.deviceBuffer );
\r
9323 stream_.deviceBuffer = 0;
\r
// Close the currently open OSS stream and release all of its resources:
// shut down the callback thread, halt any in-progress device i/o,
// destroy the pthread condition variable, close the device file
// descriptors, and free the user/device conversion buffers.
// Emits a WARNING (no throw) if no stream is open.
9329 void RtApiOss :: closeStream()
// Nothing to do if no stream was ever opened.
9331 if ( stream_.state == STREAM_CLOSED ) {
9332 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9333 error( RtAudioError::WARNING );
9337 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Ask the callback thread to exit its loop ...
9338 stream_.callbackInfo.isRunning = false;
9339 MUTEX_LOCK( &stream_.mutex );
// ... and wake it if it is parked in pthread_cond_wait() waiting for the
// stream to start, so the pthread_join() below cannot deadlock.
9340 if ( stream_.state == STREAM_STOPPED )
9341 pthread_cond_signal( &handle->runnable );
9342 MUTEX_UNLOCK( &stream_.mutex );
9343 pthread_join( stream_.callbackInfo.thread, NULL );
// Halt the device(s) if the stream was still running.
9345 if ( stream_.state == STREAM_RUNNING ) {
9346 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9347 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
// NOTE(review): an else branch (not visible in this excerpt) presumably
// guards the input halt below for INPUT-only streams — confirm in full source.
9349 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9350 stream_.state = STREAM_STOPPED;
// Tear down the handle: condition variable and device descriptors.
9354 pthread_cond_destroy( &handle->runnable );
9355 if ( handle->id[0] ) close( handle->id[0] );
9356 if ( handle->id[1] ) close( handle->id[1] );
9358 stream_.apiHandle = 0;
// Free the per-direction user buffers (index 0 = playback, 1 = capture).
9361 for ( int i=0; i<2; i++ ) {
9362 if ( stream_.userBuffer[i] ) {
9363 free( stream_.userBuffer[i] );
9364 stream_.userBuffer[i] = 0;
// Free the shared device-format conversion buffer.
9368 if ( stream_.deviceBuffer ) {
9369 free( stream_.deviceBuffer );
9370 stream_.deviceBuffer = 0;
// Mark the stream object reusable.
9373 stream_.mode = UNINITIALIZED;
9374 stream_.state = STREAM_CLOSED;
\r
// Transition an open stream to the RUNNING state and wake the callback
// thread.  OSS needs no explicit device start: playback begins as soon
// as samples are written (see comment in the body).
9377 void RtApiOss :: startStream()
9380 RtApi::startStream();
// Warn and bail out if already running (the early return after the
// warning is implied but not visible in this excerpt).
9381 if ( stream_.state == STREAM_RUNNING ) {
9382 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9383 error( RtAudioError::WARNING );
9387 MUTEX_LOCK( &stream_.mutex );
9389 stream_.state = STREAM_RUNNING;
9391 // No need to do anything else here ... OSS automatically starts
9392 // when fed samples.
9394 MUTEX_UNLOCK( &stream_.mutex );
// Release the callback thread parked in callbackEvent()'s
// pthread_cond_wait() on this condition variable.
9396 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9397 pthread_cond_signal( &handle->runnable );
\r
// Stop the stream after draining: the output queue is flushed with
// nBuffers+1 buffers of zeros so queued audio plays out, then the
// device(s) are halted via SNDCTL_DSP_HALT.  Raises SYSTEM_ERROR if a
// halt ioctl fails.
9400 void RtApiOss :: stopStream()
9403 if ( stream_.state == STREAM_STOPPED ) {
9404 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9405 error( RtAudioError::WARNING );
9409 MUTEX_LOCK( &stream_.mutex );
9411 // The state might change while waiting on a mutex.
9412 if ( stream_.state == STREAM_STOPPED ) {
9413 MUTEX_UNLOCK( &stream_.mutex );
// NOTE(review): declarations of result/buffer/samples precede this point
// in the full source but were dropped from this excerpt.
9418 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9419 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9421 // Flush the output with zeros a few times.
9424 RtAudioFormat format;
// Pick whichever buffer actually feeds the device (converted or user).
9426 if ( stream_.doConvertBuffer[0] ) {
9427 buffer = stream_.deviceBuffer;
9428 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9429 format = stream_.deviceFormat[0];
9432 buffer = stream_.userBuffer[0];
9433 samples = stream_.bufferSize * stream_.nUserChannels[0];
9434 format = stream_.userFormat;
9437 memset( buffer, 0, samples * formatBytes(format) );
// Write one buffer more than the queue depth so pending audio drains.
9438 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9439 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9440 if ( result == -1 ) {
9441 errorText_ = "RtApiOss::stopStream: audio write error.";
9442 error( RtAudioError::WARNING );
// Now halt playback.
9446 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9447 if ( result == -1 ) {
9448 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9449 errorText_ = errorStream_.str();
// Force a fresh SNDCTL_DSP_SETTRIGGER before the next duplex write.
9452 handle->triggered = false;
// Halt capture unless duplex i/o shares a single descriptor (in which
// case the halt above already covered it).
9455 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9456 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9457 if ( result == -1 ) {
9458 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9459 errorText_ = errorStream_.str();
9465 stream_.state = STREAM_STOPPED;
9466 MUTEX_UNLOCK( &stream_.mutex );
// Raise SYSTEM_ERROR only if one of the ioctls above failed.
9468 if ( result != -1 ) return;
9469 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream immediately, discarding any audio still queued in the
// driver (unlike stopStream(), no zero-flush drain is performed).
9472 void RtApiOss :: abortStream()
9475 if ( stream_.state == STREAM_STOPPED ) {
9476 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9477 error( RtAudioError::WARNING );
9481 MUTEX_LOCK( &stream_.mutex );
9483 // The state might change while waiting on a mutex.
9484 if ( stream_.state == STREAM_STOPPED ) {
9485 MUTEX_UNLOCK( &stream_.mutex );
// NOTE(review): the declaration of result is outside this excerpt.
9490 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Halt playback first (descriptor id[0]).
9491 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9492 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9493 if ( result == -1 ) {
9494 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9495 errorText_ = errorStream_.str();
// Force a fresh SNDCTL_DSP_SETTRIGGER before the next duplex write.
9498 handle->triggered = false;
// Halt capture unless duplex i/o shares one descriptor (already halted).
9501 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9502 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9503 if ( result == -1 ) {
9504 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9505 errorText_ = errorStream_.str();
9511 stream_.state = STREAM_STOPPED;
9512 MUTEX_UNLOCK( &stream_.mutex );
// Raise SYSTEM_ERROR only if one of the ioctls above failed.
9514 if ( result != -1 ) return;
9515 error( RtAudioError::SYSTEM_ERROR );
\r
// One iteration of the OSS callback loop: wait for the stream to start
// if stopped, invoke the user callback, then write playback data and/or
// read capture data, performing conversion and byte swapping as flagged
// in the stream structure.  Runs on the ossCallbackHandler thread.
9518 void RtApiOss :: callbackEvent()
9520 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Park until startStream()/closeStream() signals the condition variable.
9521 if ( stream_.state == STREAM_STOPPED ) {
9522 MUTEX_LOCK( &stream_.mutex );
9523 pthread_cond_wait( &handle->runnable, &stream_.mutex );
// Woken for a reason other than RUNNING (e.g. the stream is closing).
9524 if ( stream_.state != STREAM_RUNNING ) {
9525 MUTEX_UNLOCK( &stream_.mutex );
9528 MUTEX_UNLOCK( &stream_.mutex );
9531 if ( stream_.state == STREAM_CLOSED ) {
9532 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9533 error( RtAudioError::WARNING );
9537 // Invoke user callback to get fresh output data.
9538 int doStopStream = 0;
9539 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9540 double streamTime = getStreamTime();
// Report any under/overflow since the last callback, then clear the flag.
9541 RtAudioStreamStatus status = 0;
9542 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9543 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9544 handle->xrun[0] = false;
9546 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9547 status |= RTAUDIO_INPUT_OVERFLOW;
9548 handle->xrun[1] = false;
9550 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9551 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// A callback return of 2 requests an immediate abort (drop queued audio).
9552 if ( doStopStream == 2 ) {
9553 this->abortStream();
9557 MUTEX_LOCK( &stream_.mutex );
9559 // The state might change while waiting on a mutex.
9560 if ( stream_.state == STREAM_STOPPED ) goto unlock;
// NOTE(review): declarations of result/samples/buffer and the "unlock:"
// label target are in lines dropped from this excerpt.
9565 RtAudioFormat format;
// ---- Playback half ----
9567 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9569 // Setup parameters and do buffer conversion if necessary.
9570 if ( stream_.doConvertBuffer[0] ) {
9571 buffer = stream_.deviceBuffer;
9572 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9573 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9574 format = stream_.deviceFormat[0];
9577 buffer = stream_.userBuffer[0];
9578 samples = stream_.bufferSize * stream_.nUserChannels[0];
9579 format = stream_.userFormat;
9582 // Do byte swapping if necessary.
9583 if ( stream_.doByteSwap[0] )
9584 byteSwapBuffer( buffer, samples, format );
// First duplex write: prime the device via SETTRIGGER, then enable
// input+output together so both directions start in sync.  NOTE(review):
// trig's initial value (presumably 0, triggers disabled) is assigned on a
// line outside this excerpt — confirm in full source.
9586 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9588 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9589 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9590 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9591 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9592 handle->triggered = true;
9595 // Write samples to device.
9596 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9598 if ( result == -1 ) {
9599 // We'll assume this is an underrun, though there isn't a
9600 // specific means for determining that.
9601 handle->xrun[0] = true;
9602 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9603 error( RtAudioError::WARNING );
9604 // Continue on to input section.
// ---- Capture half ----
9608 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9610 // Setup parameters.
9611 if ( stream_.doConvertBuffer[1] ) {
9612 buffer = stream_.deviceBuffer;
9613 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9614 format = stream_.deviceFormat[1];
9617 buffer = stream_.userBuffer[1];
9618 samples = stream_.bufferSize * stream_.nUserChannels[1];
9619 format = stream_.userFormat;
9622 // Read samples from device.
9623 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9625 if ( result == -1 ) {
9626 // We'll assume this is an overrun, though there isn't a
9627 // specific means for determining that.
9628 handle->xrun[1] = true;
9629 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9630 error( RtAudioError::WARNING );
9634 // Do byte swapping if necessary.
9635 if ( stream_.doByteSwap[1] )
9636 byteSwapBuffer( buffer, samples, format );
9638 // Do buffer conversion if necessary.
9639 if ( stream_.doConvertBuffer[1] )
9640 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// Advance the stream clock; honor a deferred stop request (return 1).
9644 MUTEX_UNLOCK( &stream_.mutex );
9646 RtApi::tickStreamTime();
9647 if ( doStopStream == 1 ) this->stopStream();
\r
9650 static void *ossCallbackHandler( void *ptr )
\r
9652 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9653 RtApiOss *object = (RtApiOss *) info->object;
\r
9654 bool *isRunning = &info->isRunning;
\r
9656 while ( *isRunning == true ) {
\r
9657 pthread_testcancel();
\r
9658 object->callbackEvent();
\r
9661 pthread_exit( NULL );
\r
9664 //******************** End of __LINUX_OSS__ *********************//
\r
9668 // *************************************************** //
\r
9670 // Protected common (OS-independent) RtAudio methods.
\r
9672 // *************************************************** //
\r
9674 // This method can be modified to control the behavior of error
\r
9675 // message printing.
\r
// Central error reporter.  Routes the message held in errorText_ either
// to the user-registered error callback (if any) or to std::cerr for
// warnings / a thrown RtAudioError for real errors.
9676 void RtApi :: error( RtAudioError::Type type )
9678 errorStream_.str(""); // clear the ostringstream
9680 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9681 if ( errorCallback ) {
9682 // abortStream() can generate new error messages. Ignore them. Just keep original one.
// Re-entrant report while the first error is still being handled:
// suppress it (an early return is implied after this test but not
// visible in this excerpt).
9684 if ( firstErrorOccurred_ )
9687 firstErrorOccurred_ = true;
// Snapshot the text before any nested call can overwrite errorText_.
9688 const std::string errorMessage = errorText_;
// A non-warning error on an active stream asks the callback thread to
// exit its loop.
9690 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9691 stream_.callbackInfo.isRunning = false; // exit from the thread
9695 errorCallback( type, errorMessage );
9696 firstErrorOccurred_ = false;
// No callback registered: print warnings when enabled, throw otherwise.
9700 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9701 std::cerr << '\n' << errorText_ << "\n\n";
9702 else if ( type != RtAudioError::WARNING )
9703 throw( RtAudioError( errorText_, type ) );
\r
9706 void RtApi :: verifyStream()
\r
9708 if ( stream_.state == STREAM_CLOSED ) {
\r
9709 errorText_ = "RtApi:: a stream is not open!";
\r
9710 error( RtAudioError::INVALID_USE );
\r
9714 void RtApi :: clearStreamInfo()
\r
9716 stream_.mode = UNINITIALIZED;
\r
9717 stream_.state = STREAM_CLOSED;
\r
9718 stream_.sampleRate = 0;
\r
9719 stream_.bufferSize = 0;
\r
9720 stream_.nBuffers = 0;
\r
9721 stream_.userFormat = 0;
\r
9722 stream_.userInterleaved = true;
\r
9723 stream_.streamTime = 0.0;
\r
9724 stream_.apiHandle = 0;
\r
9725 stream_.deviceBuffer = 0;
\r
9726 stream_.callbackInfo.callback = 0;
\r
9727 stream_.callbackInfo.userData = 0;
\r
9728 stream_.callbackInfo.isRunning = false;
\r
9729 stream_.callbackInfo.errorCallback = 0;
\r
9730 for ( int i=0; i<2; i++ ) {
\r
9731 stream_.device[i] = 11111;
\r
9732 stream_.doConvertBuffer[i] = false;
\r
9733 stream_.deviceInterleaved[i] = true;
\r
9734 stream_.doByteSwap[i] = false;
\r
9735 stream_.nUserChannels[i] = 0;
\r
9736 stream_.nDeviceChannels[i] = 0;
\r
9737 stream_.channelOffset[i] = 0;
\r
9738 stream_.deviceFormat[i] = 0;
\r
9739 stream_.latency[i] = 0;
\r
9740 stream_.userBuffer[i] = 0;
\r
9741 stream_.convertInfo[i].channels = 0;
\r
9742 stream_.convertInfo[i].inJump = 0;
\r
9743 stream_.convertInfo[i].outJump = 0;
\r
9744 stream_.convertInfo[i].inFormat = 0;
\r
9745 stream_.convertInfo[i].outFormat = 0;
\r
9746 stream_.convertInfo[i].inOffset.clear();
\r
9747 stream_.convertInfo[i].outOffset.clear();
\r
// Byte width of one sample of the given RtAudioFormat.
// NOTE(review): the per-format return statements were dropped from this
// excerpt; only the dispatch chain and the fallback warning remain
// visible here.
9751 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9753 if ( format == RTAUDIO_SINT16 )
9755 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9757 else if ( format == RTAUDIO_FLOAT64 )
9759 else if ( format == RTAUDIO_SINT24 )
9761 else if ( format == RTAUDIO_SINT8 )
// Unknown format: warn (presumably followed by a zero return on a line
// outside this excerpt).
9764 errorText_ = "RtApi::formatBytes: undefined format.";
9765 error( RtAudioError::WARNING );
\r
// Populate stream_.convertInfo[mode] with the jumps, channel count and
// per-channel offsets that convertBuffer() uses for this direction.
// firstChannel shifts the device-side offsets for streams opened on a
// channel subset.
9770 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9772 if ( mode == INPUT ) { // convert device to user buffer
9773 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9774 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9775 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9776 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9778 else { // convert user to device buffer
9779 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9780 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9781 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9782 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
// Convert only as many channels as both sides actually have.
9785 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9786 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9788 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9790 // Set up the interleave/deinterleave offsets.
9791 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
// Planar source -> interleaved destination: source channels sit in
// per-channel blocks (k * bufferSize apart) and advance one sample per
// frame (inJump = 1); destination advances by its channel count.
9792 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9793 ( mode == INPUT && stream_.userInterleaved ) ) {
9794 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9795 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9796 stream_.convertInfo[mode].outOffset.push_back( k );
9797 stream_.convertInfo[mode].inJump = 1;
// Interleaved source -> planar destination (mirror of the above).
9801 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9802 stream_.convertInfo[mode].inOffset.push_back( k );
9803 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9804 stream_.convertInfo[mode].outJump = 1;
9808 else { // no (de)interleaving
9809 if ( stream_.userInterleaved ) {
9810 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9811 stream_.convertInfo[mode].inOffset.push_back( k );
9812 stream_.convertInfo[mode].outOffset.push_back( k );
// Both sides planar: per-channel blocks, one sample advance per frame.
9816 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9817 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9818 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9819 stream_.convertInfo[mode].inJump = 1;
9820 stream_.convertInfo[mode].outJump = 1;
9825 // Add channel offset.
9826 if ( firstChannel > 0 ) {
// Interleaved device side: shift the device offsets by whole channels.
9827 if ( stream_.deviceInterleaved[mode] ) {
9828 if ( mode == OUTPUT ) {
9829 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9830 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9833 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9834 stream_.convertInfo[mode].inOffset[k] += firstChannel;
// Planar device side: shift by whole channel-buffers instead.
9838 if ( mode == OUTPUT ) {
9839 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9840 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9843 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9844 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9850 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9852 // This function does format conversion, input/output channel compensation, and
\r
9853 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9854 // the lower three bytes of a 32-bit integer.
\r
9856 // Clear our device buffer when in/out duplex device channels are different
\r
9857 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9858 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9859 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9862 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9864 Float64 *out = (Float64 *)outBuffer;
\r
9866 if (info.inFormat == RTAUDIO_SINT8) {
\r
9867 signed char *in = (signed char *)inBuffer;
\r
9868 scale = 1.0 / 127.5;
\r
9869 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9870 for (j=0; j<info.channels; j++) {
\r
9871 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9872 out[info.outOffset[j]] += 0.5;
\r
9873 out[info.outOffset[j]] *= scale;
\r
9875 in += info.inJump;
\r
9876 out += info.outJump;
\r
9879 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9880 Int16 *in = (Int16 *)inBuffer;
\r
9881 scale = 1.0 / 32767.5;
\r
9882 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9883 for (j=0; j<info.channels; j++) {
\r
9884 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9885 out[info.outOffset[j]] += 0.5;
\r
9886 out[info.outOffset[j]] *= scale;
\r
9888 in += info.inJump;
\r
9889 out += info.outJump;
\r
9892 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9893 Int24 *in = (Int24 *)inBuffer;
\r
9894 scale = 1.0 / 8388607.5;
\r
9895 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9896 for (j=0; j<info.channels; j++) {
\r
9897 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9898 out[info.outOffset[j]] += 0.5;
\r
9899 out[info.outOffset[j]] *= scale;
\r
9901 in += info.inJump;
\r
9902 out += info.outJump;
\r
9905 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9906 Int32 *in = (Int32 *)inBuffer;
\r
9907 scale = 1.0 / 2147483647.5;
\r
9908 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9909 for (j=0; j<info.channels; j++) {
\r
9910 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9911 out[info.outOffset[j]] += 0.5;
\r
9912 out[info.outOffset[j]] *= scale;
\r
9914 in += info.inJump;
\r
9915 out += info.outJump;
\r
9918 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9919 Float32 *in = (Float32 *)inBuffer;
\r
9920 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9921 for (j=0; j<info.channels; j++) {
\r
9922 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9924 in += info.inJump;
\r
9925 out += info.outJump;
\r
9928 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9929 // Channel compensation and/or (de)interleaving only.
\r
9930 Float64 *in = (Float64 *)inBuffer;
\r
9931 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9932 for (j=0; j<info.channels; j++) {
\r
9933 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9935 in += info.inJump;
\r
9936 out += info.outJump;
\r
9940 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9942 Float32 *out = (Float32 *)outBuffer;
\r
9944 if (info.inFormat == RTAUDIO_SINT8) {
\r
9945 signed char *in = (signed char *)inBuffer;
\r
9946 scale = (Float32) ( 1.0 / 127.5 );
\r
9947 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9948 for (j=0; j<info.channels; j++) {
\r
9949 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9950 out[info.outOffset[j]] += 0.5;
\r
9951 out[info.outOffset[j]] *= scale;
\r
9953 in += info.inJump;
\r
9954 out += info.outJump;
\r
9957 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9958 Int16 *in = (Int16 *)inBuffer;
\r
9959 scale = (Float32) ( 1.0 / 32767.5 );
\r
9960 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9961 for (j=0; j<info.channels; j++) {
\r
9962 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9963 out[info.outOffset[j]] += 0.5;
\r
9964 out[info.outOffset[j]] *= scale;
\r
9966 in += info.inJump;
\r
9967 out += info.outJump;
\r
9970 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9971 Int24 *in = (Int24 *)inBuffer;
\r
9972 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9973 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9974 for (j=0; j<info.channels; j++) {
\r
9975 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9976 out[info.outOffset[j]] += 0.5;
\r
9977 out[info.outOffset[j]] *= scale;
\r
9979 in += info.inJump;
\r
9980 out += info.outJump;
\r
9983 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9984 Int32 *in = (Int32 *)inBuffer;
\r
9985 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9986 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9987 for (j=0; j<info.channels; j++) {
\r
9988 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9989 out[info.outOffset[j]] += 0.5;
\r
9990 out[info.outOffset[j]] *= scale;
\r
9992 in += info.inJump;
\r
9993 out += info.outJump;
\r
9996 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9997 // Channel compensation and/or (de)interleaving only.
\r
9998 Float32 *in = (Float32 *)inBuffer;
\r
9999 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10000 for (j=0; j<info.channels; j++) {
\r
10001 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10003 in += info.inJump;
\r
10004 out += info.outJump;
\r
10007 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10008 Float64 *in = (Float64 *)inBuffer;
\r
10009 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10010 for (j=0; j<info.channels; j++) {
\r
10011 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
10013 in += info.inJump;
\r
10014 out += info.outJump;
\r
10018 else if (info.outFormat == RTAUDIO_SINT32) {
\r
10019 Int32 *out = (Int32 *)outBuffer;
\r
10020 if (info.inFormat == RTAUDIO_SINT8) {
\r
10021 signed char *in = (signed char *)inBuffer;
\r
10022 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10023 for (j=0; j<info.channels; j++) {
\r
10024 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
10025 out[info.outOffset[j]] <<= 24;
\r
10027 in += info.inJump;
\r
10028 out += info.outJump;
\r
10031 else if (info.inFormat == RTAUDIO_SINT16) {
\r
10032 Int16 *in = (Int16 *)inBuffer;
\r
10033 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10034 for (j=0; j<info.channels; j++) {
\r
10035 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
10036 out[info.outOffset[j]] <<= 16;
\r
10038 in += info.inJump;
\r
10039 out += info.outJump;
\r
10042 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10043 Int24 *in = (Int24 *)inBuffer;
\r
10044 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10045 for (j=0; j<info.channels; j++) {
\r
10046 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
10047 out[info.outOffset[j]] <<= 8;
\r
10049 in += info.inJump;
\r
10050 out += info.outJump;
\r
10053 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10054 // Channel compensation and/or (de)interleaving only.
\r
10055 Int32 *in = (Int32 *)inBuffer;
\r
10056 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10057 for (j=0; j<info.channels; j++) {
\r
10058 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10060 in += info.inJump;
\r
10061 out += info.outJump;
\r
10064 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10065 Float32 *in = (Float32 *)inBuffer;
\r
10066 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10067 for (j=0; j<info.channels; j++) {
\r
10068 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
10070 in += info.inJump;
\r
10071 out += info.outJump;
\r
10074 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10075 Float64 *in = (Float64 *)inBuffer;
\r
10076 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10077 for (j=0; j<info.channels; j++) {
\r
10078 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
10080 in += info.inJump;
\r
10081 out += info.outJump;
\r
10085 else if (info.outFormat == RTAUDIO_SINT24) {
\r
10086 Int24 *out = (Int24 *)outBuffer;
\r
10087 if (info.inFormat == RTAUDIO_SINT8) {
\r
10088 signed char *in = (signed char *)inBuffer;
\r
10089 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10090 for (j=0; j<info.channels; j++) {
\r
10091 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
10092 //out[info.outOffset[j]] <<= 16;
\r
10094 in += info.inJump;
\r
10095 out += info.outJump;
\r
10098 else if (info.inFormat == RTAUDIO_SINT16) {
\r
10099 Int16 *in = (Int16 *)inBuffer;
\r
10100 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10101 for (j=0; j<info.channels; j++) {
\r
10102 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
10103 //out[info.outOffset[j]] <<= 8;
\r
10105 in += info.inJump;
\r
10106 out += info.outJump;
\r
10109 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10110 // Channel compensation and/or (de)interleaving only.
\r
10111 Int24 *in = (Int24 *)inBuffer;
\r
10112 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10113 for (j=0; j<info.channels; j++) {
\r
10114 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10116 in += info.inJump;
\r
10117 out += info.outJump;
\r
10120 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10121 Int32 *in = (Int32 *)inBuffer;
\r
10122 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10123 for (j=0; j<info.channels; j++) {
\r
10124 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
10125 //out[info.outOffset[j]] >>= 8;
\r
10127 in += info.inJump;
\r
10128 out += info.outJump;
\r
10131 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10132 Float32 *in = (Float32 *)inBuffer;
\r
10133 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10134 for (j=0; j<info.channels; j++) {
\r
10135 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
10137 in += info.inJump;
\r
10138 out += info.outJump;
\r
10141 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10142 Float64 *in = (Float64 *)inBuffer;
\r
10143 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10144 for (j=0; j<info.channels; j++) {
\r
10145 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
10147 in += info.inJump;
\r
10148 out += info.outJump;
\r
10152 else if (info.outFormat == RTAUDIO_SINT16) {
\r
10153 Int16 *out = (Int16 *)outBuffer;
\r
10154 if (info.inFormat == RTAUDIO_SINT8) {
\r
10155 signed char *in = (signed char *)inBuffer;
\r
10156 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10157 for (j=0; j<info.channels; j++) {
\r
10158 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
10159 out[info.outOffset[j]] <<= 8;
\r
10161 in += info.inJump;
\r
10162 out += info.outJump;
\r
10165 else if (info.inFormat == RTAUDIO_SINT16) {
\r
10166 // Channel compensation and/or (de)interleaving only.
\r
10167 Int16 *in = (Int16 *)inBuffer;
\r
10168 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10169 for (j=0; j<info.channels; j++) {
\r
10170 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10172 in += info.inJump;
\r
10173 out += info.outJump;
\r
10176 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10177 Int24 *in = (Int24 *)inBuffer;
\r
10178 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10179 for (j=0; j<info.channels; j++) {
\r
10180 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
10182 in += info.inJump;
\r
10183 out += info.outJump;
\r
10186 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10187 Int32 *in = (Int32 *)inBuffer;
\r
10188 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10189 for (j=0; j<info.channels; j++) {
\r
10190 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
10192 in += info.inJump;
\r
10193 out += info.outJump;
\r
10196 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10197 Float32 *in = (Float32 *)inBuffer;
\r
10198 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10199 for (j=0; j<info.channels; j++) {
\r
10200 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10202 in += info.inJump;
\r
10203 out += info.outJump;
\r
10206 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10207 Float64 *in = (Float64 *)inBuffer;
\r
10208 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10209 for (j=0; j<info.channels; j++) {
\r
10210 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10212 in += info.inJump;
\r
10213 out += info.outJump;
\r
10217 else if (info.outFormat == RTAUDIO_SINT8) {
\r
10218 signed char *out = (signed char *)outBuffer;
\r
10219 if (info.inFormat == RTAUDIO_SINT8) {
\r
10220 // Channel compensation and/or (de)interleaving only.
\r
10221 signed char *in = (signed char *)inBuffer;
\r
10222 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10223 for (j=0; j<info.channels; j++) {
\r
10224 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10226 in += info.inJump;
\r
10227 out += info.outJump;
\r
10230 if (info.inFormat == RTAUDIO_SINT16) {
\r
10231 Int16 *in = (Int16 *)inBuffer;
\r
10232 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10233 for (j=0; j<info.channels; j++) {
\r
10234 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10236 in += info.inJump;
\r
10237 out += info.outJump;
\r
10240 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10241 Int24 *in = (Int24 *)inBuffer;
\r
10242 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10243 for (j=0; j<info.channels; j++) {
\r
10244 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10246 in += info.inJump;
\r
10247 out += info.outJump;
\r
10250 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10251 Int32 *in = (Int32 *)inBuffer;
\r
10252 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10253 for (j=0; j<info.channels; j++) {
\r
10254 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10256 in += info.inJump;
\r
10257 out += info.outJump;
\r
10260 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10261 Float32 *in = (Float32 *)inBuffer;
\r
10262 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10263 for (j=0; j<info.channels; j++) {
\r
10264 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10266 in += info.inJump;
\r
10267 out += info.outJump;
\r
10270 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10271 Float64 *in = (Float64 *)inBuffer;
\r
10272 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10273 for (j=0; j<info.channels; j++) {
\r
10274 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10276 in += info.inJump;
\r
10277 out += info.outJump;
\r
//static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
//static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
//static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10287 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10293 if ( format == RTAUDIO_SINT16 ) {
\r
10294 for ( unsigned int i=0; i<samples; i++ ) {
\r
10295 // Swap 1st and 2nd bytes.
\r
10297 *(ptr) = *(ptr+1);
\r
10300 // Increment 2 bytes.
\r
10304 else if ( format == RTAUDIO_SINT32 ||
\r
10305 format == RTAUDIO_FLOAT32 ) {
\r
10306 for ( unsigned int i=0; i<samples; i++ ) {
\r
10307 // Swap 1st and 4th bytes.
\r
10309 *(ptr) = *(ptr+3);
\r
10312 // Swap 2nd and 3rd bytes.
\r
10315 *(ptr) = *(ptr+1);
\r
10318 // Increment 3 more bytes.
\r
10322 else if ( format == RTAUDIO_SINT24 ) {
\r
10323 for ( unsigned int i=0; i<samples; i++ ) {
\r
10324 // Swap 1st and 3rd bytes.
\r
10326 *(ptr) = *(ptr+2);
\r
10329 // Increment 2 more bytes.
\r
10333 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10334 for ( unsigned int i=0; i<samples; i++ ) {
\r
10335 // Swap 1st and 8th bytes
\r
10337 *(ptr) = *(ptr+7);
\r
10340 // Swap 2nd and 7th bytes
\r
10343 *(ptr) = *(ptr+5);
\r
10346 // Swap 3rd and 6th bytes
\r
10349 *(ptr) = *(ptr+3);
\r
10352 // Swap 4th and 5th bytes
\r
10355 *(ptr) = *(ptr+1);
\r
10358 // Increment 5 more bytes.
\r
// Indentation settings for Vim and Emacs

// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:

// vim: et sts=2 sw=2
\r