1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound, ASIO and WASAPI) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2016 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.1.2
\r
#include "RtAudio.h"
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
#include <algorithm>
\r
51 // Static variable definitions.
\r
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
53 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-dependent mutex macros and string-conversion helpers.  The
// Windows branch maps onto critical sections, the pthread branch onto
// pthread mutexes; the dummy build needs no real locking.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  // TCHAR-agnostic helpers used by the Windows APIs to produce UTF-8
  // device-name strings from either narrow or wide character data.
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    // First call computes the required UTF-8 length, including the
    // terminating null; the string is sized without that null.
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else // __RTAUDIO_DUMMY__
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
\r
90 // *************************************************** //
\r
92 // RtAudio definitions.
\r
94 // *************************************************** //
\r
96 std::string RtAudio :: getVersion( void )
\r
98 return RTAUDIO_VERSION;
\r
101 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
\r
105 // The order here will control the order of RtAudio's API search in
\r
106 // the constructor.
\r
107 #if defined(__UNIX_JACK__)
\r
108 apis.push_back( UNIX_JACK );
\r
110 #if defined(__LINUX_ALSA__)
\r
111 apis.push_back( LINUX_ALSA );
\r
113 #if defined(__LINUX_PULSE__)
\r
114 apis.push_back( LINUX_PULSE );
\r
116 #if defined(__LINUX_OSS__)
\r
117 apis.push_back( LINUX_OSS );
\r
119 #if defined(__WINDOWS_ASIO__)
\r
120 apis.push_back( WINDOWS_ASIO );
\r
122 #if defined(__WINDOWS_WASAPI__)
\r
123 apis.push_back( WINDOWS_WASAPI );
\r
125 #if defined(__WINDOWS_DS__)
\r
126 apis.push_back( WINDOWS_DS );
\r
128 #if defined(__MACOSX_CORE__)
\r
129 apis.push_back( MACOSX_CORE );
\r
131 #if defined(__RTAUDIO_DUMMY__)
\r
132 apis.push_back( RTAUDIO_DUMMY );
\r
136 void RtAudio :: openRtApi( RtAudio::Api api )
\r
142 #if defined(__UNIX_JACK__)
\r
143 if ( api == UNIX_JACK )
\r
144 rtapi_ = new RtApiJack();
\r
146 #if defined(__LINUX_ALSA__)
\r
147 if ( api == LINUX_ALSA )
\r
148 rtapi_ = new RtApiAlsa();
\r
150 #if defined(__LINUX_PULSE__)
\r
151 if ( api == LINUX_PULSE )
\r
152 rtapi_ = new RtApiPulse();
\r
154 #if defined(__LINUX_OSS__)
\r
155 if ( api == LINUX_OSS )
\r
156 rtapi_ = new RtApiOss();
\r
158 #if defined(__WINDOWS_ASIO__)
\r
159 if ( api == WINDOWS_ASIO )
\r
160 rtapi_ = new RtApiAsio();
\r
162 #if defined(__WINDOWS_WASAPI__)
\r
163 if ( api == WINDOWS_WASAPI )
\r
164 rtapi_ = new RtApiWasapi();
\r
166 #if defined(__WINDOWS_DS__)
\r
167 if ( api == WINDOWS_DS )
\r
168 rtapi_ = new RtApiDs();
\r
170 #if defined(__MACOSX_CORE__)
\r
171 if ( api == MACOSX_CORE )
\r
172 rtapi_ = new RtApiCore();
\r
174 #if defined(__RTAUDIO_DUMMY__)
\r
175 if ( api == RTAUDIO_DUMMY )
\r
176 rtapi_ = new RtApiDummy();
\r
180 RtAudio :: RtAudio( RtAudio::Api api )
\r
184 if ( api != UNSPECIFIED ) {
\r
185 // Attempt to open the specified API.
\r
187 if ( rtapi_ ) return;
\r
189 // No compiled support for specified API value. Issue a debug
\r
190 // warning and continue as if no API was specified.
\r
191 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
194 // Iterate through the compiled APIs and return as soon as we find
\r
195 // one with at least one device or we reach the end of the list.
\r
196 std::vector< RtAudio::Api > apis;
\r
197 getCompiledApi( apis );
\r
198 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
199 openRtApi( apis[i] );
\r
200 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
\r
203 if ( rtapi_ ) return;
\r
205 // It should not be possible to get here because the preprocessor
\r
206 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
207 // API-specific definitions are passed to the compiler. But just in
\r
208 // case something weird happens, we'll thow an error.
\r
209 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
210 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
213 RtAudio :: ~RtAudio()
\r
219 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
220 RtAudio::StreamParameters *inputParameters,
\r
221 RtAudioFormat format, unsigned int sampleRate,
\r
222 unsigned int *bufferFrames,
\r
223 RtAudioCallback callback, void *userData,
\r
224 RtAudio::StreamOptions *options,
\r
225 RtAudioErrorCallback errorCallback )
\r
227 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
228 sampleRate, bufferFrames, callback,
\r
229 userData, options, errorCallback );
\r
232 // *************************************************** //
\r
234 // Public RtApi definitions (see end of file for
\r
235 // private or protected utility functions).
\r
237 // *************************************************** //
\r
241 stream_.state = STREAM_CLOSED;
\r
242 stream_.mode = UNINITIALIZED;
\r
243 stream_.apiHandle = 0;
\r
244 stream_.userBuffer[0] = 0;
\r
245 stream_.userBuffer[1] = 0;
\r
246 MUTEX_INITIALIZE( &stream_.mutex );
\r
247 showWarnings_ = true;
\r
248 firstErrorOccurred_ = false;
\r
253 MUTEX_DESTROY( &stream_.mutex );
\r
256 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
257 RtAudio::StreamParameters *iParams,
\r
258 RtAudioFormat format, unsigned int sampleRate,
\r
259 unsigned int *bufferFrames,
\r
260 RtAudioCallback callback, void *userData,
\r
261 RtAudio::StreamOptions *options,
\r
262 RtAudioErrorCallback errorCallback )
\r
264 if ( stream_.state != STREAM_CLOSED ) {
\r
265 errorText_ = "RtApi::openStream: a stream is already open!";
\r
266 error( RtAudioError::INVALID_USE );
\r
270 // Clear stream information potentially left from a previously open stream.
\r
273 if ( oParams && oParams->nChannels < 1 ) {
\r
274 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
275 error( RtAudioError::INVALID_USE );
\r
279 if ( iParams && iParams->nChannels < 1 ) {
\r
280 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
281 error( RtAudioError::INVALID_USE );
\r
285 if ( oParams == NULL && iParams == NULL ) {
\r
286 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
287 error( RtAudioError::INVALID_USE );
\r
291 if ( formatBytes(format) == 0 ) {
\r
292 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
293 error( RtAudioError::INVALID_USE );
\r
297 unsigned int nDevices = getDeviceCount();
\r
298 unsigned int oChannels = 0;
\r
300 oChannels = oParams->nChannels;
\r
301 if ( oParams->deviceId >= nDevices ) {
\r
302 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
303 error( RtAudioError::INVALID_USE );
\r
308 unsigned int iChannels = 0;
\r
310 iChannels = iParams->nChannels;
\r
311 if ( iParams->deviceId >= nDevices ) {
\r
312 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
313 error( RtAudioError::INVALID_USE );
\r
320 if ( oChannels > 0 ) {
\r
322 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
323 sampleRate, format, bufferFrames, options );
\r
324 if ( result == false ) {
\r
325 error( RtAudioError::SYSTEM_ERROR );
\r
330 if ( iChannels > 0 ) {
\r
332 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
333 sampleRate, format, bufferFrames, options );
\r
334 if ( result == false ) {
\r
335 if ( oChannels > 0 ) closeStream();
\r
336 error( RtAudioError::SYSTEM_ERROR );
\r
341 stream_.callbackInfo.callback = (void *) callback;
\r
342 stream_.callbackInfo.userData = userData;
\r
343 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
345 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
346 stream_.state = STREAM_STOPPED;
\r
349 unsigned int RtApi :: getDefaultInputDevice( void )
\r
351 // Should be implemented in subclasses if possible.
\r
355 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
357 // Should be implemented in subclasses if possible.
\r
361 void RtApi :: closeStream( void )
\r
363 // MUST be implemented in subclasses!
\r
367 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
368 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
369 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
370 RtAudio::StreamOptions * /*options*/ )
\r
372 // MUST be implemented in subclasses!
\r
376 void RtApi :: tickStreamTime( void )
\r
378 // Subclasses that do not provide their own implementation of
\r
379 // getStreamTime should call this function once per buffer I/O to
\r
380 // provide basic stream time support.
\r
382 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
384 #if defined( HAVE_GETTIMEOFDAY )
\r
385 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
389 long RtApi :: getStreamLatency( void )
\r
393 long totalLatency = 0;
\r
394 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
395 totalLatency = stream_.latency[0];
\r
396 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
397 totalLatency += stream_.latency[1];
\r
399 return totalLatency;
\r
402 double RtApi :: getStreamTime( void )
\r
406 #if defined( HAVE_GETTIMEOFDAY )
\r
407 // Return a very accurate estimate of the stream time by
\r
408 // adding in the elapsed time since the last tick.
\r
409 struct timeval then;
\r
410 struct timeval now;
\r
412 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
413 return stream_.streamTime;
\r
415 gettimeofday( &now, NULL );
\r
416 then = stream_.lastTickTimestamp;
\r
417 return stream_.streamTime +
\r
418 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
419 (then.tv_sec + 0.000001 * then.tv_usec));
\r
421 return stream_.streamTime;
\r
425 void RtApi :: setStreamTime( double time )
\r
430 stream_.streamTime = time;
\r
433 unsigned int RtApi :: getStreamSampleRate( void )
\r
437 return stream_.sampleRate;
\r
441 // *************************************************** //
\r
443 // OS/API-specific methods.
\r
445 // *************************************************** //
\r
447 #if defined(__MACOSX_CORE__)
\r
449 // The OS X CoreAudio API is designed to use a separate callback
\r
450 // procedure for each of its audio devices. A single RtAudio duplex
\r
451 // stream using two different devices is supported here, though it
\r
452 // cannot be guaranteed to always behave correctly because we cannot
\r
453 // synchronize these two callbacks.
\r
455 // A property listener is installed for over/underrun information.
\r
456 // However, no functionality is currently provided to allow property
\r
457 // listeners to trigger user handlers because it is unclear what could
\r
458 // be done if a critical stream parameter (buffer size, sample rate,
\r
459 // device disconnect) notification arrived. The listeners entail
\r
460 // quite a bit of extra code and most likely, a user program wouldn't
\r
461 // be prepared for the result anyway. However, we do provide a flag
\r
462 // to the client callback function to inform of an over/underrun.
\r
464 // A structure to hold various information related to the CoreAudio API
\r
466 struct CoreHandle {
\r
467 AudioDeviceID id[2]; // device ids
\r
468 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
469 AudioDeviceIOProcID procId[2];
\r
471 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
472 UInt32 nStreams[2]; // number of streams to use
\r
474 char *deviceBuffer;
\r
475 pthread_cond_t condition;
\r
476 int drainCounter; // Tracks callback counts when draining
\r
477 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
480 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
483 RtApiCore:: RtApiCore()
\r
485 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
486 // This is a largely undocumented but absolutely necessary
\r
487 // requirement starting with OS-X 10.6. If not called, queries and
\r
488 // updates to various audio device properties are not handled
\r
490 CFRunLoopRef theRunLoop = NULL;
\r
491 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
492 kAudioObjectPropertyScopeGlobal,
\r
493 kAudioObjectPropertyElementMaster };
\r
494 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
495 if ( result != noErr ) {
\r
496 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
497 error( RtAudioError::WARNING );
\r
502 RtApiCore :: ~RtApiCore()
\r
504 // The subclass destructor gets called before the base class
\r
505 // destructor, so close an existing stream before deallocating
\r
506 // apiDeviceId memory.
\r
507 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
510 unsigned int RtApiCore :: getDeviceCount( void )
\r
512 // Find out how many audio devices there are, if any.
\r
514 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
515 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
516 if ( result != noErr ) {
\r
517 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
518 error( RtAudioError::WARNING );
\r
522 return dataSize / sizeof( AudioDeviceID );
\r
525 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
527 unsigned int nDevices = getDeviceCount();
\r
528 if ( nDevices <= 1 ) return 0;
\r
531 UInt32 dataSize = sizeof( AudioDeviceID );
\r
532 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
533 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
534 if ( result != noErr ) {
\r
535 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
536 error( RtAudioError::WARNING );
\r
540 dataSize *= nDevices;
\r
541 AudioDeviceID deviceList[ nDevices ];
\r
542 property.mSelector = kAudioHardwarePropertyDevices;
\r
543 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
544 if ( result != noErr ) {
\r
545 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
546 error( RtAudioError::WARNING );
\r
550 for ( unsigned int i=0; i<nDevices; i++ )
\r
551 if ( id == deviceList[i] ) return i;
\r
553 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
554 error( RtAudioError::WARNING );
\r
558 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
560 unsigned int nDevices = getDeviceCount();
\r
561 if ( nDevices <= 1 ) return 0;
\r
564 UInt32 dataSize = sizeof( AudioDeviceID );
\r
565 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
566 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
567 if ( result != noErr ) {
\r
568 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
569 error( RtAudioError::WARNING );
\r
573 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
574 AudioDeviceID deviceList[ nDevices ];
\r
575 property.mSelector = kAudioHardwarePropertyDevices;
\r
576 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
577 if ( result != noErr ) {
\r
578 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
579 error( RtAudioError::WARNING );
\r
583 for ( unsigned int i=0; i<nDevices; i++ )
\r
584 if ( id == deviceList[i] ) return i;
\r
586 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
587 error( RtAudioError::WARNING );
\r
591 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
593 RtAudio::DeviceInfo info;
\r
594 info.probed = false;
\r
597 unsigned int nDevices = getDeviceCount();
\r
598 if ( nDevices == 0 ) {
\r
599 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
600 error( RtAudioError::INVALID_USE );
\r
604 if ( device >= nDevices ) {
\r
605 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
606 error( RtAudioError::INVALID_USE );
\r
610 AudioDeviceID deviceList[ nDevices ];
\r
611 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
612 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
613 kAudioObjectPropertyScopeGlobal,
\r
614 kAudioObjectPropertyElementMaster };
\r
615 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
616 0, NULL, &dataSize, (void *) &deviceList );
\r
617 if ( result != noErr ) {
\r
618 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
619 error( RtAudioError::WARNING );
\r
623 AudioDeviceID id = deviceList[ device ];
\r
625 // Get the device name.
\r
627 CFStringRef cfname;
\r
628 dataSize = sizeof( CFStringRef );
\r
629 property.mSelector = kAudioObjectPropertyManufacturer;
\r
630 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
631 if ( result != noErr ) {
\r
632 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
633 errorText_ = errorStream_.str();
\r
634 error( RtAudioError::WARNING );
\r
638 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
639 int length = CFStringGetLength(cfname);
\r
640 char *mname = (char *)malloc(length * 3 + 1);
\r
641 #if defined( UNICODE ) || defined( _UNICODE )
\r
642 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
644 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
646 info.name.append( (const char *)mname, strlen(mname) );
\r
647 info.name.append( ": " );
\r
648 CFRelease( cfname );
\r
651 property.mSelector = kAudioObjectPropertyName;
\r
652 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
653 if ( result != noErr ) {
\r
654 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
655 errorText_ = errorStream_.str();
\r
656 error( RtAudioError::WARNING );
\r
660 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
661 length = CFStringGetLength(cfname);
\r
662 char *name = (char *)malloc(length * 3 + 1);
\r
663 #if defined( UNICODE ) || defined( _UNICODE )
\r
664 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
666 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
668 info.name.append( (const char *)name, strlen(name) );
\r
669 CFRelease( cfname );
\r
672 // Get the output stream "configuration".
\r
673 AudioBufferList *bufferList = nil;
\r
674 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
675 property.mScope = kAudioDevicePropertyScopeOutput;
\r
676 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
678 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
679 if ( result != noErr || dataSize == 0 ) {
\r
680 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
681 errorText_ = errorStream_.str();
\r
682 error( RtAudioError::WARNING );
\r
686 // Allocate the AudioBufferList.
\r
687 bufferList = (AudioBufferList *) malloc( dataSize );
\r
688 if ( bufferList == NULL ) {
\r
689 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
690 error( RtAudioError::WARNING );
\r
694 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
695 if ( result != noErr || dataSize == 0 ) {
\r
696 free( bufferList );
\r
697 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
698 errorText_ = errorStream_.str();
\r
699 error( RtAudioError::WARNING );
\r
703 // Get output channel information.
\r
704 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
705 for ( i=0; i<nStreams; i++ )
\r
706 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
707 free( bufferList );
\r
709 // Get the input stream "configuration".
\r
710 property.mScope = kAudioDevicePropertyScopeInput;
\r
711 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
712 if ( result != noErr || dataSize == 0 ) {
\r
713 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
714 errorText_ = errorStream_.str();
\r
715 error( RtAudioError::WARNING );
\r
719 // Allocate the AudioBufferList.
\r
720 bufferList = (AudioBufferList *) malloc( dataSize );
\r
721 if ( bufferList == NULL ) {
\r
722 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
723 error( RtAudioError::WARNING );
\r
727 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
728 if (result != noErr || dataSize == 0) {
\r
729 free( bufferList );
\r
730 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
731 errorText_ = errorStream_.str();
\r
732 error( RtAudioError::WARNING );
\r
736 // Get input channel information.
\r
737 nStreams = bufferList->mNumberBuffers;
\r
738 for ( i=0; i<nStreams; i++ )
\r
739 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
740 free( bufferList );
\r
742 // If device opens for both playback and capture, we determine the channels.
\r
743 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
744 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
746 // Probe the device sample rates.
\r
747 bool isInput = false;
\r
748 if ( info.outputChannels == 0 ) isInput = true;
\r
750 // Determine the supported sample rates.
\r
751 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
752 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
753 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
754 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
755 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
756 errorText_ = errorStream_.str();
\r
757 error( RtAudioError::WARNING );
\r
761 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
762 AudioValueRange rangeList[ nRanges ];
\r
763 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
764 if ( result != kAudioHardwareNoError ) {
\r
765 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
766 errorText_ = errorStream_.str();
\r
767 error( RtAudioError::WARNING );
\r
771 // The sample rate reporting mechanism is a bit of a mystery. It
\r
772 // seems that it can either return individual rates or a range of
\r
773 // rates. I assume that if the min / max range values are the same,
\r
774 // then that represents a single supported rate and if the min / max
\r
775 // range values are different, the device supports an arbitrary
\r
776 // range of values (though there might be multiple ranges, so we'll
\r
777 // use the most conservative range).
\r
778 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
779 bool haveValueRange = false;
\r
780 info.sampleRates.clear();
\r
781 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
782 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
\r
783 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
\r
784 info.sampleRates.push_back( tmpSr );
\r
786 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
\r
787 info.preferredSampleRate = tmpSr;
\r
790 haveValueRange = true;
\r
791 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
792 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
796 if ( haveValueRange ) {
\r
797 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
798 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
\r
799 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
801 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
802 info.preferredSampleRate = SAMPLE_RATES[k];
\r
807 // Sort and remove any redundant values
\r
808 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
809 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
811 if ( info.sampleRates.size() == 0 ) {
\r
812 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
813 errorText_ = errorStream_.str();
\r
814 error( RtAudioError::WARNING );
\r
818 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
819 // Thus, any other "physical" formats supported by the device are of
\r
820 // no interest to the client.
\r
821 info.nativeFormats = RTAUDIO_FLOAT32;
\r
823 if ( info.outputChannels > 0 )
\r
824 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
825 if ( info.inputChannels > 0 )
\r
826 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
828 info.probed = true;
\r
832 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
833 const AudioTimeStamp* /*inNow*/,
\r
834 const AudioBufferList* inInputData,
\r
835 const AudioTimeStamp* /*inInputTime*/,
\r
836 AudioBufferList* outOutputData,
\r
837 const AudioTimeStamp* /*inOutputTime*/,
\r
838 void* infoPointer )
\r
840 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
842 RtApiCore *object = (RtApiCore *) info->object;
\r
843 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
844 return kAudioHardwareUnspecifiedError;
\r
846 return kAudioHardwareNoError;
\r
849 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
851 const AudioObjectPropertyAddress properties[],
\r
852 void* handlePointer )
\r
854 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
855 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
856 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
857 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
858 handle->xrun[1] = true;
\r
860 handle->xrun[0] = true;
\r
864 return kAudioHardwareNoError;
\r
867 static OSStatus rateListener( AudioObjectID inDevice,
\r
868 UInt32 /*nAddresses*/,
\r
869 const AudioObjectPropertyAddress /*properties*/[],
\r
870 void* ratePointer )
\r
872 Float64 *rate = (Float64 *) ratePointer;
\r
873 UInt32 dataSize = sizeof( Float64 );
\r
874 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
875 kAudioObjectPropertyScopeGlobal,
\r
876 kAudioObjectPropertyElementMaster };
\r
877 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
878 return kAudioHardwareNoError;
\r
// Open and configure a CoreAudio device for one stream direction.
//
// Steps performed (as visible in this copy of the file):
//   1. Validate the device index against the system device list.
//   2. Fetch the device's stream "configuration" (AudioBufferList) and find
//      the CoreAudio stream(s) that cover channels [firstChannel,
//      firstChannel+channels): prefer a single stream, else span multiple
//      consecutive streams.
//   3. Negotiate the hardware buffer-frame size against the device's
//      supported range (and RTAUDIO_MINIMIZE_LATENCY).
//   4. Optionally take exclusive ("hog") access, set the nominal sample
//      rate (waiting via a temporary rateListener), and set the virtual
//      and, if needed, physical stream formats.
//   5. Fill in the stream_ bookkeeping, allocate the CoreHandle plus the
//      user/device conversion buffers, and install the device I/O proc and
//      the processor-overload (xrun) listener.
//
// Returns true on success; on failure errorText_ is set and (in the full
// file) the cleanup code at the bottom releases partial allocations.
// NOTE(review): this chunk of the file is missing a number of original
// lines (closing braces, 'return FAILURE;' paths, else-branches); the
// comments below document only the code that is visible here.
881 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

882                                    unsigned int firstChannel, unsigned int sampleRate,

883                                    RtAudioFormat format, unsigned int *bufferSize,

884                                    RtAudio::StreamOptions *options )

887   unsigned int nDevices = getDeviceCount();

888   if ( nDevices == 0 ) {

889     // This should not happen because a check is made before this function is called.

890     errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";

894   if ( device >= nDevices ) {

895     // This should not happen because a check is made before this function is called.

896     errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";

// Re-query the full device-ID list so 'device' (an RtAudio index) can be
// mapped to a CoreAudio AudioDeviceID.
// NOTE(review): variable-length array 'deviceList[nDevices]' is a GCC/Clang
// extension, not standard C++.
900   AudioDeviceID deviceList[ nDevices ];

901   UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;

902   AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,

903                                           kAudioObjectPropertyScopeGlobal,

904                                           kAudioObjectPropertyElementMaster };

905   OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,

906                                                 0, NULL, &dataSize, (void *) &deviceList );

907   if ( result != noErr ) {

908     errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";

912   AudioDeviceID id = deviceList[ device ];

914   // Setup for stream mode.

915   bool isInput = false;

916   if ( mode == INPUT ) {

918     property.mScope = kAudioDevicePropertyScopeInput;

921     property.mScope = kAudioDevicePropertyScopeOutput;

923   // Get the stream "configuration".

924   AudioBufferList	*bufferList = nil;

926   property.mSelector = kAudioDevicePropertyStreamConfiguration;

// Two-step query: first the required byte size, then the data itself.
927   result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );

928   if ( result != noErr || dataSize == 0 ) {

929     errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";

930     errorText_ = errorStream_.str();

934   // Allocate the AudioBufferList.

935   bufferList = (AudioBufferList *) malloc( dataSize );

936   if ( bufferList == NULL ) {

937     errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";

941   result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );

942   if (result != noErr || dataSize == 0) {

943     free( bufferList );

944     errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";

945     errorText_ = errorStream_.str();

949   // Search for one or more streams that contain the desired number of

950   // channels. CoreAudio devices can have an arbitrary number of

951   // streams and each stream can have an arbitrary number of channels.

952   // For each stream, a single buffer of interleaved samples is

953   // provided.  RtAudio prefers the use of one stream of interleaved

954   // data or multiple consecutive single-channel streams.  However, we

955   // now support multiple consecutive multi-channel streams of

956   // interleaved data as well.

957   UInt32 iStream, offsetCounter = firstChannel;

958   UInt32 nStreams = bufferList->mNumberBuffers;

959   bool monoMode = false;

960   bool foundStream = false;

962   // First check that the device supports the requested number of

964   UInt32 deviceChannels = 0;

965   for ( iStream=0; iStream<nStreams; iStream++ )

966     deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;

968   if ( deviceChannels < ( channels + firstChannel ) ) {

969     free( bufferList );

970     errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";

971     errorText_ = errorStream_.str();

975   // Look for a single stream meeting our needs.

976   UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;

977   for ( iStream=0; iStream<nStreams; iStream++ ) {

978     streamChannels = bufferList->mBuffers[iStream].mNumberChannels;

// This stream alone can hold all requested channels starting at the
// remaining offset: use it, interleaved, with 'channelOffset' within it.
979     if ( streamChannels >= channels + offsetCounter ) {

980       firstStream = iStream;

981       channelOffset = offsetCounter;

982       foundStream = true;

// Requested range begins inside this stream but spills past its end; the
// single-stream search fails and the multi-stream path below takes over.
985     if ( streamChannels > offsetCounter ) break;

986     offsetCounter -= streamChannels;

989   // If we didn't find a single stream above, then we should be able

990   // to meet the channel specification with multiple streams.

991   if ( foundStream == false ) {

993     offsetCounter = firstChannel;

// Skip whole streams until the one containing 'firstChannel' is reached.
994     for ( iStream=0; iStream<nStreams; iStream++ ) {

995       streamChannels = bufferList->mBuffers[iStream].mNumberChannels;

996       if ( streamChannels > offsetCounter ) break;

997       offsetCounter -= streamChannels;

1000     firstStream = iStream;

1001     channelOffset = offsetCounter;

// Channels still needed beyond the first stream; consume following
// consecutive streams until satisfied.  'monoMode' stays true only when
// every touched stream carries a single channel.
1002     Int32 channelCounter = channels + offsetCounter - streamChannels;

1004     if ( streamChannels > 1 ) monoMode = false;

1005     while ( channelCounter > 0 ) {

1006       streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;

1007       if ( streamChannels > 1 ) monoMode = false;

1008       channelCounter -= streamChannels;

1013   free( bufferList );

1015   // Determine the buffer size.

1016   AudioValueRange	bufferRange;

1017   dataSize = sizeof( AudioValueRange );

1018   property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;

1019   result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );

1021   if ( result != noErr ) {

1022     errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";

1023     errorText_ = errorStream_.str();

// Clamp the caller's requested frame count into the device's supported
// range; RTAUDIO_MINIMIZE_LATENCY forces the minimum.
1027   if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;

1028   else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;

1029   if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;

1031   // Set the buffer size.  For multiple streams, I'm assuming we only

1032   // need to make this setting for the master channel.

1033   UInt32 theSize = (UInt32) *bufferSize;

1034   dataSize = sizeof( UInt32 );

1035   property.mSelector = kAudioDevicePropertyBufferFrameSize;

1036   result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );

1038   if ( result != noErr ) {

1039     errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";

1040     errorText_ = errorStream_.str();

1044   // If attempting to setup a duplex stream, the bufferSize parameter

1045   // MUST be the same in both directions!

1046   *bufferSize = theSize;

1047   if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {

1048     errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";

1049     errorText_ = errorStream_.str();

1053   stream_.bufferSize = *bufferSize;

1054   stream_.nBuffers = 1;

1056   // Try to set "hog" mode ... it's not clear to me this is working.

1057   if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {

// Hog mode grants this process exclusive device access; read the current
// owner pid first, and only claim it if someone else (or no one) owns it.
1059     dataSize = sizeof( hog_pid );

1060     property.mSelector = kAudioDevicePropertyHogMode;

1061     result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );

1062     if ( result != noErr ) {

1063       errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";

1064       errorText_ = errorStream_.str();

1068     if ( hog_pid != getpid() ) {

1069       hog_pid = getpid();

1070       result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );

1071       if ( result != noErr ) {

1072         errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";

1073         errorText_ = errorStream_.str();

1079   // Check and if necessary, change the sample rate for the device.

1080   Float64 nominalRate;

1081   dataSize = sizeof( Float64 );

1082   property.mSelector = kAudioDevicePropertyNominalSampleRate;

1083   result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );

1084   if ( result != noErr ) {

1085     errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";

1086     errorText_ = errorStream_.str();

1090   // Only change the sample rate if off by more than 1 Hz.

1091   if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {

1093     // Set a property listener for the sample rate change

// rateListener (defined above) writes the device's new rate into
// 'reportedRate' from CoreAudio's notification thread.
1094     Float64 reportedRate = 0.0;

1095     AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };

1096     result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );

1097     if ( result != noErr ) {

1098       errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";

1099       errorText_ = errorStream_.str();

1103     nominalRate = (Float64) sampleRate;

1104     result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );

1105     if ( result != noErr ) {

1106       AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );

1107       errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";

1108       errorText_ = errorStream_.str();

1112     // Now wait until the reported nominal rate is what we just set.

// Poll 'reportedRate' with a ~5 s timeout budget counted in microseconds.
// NOTE(review): no sleep call is visible between iterations here (upstream
// RtAudio calls usleep(5000) inside this loop); the corresponding line
// appears to be missing from this copy — verify against the canonical file.
1113     UInt32 microCounter = 0;

1114     while ( reportedRate != nominalRate ) {

1115       microCounter += 5000;

1116       if ( microCounter > 5000000 ) break;

1120     // Remove the property listener.

1121     AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );

1123     if ( microCounter > 5000000 ) {

1124       errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";

1125       errorText_ = errorStream_.str();

1130   // Now set the stream format for all streams.  Also, check the

1131   // physical format of the device and change that if necessary.

1132   AudioStreamBasicDescription	description;

1133   dataSize = sizeof( AudioStreamBasicDescription );

1134   property.mSelector = kAudioStreamPropertyVirtualFormat;

1135   result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );

1136   if ( result != noErr ) {

1137     errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";

1138     errorText_ = errorStream_.str();

1142   // Set the sample rate and data format id.  However, only make the

1143   // change if the sample rate is not within 1.0 of the desired

1144   // rate and the format is not linear pcm.

1145   bool updateFormat = false;

1146   if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {

1147     description.mSampleRate = (Float64) sampleRate;

1148     updateFormat = true;

1151   if ( description.mFormatID != kAudioFormatLinearPCM ) {

1152     description.mFormatID = kAudioFormatLinearPCM;

1153     updateFormat = true;

1156   if ( updateFormat ) {

1157     result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );

1158     if ( result != noErr ) {

1159       errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";

1160       errorText_ = errorStream_.str();

1165   // Now check the physical format.

1166   property.mSelector = kAudioStreamPropertyPhysicalFormat;

1167   result = AudioObjectGetPropertyData( id, &property, 0, NULL,  &dataSize, &description );

1168   if ( result != noErr ) {

1169     errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";

1170     errorText_ = errorStream_.str();

1174   //std::cout << "Current physical stream format:" << std::endl;

1175   //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;

1176   //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;

1177   //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;

1178   //std::cout << "   sample rate = " << description.mSampleRate << std::endl;

// The physical format is only renegotiated when it is not already linear
// PCM at >= 16 bits; candidates are tried from highest quality downward.
1180   if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {

1181     description.mFormatID = kAudioFormatLinearPCM;

1182     //description.mSampleRate = (Float64) sampleRate;

1183     AudioStreamBasicDescription	testDescription = description;

1184     UInt32 formatFlags;

1186     // We'll try higher bit rates first and then work our way down.

// NOTE(review): the vector is declared pair<UInt32,UInt32> but the pushes
// below construct pair<Float32,UInt32> with keys 24.2 / 24.4 — converting
// into UInt32 truncates both to 24, collapsing the distinct 24-bit
// variants.  Upstream RtAudio declares this as pair<Float32,UInt32>;
// verify against the canonical file.
1187     std::vector< std::pair<UInt32, UInt32>  > physicalFormats;

1188     formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;

1189     physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );

1190     formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;

1191     physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );

1192     physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed

1193     formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );

1194     physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low

1195     formatFlags |= kAudioFormatFlagIsAlignedHigh;

1196     physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high

1197     formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;

1198     physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );

1199     physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );

1201     bool setPhysicalFormat = false;

1202     for( unsigned int i=0; i<physicalFormats.size(); i++ ) {

1203       testDescription = description;

1204       testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;

1205       testDescription.mFormatFlags = physicalFormats[i].second;

// NOTE(review): bitwise '~' on the masked flag is always non-zero, so this
// condition is true for every 24-bit entry regardless of the packed flag;
// logical '!' appears to be intended (known upstream RtAudio issue) —
// confirm before changing.
1206       if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked )  )

1207         testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;

1209         testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;

1210       testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;

1211       result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );

1212       if ( result == noErr ) {

1213         setPhysicalFormat = true;

1214         //std::cout << "Updated physical stream format:" << std::endl;

1215         //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;

1216         //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;

1217         //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;

1218         //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;

1223     if ( !setPhysicalFormat ) {

1224       errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";

1225       errorText_ = errorStream_.str();

1228   } // done setting virtual/physical formats.

1230   // Get the stream / device latency.

1232   dataSize = sizeof( UInt32 );

1233   property.mSelector = kAudioDevicePropertyLatency;

// Latency is optional: a failed query is only a WARNING, not fatal.
1234   if ( AudioObjectHasProperty( id, &property ) == true ) {

1235     result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );

1236     if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;

1238       errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";

1239       errorText_ = errorStream_.str();

1240       error( RtAudioError::WARNING );

1244   // Byte-swapping: According to AudioHardware.h, the stream data will

1245   // always be presented in native-endian format, so we should never

1246   // need to byte swap.

1247   stream_.doByteSwap[mode] = false;

1249   // From the CoreAudio documentation, PCM data must be supplied as

1251   stream_.userFormat = format;

1252   stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;

// With one stream, the device side carries the stream's full channel
// count; with multiple streams we only track the requested channels.
1254   if ( streamCount == 1 )

1255     stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;

1256   else // multiple streams

1257     stream_.nDeviceChannels[mode] = channels;

1258   stream_.nUserChannels[mode] = channels;

1259   stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream

1260   if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

1261   else stream_.userInterleaved = true;

1262   stream_.deviceInterleaved[mode] = true;

1263   if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;

1265   // Set flags for buffer conversion.

1266   stream_.doConvertBuffer[mode] = false;

1267   if ( stream_.userFormat != stream_.deviceFormat[mode] )

1268     stream_.doConvertBuffer[mode] = true;

1269   if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )

1270     stream_.doConvertBuffer[mode] = true;

1271   if ( streamCount == 1 ) {

1272     if ( stream_.nUserChannels[mode] > 1 &&

1273          stream_.userInterleaved != stream_.deviceInterleaved[mode] )

1274       stream_.doConvertBuffer[mode] = true;

1276   else if ( monoMode && stream_.userInterleaved )

1277     stream_.doConvertBuffer[mode] = true;

1279   // Allocate our CoreHandle structure for the stream.

// The handle is shared between the two directions of a duplex stream, so
// only allocate it on the first call.
1280   CoreHandle *handle = 0;

1281   if ( stream_.apiHandle == 0 ) {

1283       handle = new CoreHandle;

1285     catch ( std::bad_alloc& ) {

1286       errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";

1290     if ( pthread_cond_init( &handle->condition, NULL ) ) {

1291       errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";

1294     stream_.apiHandle = (void *) handle;

1297     handle = (CoreHandle *) stream_.apiHandle;

1298   handle->iStream[mode] = firstStream;

1299   handle->nStreams[mode] = streamCount;

1300   handle->id[mode] = id;

1302   // Allocate necessary internal buffers.

1303   unsigned long bufferBytes;

1304   bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

1305   //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

// NOTE(review): memset runs before the NULL check — if malloc fails this
// dereferences NULL before the error path is reached; the check should
// precede the memset (or calloc should be used, as the commented-out line
// above suggests).
1306   stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );

1307   memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );

1308   if ( stream_.userBuffer[mode] == NULL ) {

1309     errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";

1313   // If possible, we will make use of the CoreAudio stream buffers as

1314   // "device buffers".  However, we can't do this if using multiple

1316   if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {

// Reuse an existing (output-side) device buffer for duplex input when it
// is already large enough.
1318     bool makeBuffer = true;

1319     bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

1320     if ( mode == INPUT ) {

1321       if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

1322         unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

1323         if ( bufferBytes <= bytesOut ) makeBuffer = false;

1327     if ( makeBuffer ) {

1328       bufferBytes *= *bufferSize;

1329       if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

1330       stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

1331       if ( stream_.deviceBuffer == NULL ) {

1332         errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";

1338   stream_.sampleRate = sampleRate;

1339   stream_.device[mode] = device;

1340   stream_.state = STREAM_STOPPED;

1341   stream_.callbackInfo.object = (void *) this;

1343   // Setup the buffer conversion information structure.

1344   if ( stream_.doConvertBuffer[mode] ) {

1345     if ( streamCount > 1 ) setConvertInfo( mode, 0 );

1346     else setConvertInfo( mode, channelOffset );

1349   if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )

1350     // Only one callback procedure per device.

1351     stream_.mode = DUPLEX;

1353 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )

1354     result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );

1356     // deprecated in favor of AudioDeviceCreateIOProcID()

1357     result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );

1359   if ( result != noErr ) {

1360     errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";

1361     errorText_ = errorStream_.str();

1364   if ( stream_.mode == OUTPUT && mode == INPUT )

1365     stream_.mode = DUPLEX;

1367     stream_.mode = mode;

1370   // Setup the device property listener for over/underload.

1371   property.mSelector = kAudioDeviceProcessorOverload;

1372   property.mScope = kAudioObjectPropertyScopeGlobal;

1373   result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );

// Failure/cleanup path (reached via gotos/labels not visible in this
// copy): tear down anything allocated above and mark the stream closed.
1379     pthread_cond_destroy( &handle->condition );

1381     stream_.apiHandle = 0;

1384   for ( int i=0; i<2; i++ ) {

1385     if ( stream_.userBuffer[i] ) {

1386       free( stream_.userBuffer[i] );

1387       stream_.userBuffer[i] = 0;

1391   if ( stream_.deviceBuffer ) {

1392     free( stream_.deviceBuffer );

1393     stream_.deviceBuffer = 0;

1396   stream_.state = STREAM_CLOSED;
\r
// Close the open stream: for each active direction, remove the
// processor-overload (xrun) listener, stop the device if still running,
// and destroy/remove the I/O proc; then free the user/device buffers,
// destroy the CoreHandle's condition variable, and reset the stream state
// to UNINITIALIZED / STREAM_CLOSED.  Calling with no open stream only
// raises a WARNING.
1400 void RtApiCore :: closeStream( void )

1402   if ( stream_.state == STREAM_CLOSED ) {

1403     errorText_ = "RtApiCore::closeStream(): no open stream to close!";

1404     error( RtAudioError::WARNING );

1408   CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

// Output (or output half of duplex) teardown on handle->id[0].
1409   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

1411     AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,

1412                                             kAudioObjectPropertyScopeGlobal,

1413                                             kAudioObjectPropertyElementMaster };

1415     property.mSelector = kAudioDeviceProcessorOverload;

1416     property.mScope = kAudioObjectPropertyScopeGlobal;

1417     if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {

1418       errorText_ = "RtApiCore::closeStream(): error removing property listener!";

1419       error( RtAudioError::WARNING );

1422     if ( stream_.state == STREAM_RUNNING )

1423       AudioDeviceStop( handle->id[0], callbackHandler );

1424 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )

1425     AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );

1427     // deprecated in favor of AudioDeviceDestroyIOProcID()

1428     AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );

// Input teardown on handle->id[1]; skipped for same-device duplex, where
// the single I/O proc on id[0] serves both directions.
1432   if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

1434     AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,

1435                                             kAudioObjectPropertyScopeGlobal,

1436                                             kAudioObjectPropertyElementMaster };

1438     property.mSelector = kAudioDeviceProcessorOverload;

1439     property.mScope = kAudioObjectPropertyScopeGlobal;

1440     if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {

1441       errorText_ = "RtApiCore::closeStream(): error removing property listener!";

1442       error( RtAudioError::WARNING );

1445     if ( stream_.state == STREAM_RUNNING )

1446       AudioDeviceStop( handle->id[1], callbackHandler );

1447 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )

1448     AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );

1450     // deprecated in favor of AudioDeviceDestroyIOProcID()

1451     AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );

// Release the per-direction user buffers and the shared device buffer.
1455   for ( int i=0; i<2; i++ ) {

1456     if ( stream_.userBuffer[i] ) {

1457       free( stream_.userBuffer[i] );

1458       stream_.userBuffer[i] = 0;

1462   if ( stream_.deviceBuffer ) {

1463     free( stream_.deviceBuffer );

1464     stream_.deviceBuffer = 0;

1467   // Destroy pthread condition variable.

1468   pthread_cond_destroy( &handle->condition );

1470   stream_.apiHandle = 0;

1472   stream_.mode = UNINITIALIZED;

1473   stream_.state = STREAM_CLOSED;
\r
// Start the stream's CoreAudio I/O proc(s): the output device (id[0]) for
// OUTPUT/DUPLEX and the input device (id[1]) for INPUT or split-device
// DUPLEX.  Resets the drain bookkeeping and marks the stream RUNNING.
// Starting an already-running stream is only a WARNING; a CoreAudio
// failure raises SYSTEM_ERROR with errorText_ set.
1476 void RtApiCore :: startStream( void )

1479   if ( stream_.state == STREAM_RUNNING ) {

1480     errorText_ = "RtApiCore::startStream(): the stream is already running!";

1481     error( RtAudioError::WARNING );

1485   OSStatus result = noErr;

1486   CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

1487   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

1489     result = AudioDeviceStart( handle->id[0], callbackHandler );

1490     if ( result != noErr ) {

1491       errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";

1492       errorText_ = errorStream_.str();

// Same-device duplex uses the single proc started above; only start the
// input device separately when the devices differ.
1497   if ( stream_.mode == INPUT ||

1498        ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

1500     result = AudioDeviceStart( handle->id[1], callbackHandler );

1501     if ( result != noErr ) {

1502       errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";

1503       errorText_ = errorStream_.str();

1508   handle->drainCounter = 0;

1509   handle->internalDrain = false;

1510   stream_.state = STREAM_RUNNING;

1513   if ( result == noErr ) return;

1514   error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream gracefully.  For the output direction it first lets the
// final buffer drain: setting drainCounter to 2 makes callbackEvent()
// write zeros, and this thread blocks on the handle's condition variable
// until the callback signals the drain is done; then the device(s) are
// stopped.  Stopping an already-stopped stream is only a WARNING.
1517 void RtApiCore :: stopStream( void )

1520   if ( stream_.state == STREAM_STOPPED ) {

1521     errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";

1522     error( RtAudioError::WARNING );

1526   OSStatus result = noErr;

1527   CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

1528   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// drainCounter == 0 means this is an external stop (not initiated by the
// user callback's return value), so request a drain and wait for it.
1530     if ( handle->drainCounter == 0 ) {

1531       handle->drainCounter = 2;

1532       pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled

1535     result = AudioDeviceStop( handle->id[0], callbackHandler );

1536     if ( result != noErr ) {

1537       errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";

1538       errorText_ = errorStream_.str();

1543   if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

1545     result = AudioDeviceStop( handle->id[1], callbackHandler );

1546     if ( result != noErr ) {

1547       errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";

1548       errorText_ = errorStream_.str();

1553   stream_.state = STREAM_STOPPED;

1556   if ( result == noErr ) return;

1557   error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the stream without draining pending output: setting drainCounter
// to 2 here makes the next callbackEvent() pass write zeros immediately
// (in the full file this is followed by a call to stopStream()).
// Aborting an already-stopped stream is only a WARNING.
1560 void RtApiCore :: abortStream( void )

1563   if ( stream_.state == STREAM_STOPPED ) {

1564     errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";

1565     error( RtAudioError::WARNING );

1569   CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

1570   handle->drainCounter = 2;
\r
1575 // This function will be called by a spawned thread when the user
1576 // callback function signals that the stream should be stopped or
1577 // aborted.  It is better to handle it this way because the
1578 // callbackEvent() function probably should return before the AudioDeviceStop()
1579 // function is called.

// Thread entry point: 'ptr' is the stream's CallbackInfo, whose 'object'
// member is the owning RtApiCore instance; the thread simply calls
// stopStream() on it and exits.
1580 static void *coreStopStream( void *ptr )

1582   CallbackInfo *info = (CallbackInfo *) ptr;

1583   RtApiCore *object = (RtApiCore *) info->object;

1585   object->stopStream();

1586   pthread_exit( NULL );
\r
1589 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1590 const AudioBufferList *inBufferList,
\r
1591 const AudioBufferList *outBufferList )
\r
1593 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1594 if ( stream_.state == STREAM_CLOSED ) {
\r
1595 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1596 error( RtAudioError::WARNING );
\r
1600 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1601 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1603 // Check if we were draining the stream and signal is finished.
\r
1604 if ( handle->drainCounter > 3 ) {
\r
1605 ThreadHandle threadId;
\r
1607 stream_.state = STREAM_STOPPING;
\r
1608 if ( handle->internalDrain == true )
\r
1609 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1610 else // external call to stopStream()
\r
1611 pthread_cond_signal( &handle->condition );
\r
1615 AudioDeviceID outputDevice = handle->id[0];
\r
1617 // Invoke user callback to get fresh output data UNLESS we are
\r
1618 // draining stream or duplex mode AND the input/output devices are
\r
1619 // different AND this function is called for the input device.
\r
1620 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1621 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1622 double streamTime = getStreamTime();
\r
1623 RtAudioStreamStatus status = 0;
\r
1624 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1625 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1626 handle->xrun[0] = false;
\r
1628 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1629 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1630 handle->xrun[1] = false;
\r
1633 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1634 stream_.bufferSize, streamTime, status, info->userData );
\r
1635 if ( cbReturnValue == 2 ) {
\r
1636 stream_.state = STREAM_STOPPING;
\r
1637 handle->drainCounter = 2;
\r
1641 else if ( cbReturnValue == 1 ) {
\r
1642 handle->drainCounter = 1;
\r
1643 handle->internalDrain = true;
\r
1647 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1649 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1651 if ( handle->nStreams[0] == 1 ) {
\r
1652 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1654 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1656 else { // fill multiple streams with zeros
\r
1657 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1658 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1660 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1664 else if ( handle->nStreams[0] == 1 ) {
\r
1665 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1666 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1667 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1669 else { // copy from user buffer
\r
1670 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1671 stream_.userBuffer[0],
\r
1672 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1675 else { // fill multiple streams
\r
1676 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1677 if ( stream_.doConvertBuffer[0] ) {
\r
1678 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1679 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1682 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1683 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1684 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1685 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1686 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1689 else { // fill multiple multi-channel streams with interleaved data
\r
1690 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1691 Float32 *out, *in;
\r
1693 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1694 UInt32 inChannels = stream_.nUserChannels[0];
\r
1695 if ( stream_.doConvertBuffer[0] ) {
\r
1696 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1697 inChannels = stream_.nDeviceChannels[0];
\r
1700 if ( inInterleaved ) inOffset = 1;
\r
1701 else inOffset = stream_.bufferSize;
\r
1703 channelsLeft = inChannels;
\r
1704 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1706 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1707 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1710 // Account for possible channel offset in first stream
\r
1711 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1712 streamChannels -= stream_.channelOffset[0];
\r
1713 outJump = stream_.channelOffset[0];
\r
1717 // Account for possible unfilled channels at end of the last stream
\r
1718 if ( streamChannels > channelsLeft ) {
\r
1719 outJump = streamChannels - channelsLeft;
\r
1720 streamChannels = channelsLeft;
\r
1723 // Determine input buffer offsets and skips
\r
1724 if ( inInterleaved ) {
\r
1725 inJump = inChannels;
\r
1726 in += inChannels - channelsLeft;
\r
1730 in += (inChannels - channelsLeft) * inOffset;
\r
1733 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1734 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1735 *out++ = in[j*inOffset];
\r
1740 channelsLeft -= streamChannels;
\r
1746 // Don't bother draining input
\r
1747 if ( handle->drainCounter ) {
\r
1748 handle->drainCounter++;
\r
1752 AudioDeviceID inputDevice;
\r
1753 inputDevice = handle->id[1];
\r
1754 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1756 if ( handle->nStreams[1] == 1 ) {
\r
1757 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1758 convertBuffer( stream_.userBuffer[1],
\r
1759 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1760 stream_.convertInfo[1] );
\r
1762 else { // copy to user buffer
\r
1763 memcpy( stream_.userBuffer[1],
\r
1764 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1765 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1768 else { // read from multiple streams
\r
1769 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1770 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1772 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1773 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1774 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1775 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1776 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1779 else { // read from multiple multi-channel streams
\r
1780 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1781 Float32 *out, *in;
\r
1783 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1784 UInt32 outChannels = stream_.nUserChannels[1];
\r
1785 if ( stream_.doConvertBuffer[1] ) {
\r
1786 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1787 outChannels = stream_.nDeviceChannels[1];
\r
1790 if ( outInterleaved ) outOffset = 1;
\r
1791 else outOffset = stream_.bufferSize;
\r
1793 channelsLeft = outChannels;
\r
1794 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1796 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1797 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1800 // Account for possible channel offset in first stream
\r
1801 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1802 streamChannels -= stream_.channelOffset[1];
\r
1803 inJump = stream_.channelOffset[1];
\r
1807 // Account for possible unread channels at end of the last stream
\r
1808 if ( streamChannels > channelsLeft ) {
\r
1809 inJump = streamChannels - channelsLeft;
\r
1810 streamChannels = channelsLeft;
\r
1813 // Determine output buffer offsets and skips
\r
1814 if ( outInterleaved ) {
\r
1815 outJump = outChannels;
\r
1816 out += outChannels - channelsLeft;
\r
1820 out += (outChannels - channelsLeft) * outOffset;
\r
1823 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1824 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1825 out[j*outOffset] = *in++;
\r
1830 channelsLeft -= streamChannels;
\r
1834 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1835 convertBuffer( stream_.userBuffer[1],
\r
1836 stream_.deviceBuffer,
\r
1837 stream_.convertInfo[1] );
\r
1843 //MUTEX_UNLOCK( &stream_.mutex );
\r
1845 RtApi::tickStreamTime();
\r
1849 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1853 case kAudioHardwareNotRunningError:
\r
1854 return "kAudioHardwareNotRunningError";
\r
1856 case kAudioHardwareUnspecifiedError:
\r
1857 return "kAudioHardwareUnspecifiedError";
\r
1859 case kAudioHardwareUnknownPropertyError:
\r
1860 return "kAudioHardwareUnknownPropertyError";
\r
1862 case kAudioHardwareBadPropertySizeError:
\r
1863 return "kAudioHardwareBadPropertySizeError";
\r
1865 case kAudioHardwareIllegalOperationError:
\r
1866 return "kAudioHardwareIllegalOperationError";
\r
1868 case kAudioHardwareBadObjectError:
\r
1869 return "kAudioHardwareBadObjectError";
\r
1871 case kAudioHardwareBadDeviceError:
\r
1872 return "kAudioHardwareBadDeviceError";
\r
1874 case kAudioHardwareBadStreamError:
\r
1875 return "kAudioHardwareBadStreamError";
\r
1877 case kAudioHardwareUnsupportedOperationError:
\r
1878 return "kAudioHardwareUnsupportedOperationError";
\r
1880 case kAudioDeviceUnsupportedFormatError:
\r
1881 return "kAudioDeviceUnsupportedFormatError";
\r
1883 case kAudioDevicePermissionsError:
\r
1884 return "kAudioDevicePermissionsError";
\r
1887 return "CoreAudio unknown error";
\r
1891 //******************** End of __MACOSX_CORE__ *********************//
\r
1894 #if defined(__UNIX_JACK__)
\r
1896 // JACK is a low-latency audio server, originally written for the
\r
1897 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1898 // connect a number of different applications to an audio device, as
\r
1899 // well as allowing them to share audio between themselves.
\r
1901 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1902 // have ports connected to the server. The JACK server is typically
\r
1903 // started in a terminal as follows:
\r
1905 // .jackd -d alsa -d hw:0
\r
1907 // or through an interface program such as qjackctl. Many of the
\r
1908 // parameters normally set for a stream are fixed by the JACK server
\r
1909 // and can be specified when the JACK server is started. In
\r
1912 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1914 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1915 // frames, and number of buffers = 4. Once the server is running, it
\r
1916 // is not possible to override these values. If the values are not
\r
1917 // specified in the command-line, the JACK server uses default values.
\r
1919 // The JACK server does not have to be running when an instance of
\r
1920 // RtApiJack is created, though the function getDeviceCount() will
\r
1921 // report 0 devices found until JACK has been started. When no
\r
1922 // devices are available (i.e., the JACK server is not running), a
\r
1923 // stream cannot be opened.
\r
1925 #include <jack/jack.h>
\r
1926 #include <unistd.h>
\r
1929 // A structure to hold various information related to the Jack API
\r
1930 // implementation.
\r
1931 struct JackHandle {
\r
1932 jack_client_t *client;
\r
1933 jack_port_t **ports[2];
\r
1934 std::string deviceName[2];
\r
1936 pthread_cond_t condition;
\r
1937 int drainCounter; // Tracks callback counts when draining
\r
1938 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1941 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
1944 static void jackSilentError( const char * ) {};
\r
1946 RtApiJack :: RtApiJack()
\r
1948 // Nothing to do here.
\r
1949 #if !defined(__RTAUDIO_DEBUG__)
\r
1950 // Turn off Jack's internal error reporting.
\r
1951 jack_set_error_function( &jackSilentError );
\r
1955 RtApiJack :: ~RtApiJack()
\r
1957 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1960 unsigned int RtApiJack :: getDeviceCount( void )
\r
1962 // See if we can become a jack client.
\r
1963 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1964 jack_status_t *status = NULL;
\r
1965 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1966 if ( client == 0 ) return 0;
\r
1968 const char **ports;
\r
1969 std::string port, previousPort;
\r
1970 unsigned int nChannels = 0, nDevices = 0;
\r
1971 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1973 // Parse the port names up to the first colon (:).
\r
1974 size_t iColon = 0;
\r
1976 port = (char *) ports[ nChannels ];
\r
1977 iColon = port.find(":");
\r
1978 if ( iColon != std::string::npos ) {
\r
1979 port = port.substr( 0, iColon + 1 );
\r
1980 if ( port != previousPort ) {
\r
1982 previousPort = port;
\r
1985 } while ( ports[++nChannels] );
\r
1989 jack_client_close( client );
\r
1993 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1995 RtAudio::DeviceInfo info;
\r
1996 info.probed = false;
\r
1998 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1999 jack_status_t *status = NULL;
\r
2000 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
2001 if ( client == 0 ) {
\r
2002 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
2003 error( RtAudioError::WARNING );
\r
2007 const char **ports;
\r
2008 std::string port, previousPort;
\r
2009 unsigned int nPorts = 0, nDevices = 0;
\r
2010 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2012 // Parse the port names up to the first colon (:).
\r
2013 size_t iColon = 0;
\r
2015 port = (char *) ports[ nPorts ];
\r
2016 iColon = port.find(":");
\r
2017 if ( iColon != std::string::npos ) {
\r
2018 port = port.substr( 0, iColon );
\r
2019 if ( port != previousPort ) {
\r
2020 if ( nDevices == device ) info.name = port;
\r
2022 previousPort = port;
\r
2025 } while ( ports[++nPorts] );
\r
2029 if ( device >= nDevices ) {
\r
2030 jack_client_close( client );
\r
2031 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
2032 error( RtAudioError::INVALID_USE );
\r
2036 // Get the current jack server sample rate.
\r
2037 info.sampleRates.clear();
\r
2039 info.preferredSampleRate = jack_get_sample_rate( client );
\r
2040 info.sampleRates.push_back( info.preferredSampleRate );
\r
2042 // Count the available ports containing the client name as device
\r
2043 // channels. Jack "input ports" equal RtAudio output channels.
\r
2044 unsigned int nChannels = 0;
\r
2045 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
2047 while ( ports[ nChannels ] ) nChannels++;
\r
2049 info.outputChannels = nChannels;
\r
2052 // Jack "output ports" equal RtAudio input channels.
\r
2054 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
2056 while ( ports[ nChannels ] ) nChannels++;
\r
2058 info.inputChannels = nChannels;
\r
2061 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2062 jack_client_close(client);
\r
2063 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2064 error( RtAudioError::WARNING );
\r
2068 // If device opens for both playback and capture, we determine the channels.
\r
2069 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2070 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2072 // Jack always uses 32-bit floats.
\r
2073 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2075 // Jack doesn't provide default devices so we'll use the first available one.
\r
2076 if ( device == 0 && info.outputChannels > 0 )
\r
2077 info.isDefaultOutput = true;
\r
2078 if ( device == 0 && info.inputChannels > 0 )
\r
2079 info.isDefaultInput = true;
\r
2081 jack_client_close(client);
\r
2082 info.probed = true;
\r
2086 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2088 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2090 RtApiJack *object = (RtApiJack *) info->object;
\r
2091 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2096 // This function will be called by a spawned thread when the Jack
\r
2097 // server signals that it is shutting down. It is necessary to handle
\r
2098 // it this way because the jackShutdown() function must return before
\r
2099 // the jack_deactivate() function (in closeStream()) will return.
\r
2100 static void *jackCloseStream( void *ptr )
\r
2102 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2103 RtApiJack *object = (RtApiJack *) info->object;
\r
2105 object->closeStream();
\r
2107 pthread_exit( NULL );
\r
2109 static void jackShutdown( void *infoPointer )
\r
2111 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2112 RtApiJack *object = (RtApiJack *) info->object;
\r
2114 // Check current stream state. If stopped, then we'll assume this
\r
2115 // was called as a result of a call to RtApiJack::stopStream (the
\r
2116 // deactivation of a client handle causes this function to be called).
\r
2117 // If not, we'll assume the Jack server is shutting down or some
\r
2118 // other problem occurred and we should close the stream.
\r
2119 if ( object->isStreamRunning() == false ) return;
\r
2121 ThreadHandle threadId;
\r
2122 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2123 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2126 static int jackXrun( void *infoPointer )
\r
2128 JackHandle *handle = (JackHandle *) infoPointer;
\r
2130 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2131 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2136 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2137 unsigned int firstChannel, unsigned int sampleRate,
\r
2138 RtAudioFormat format, unsigned int *bufferSize,
\r
2139 RtAudio::StreamOptions *options )
\r
2141 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2143 // Look for jack server and try to become a client (only do once per stream).
\r
2144 jack_client_t *client = 0;
\r
2145 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2146 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2147 jack_status_t *status = NULL;
\r
2148 if ( options && !options->streamName.empty() )
\r
2149 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2151 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2152 if ( client == 0 ) {
\r
2153 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2154 error( RtAudioError::WARNING );
\r
2159 // The handle must have been created on an earlier pass.
\r
2160 client = handle->client;
\r
2163 const char **ports;
\r
2164 std::string port, previousPort, deviceName;
\r
2165 unsigned int nPorts = 0, nDevices = 0;
\r
2166 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2168 // Parse the port names up to the first colon (:).
\r
2169 size_t iColon = 0;
\r
2171 port = (char *) ports[ nPorts ];
\r
2172 iColon = port.find(":");
\r
2173 if ( iColon != std::string::npos ) {
\r
2174 port = port.substr( 0, iColon );
\r
2175 if ( port != previousPort ) {
\r
2176 if ( nDevices == device ) deviceName = port;
\r
2178 previousPort = port;
\r
2181 } while ( ports[++nPorts] );
\r
2185 if ( device >= nDevices ) {
\r
2186 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2190 // Count the available ports containing the client name as device
\r
2191 // channels. Jack "input ports" equal RtAudio output channels.
\r
2192 unsigned int nChannels = 0;
\r
2193 unsigned long flag = JackPortIsInput;
\r
2194 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2195 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2197 while ( ports[ nChannels ] ) nChannels++;
\r
2201 // Compare the jack ports for specified client to the requested number of channels.
\r
2202 if ( nChannels < (channels + firstChannel) ) {
\r
2203 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2204 errorText_ = errorStream_.str();
\r
2208 // Check the jack server sample rate.
\r
2209 unsigned int jackRate = jack_get_sample_rate( client );
\r
2210 if ( sampleRate != jackRate ) {
\r
2211 jack_client_close( client );
\r
2212 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2213 errorText_ = errorStream_.str();
\r
2216 stream_.sampleRate = jackRate;
\r
2218 // Get the latency of the JACK port.
\r
2219 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2220 if ( ports[ firstChannel ] ) {
\r
2221 // Added by Ge Wang
\r
2222 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2223 // the range (usually the min and max are equal)
\r
2224 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2225 // get the latency range
\r
2226 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2227 // be optimistic, use the min!
\r
2228 stream_.latency[mode] = latrange.min;
\r
2229 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2233 // The jack server always uses 32-bit floating-point data.
\r
2234 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2235 stream_.userFormat = format;
\r
2237 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2238 else stream_.userInterleaved = true;
\r
2240 // Jack always uses non-interleaved buffers.
\r
2241 stream_.deviceInterleaved[mode] = false;
\r
2243 // Jack always provides host byte-ordered data.
\r
2244 stream_.doByteSwap[mode] = false;
\r
2246 // Get the buffer size. The buffer size and number of buffers
\r
2247 // (periods) is set when the jack server is started.
\r
2248 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2249 *bufferSize = stream_.bufferSize;
\r
2251 stream_.nDeviceChannels[mode] = channels;
\r
2252 stream_.nUserChannels[mode] = channels;
\r
2254 // Set flags for buffer conversion.
\r
2255 stream_.doConvertBuffer[mode] = false;
\r
2256 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2257 stream_.doConvertBuffer[mode] = true;
\r
2258 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2259 stream_.nUserChannels[mode] > 1 )
\r
2260 stream_.doConvertBuffer[mode] = true;
\r
2262 // Allocate our JackHandle structure for the stream.
\r
2263 if ( handle == 0 ) {
\r
2265 handle = new JackHandle;
\r
2267 catch ( std::bad_alloc& ) {
\r
2268 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2272 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2273 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2276 stream_.apiHandle = (void *) handle;
\r
2277 handle->client = client;
\r
2279 handle->deviceName[mode] = deviceName;
\r
2281 // Allocate necessary internal buffers.
\r
2282 unsigned long bufferBytes;
\r
2283 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2284 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2285 if ( stream_.userBuffer[mode] == NULL ) {
\r
2286 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2290 if ( stream_.doConvertBuffer[mode] ) {
\r
2292 bool makeBuffer = true;
\r
2293 if ( mode == OUTPUT )
\r
2294 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2295 else { // mode == INPUT
\r
2296 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2297 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2298 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2299 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2303 if ( makeBuffer ) {
\r
2304 bufferBytes *= *bufferSize;
\r
2305 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2306 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2307 if ( stream_.deviceBuffer == NULL ) {
\r
2308 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2314 // Allocate memory for the Jack ports (channels) identifiers.
\r
2315 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2316 if ( handle->ports[mode] == NULL ) {
\r
2317 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2321 stream_.device[mode] = device;
\r
2322 stream_.channelOffset[mode] = firstChannel;
\r
2323 stream_.state = STREAM_STOPPED;
\r
2324 stream_.callbackInfo.object = (void *) this;
\r
2326 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2327 // We had already set up the stream for output.
\r
2328 stream_.mode = DUPLEX;
\r
2330 stream_.mode = mode;
\r
2331 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2332 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2333 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2336 // Register our ports.
\r
2338 if ( mode == OUTPUT ) {
\r
2339 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2340 snprintf( label, 64, "outport %d", i );
\r
2341 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2342 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2346 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2347 snprintf( label, 64, "inport %d", i );
\r
2348 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2349 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2353 // Setup the buffer conversion information structure. We don't use
\r
2354 // buffers to do channel offsets, so we override that parameter
\r
2356 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2362 pthread_cond_destroy( &handle->condition );
\r
2363 jack_client_close( handle->client );
\r
2365 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2366 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2369 stream_.apiHandle = 0;
\r
2372 for ( int i=0; i<2; i++ ) {
\r
2373 if ( stream_.userBuffer[i] ) {
\r
2374 free( stream_.userBuffer[i] );
\r
2375 stream_.userBuffer[i] = 0;
\r
2379 if ( stream_.deviceBuffer ) {
\r
2380 free( stream_.deviceBuffer );
\r
2381 stream_.deviceBuffer = 0;
\r
2387 void RtApiJack :: closeStream( void )
\r
2389 if ( stream_.state == STREAM_CLOSED ) {
\r
2390 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2391 error( RtAudioError::WARNING );
\r
2395 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2398 if ( stream_.state == STREAM_RUNNING )
\r
2399 jack_deactivate( handle->client );
\r
2401 jack_client_close( handle->client );
\r
2405 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2406 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2407 pthread_cond_destroy( &handle->condition );
\r
2409 stream_.apiHandle = 0;
\r
2412 for ( int i=0; i<2; i++ ) {
\r
2413 if ( stream_.userBuffer[i] ) {
\r
2414 free( stream_.userBuffer[i] );
\r
2415 stream_.userBuffer[i] = 0;
\r
2419 if ( stream_.deviceBuffer ) {
\r
2420 free( stream_.deviceBuffer );
\r
2421 stream_.deviceBuffer = 0;
\r
2424 stream_.mode = UNINITIALIZED;
\r
2425 stream_.state = STREAM_CLOSED;
\r
2428 void RtApiJack :: startStream( void )
\r
2431 if ( stream_.state == STREAM_RUNNING ) {
\r
2432 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2433 error( RtAudioError::WARNING );
\r
2437 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2438 int result = jack_activate( handle->client );
\r
2440 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2444 const char **ports;
\r
2446 // Get the list of available ports.
\r
2447 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2449 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2450 if ( ports == NULL) {
\r
2451 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2455 // Now make the port connections. Since RtAudio wasn't designed to
\r
2456 // allow the user to select particular channels of a device, we'll
\r
2457 // just open the first "nChannels" ports with offset.
\r
2458 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2460 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2461 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2464 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2471 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2473 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2474 if ( ports == NULL) {
\r
2475 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2479 // Now make the port connections. See note above.
\r
2480 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2482 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2483 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2486 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2493 handle->drainCounter = 0;
\r
2494 handle->internalDrain = false;
\r
2495 stream_.state = STREAM_RUNNING;
\r
2498 if ( result == 0 ) return;
\r
2499 error( RtAudioError::SYSTEM_ERROR );
\r
2502 void RtApiJack :: stopStream( void )
\r
2505 if ( stream_.state == STREAM_STOPPED ) {
\r
2506 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2507 error( RtAudioError::WARNING );
\r
2511 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2512 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2514 if ( handle->drainCounter == 0 ) {
\r
2515 handle->drainCounter = 2;
\r
2516 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2520 jack_deactivate( handle->client );
\r
2521 stream_.state = STREAM_STOPPED;
\r
2524 void RtApiJack :: abortStream( void )
\r
2527 if ( stream_.state == STREAM_STOPPED ) {
\r
2528 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2529 error( RtAudioError::WARNING );
\r
2533 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2534 handle->drainCounter = 2;
\r
2539 // This function will be called by a spawned thread when the user
\r
2540 // callback function signals that the stream should be stopped or
\r
2541 // aborted. It is necessary to handle it this way because the
\r
2542 // callbackEvent() function must return before the jack_deactivate()
\r
2543 // function will return.
\r
2544 static void *jackStopStream( void *ptr )
\r
2546 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2547 RtApiJack *object = (RtApiJack *) info->object;
\r
2549 object->stopStream();
\r
2550 pthread_exit( NULL );
\r
2553 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2555 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2556 if ( stream_.state == STREAM_CLOSED ) {
\r
2557 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2558 error( RtAudioError::WARNING );
\r
2561 if ( stream_.bufferSize != nframes ) {
\r
2562 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2563 error( RtAudioError::WARNING );
\r
2567 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2568 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2570 // Check if we were draining the stream and signal is finished.
\r
2571 if ( handle->drainCounter > 3 ) {
\r
2572 ThreadHandle threadId;
\r
2574 stream_.state = STREAM_STOPPING;
\r
2575 if ( handle->internalDrain == true )
\r
2576 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2578 pthread_cond_signal( &handle->condition );
\r
2582 // Invoke user callback first, to get fresh output data.
\r
2583 if ( handle->drainCounter == 0 ) {
\r
2584 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2585 double streamTime = getStreamTime();
\r
2586 RtAudioStreamStatus status = 0;
\r
2587 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2588 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2589 handle->xrun[0] = false;
\r
2591 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2592 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2593 handle->xrun[1] = false;
\r
2595 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2596 stream_.bufferSize, streamTime, status, info->userData );
\r
2597 if ( cbReturnValue == 2 ) {
\r
2598 stream_.state = STREAM_STOPPING;
\r
2599 handle->drainCounter = 2;
\r
2601 pthread_create( &id, NULL, jackStopStream, info );
\r
2604 else if ( cbReturnValue == 1 ) {
\r
2605 handle->drainCounter = 1;
\r
2606 handle->internalDrain = true;
\r
2610 jack_default_audio_sample_t *jackbuffer;
\r
2611 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2612 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2614 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2616 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2617 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2618 memset( jackbuffer, 0, bufferBytes );
\r
2622 else if ( stream_.doConvertBuffer[0] ) {
\r
2624 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2626 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2627 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2628 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2631 else { // no buffer conversion
\r
2632 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2633 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2634 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2639 // Don't bother draining input
\r
2640 if ( handle->drainCounter ) {
\r
2641 handle->drainCounter++;
\r
2645 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2647 if ( stream_.doConvertBuffer[1] ) {
\r
2648 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2649 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2650 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2652 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2654 else { // no buffer conversion
\r
2655 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2656 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2657 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2663 RtApi::tickStreamTime();
\r
2666 //******************** End of __UNIX_JACK__ *********************//
\r
2669 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2671 // The ASIO API is designed around a callback scheme, so this
\r
2672 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2673 // Jack. The primary constraint with ASIO is that it only allows
\r
2674 // access to a single driver at a time. Thus, it is not possible to
\r
2675 // have more than one simultaneous RtAudio stream.
\r
2677 // This implementation also requires a number of external ASIO files
\r
2678 // and a few global variables. The ASIO callback scheme does not
\r
2679 // allow for the passing of user data, so we must create a global
\r
2680 // pointer to our callbackInfo structure.
\r
2682 // On unix systems, we make use of a pthread condition variable.
\r
2683 // Since there is no equivalent in Windows, I hacked something based
\r
2684 // on information found in
\r
2685 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2687 #include "asiosys.h"
\r
2689 #include "iasiothiscallresolver.h"
\r
2690 #include "asiodrivers.h"
\r
2693 static AsioDrivers drivers;
\r
2694 static ASIOCallbacks asioCallbacks;
\r
2695 static ASIODriverInfo driverInfo;
\r
2696 static CallbackInfo *asioCallbackInfo;
\r
2697 static bool asioXRun;
\r
2699 struct AsioHandle {
\r
2700 int drainCounter; // Tracks callback counts when draining
\r
2701 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2702 ASIOBufferInfo *bufferInfos;
\r
2706 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2709 // Function declarations (definitions at end of section)
\r
2710 static const char* getAsioErrorString( ASIOError result );
\r
2711 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2712 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2714 RtApiAsio :: RtApiAsio()
\r
2716 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2717 // CoInitialize beforehand, but it must be for appartment threading
\r
2718 // (in which case, CoInitilialize will return S_FALSE here).
\r
2719 coInitialized_ = false;
\r
2720 HRESULT hr = CoInitialize( NULL );
\r
2721 if ( FAILED(hr) ) {
\r
2722 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2723 error( RtAudioError::WARNING );
\r
2725 coInitialized_ = true;
\r
2727 drivers.removeCurrentDriver();
\r
2728 driverInfo.asioVersion = 2;
\r
2730 // See note in DirectSound implementation about GetDesktopWindow().
\r
2731 driverInfo.sysRef = GetForegroundWindow();
\r
2734 RtApiAsio :: ~RtApiAsio()
\r
2736 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2737 if ( coInitialized_ ) CoUninitialize();
\r
2740 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2742 return (unsigned int) drivers.asioGetNumDev();
\r
2745 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2747 RtAudio::DeviceInfo info;
\r
2748 info.probed = false;
\r
2751 unsigned int nDevices = getDeviceCount();
\r
2752 if ( nDevices == 0 ) {
\r
2753 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2754 error( RtAudioError::INVALID_USE );
\r
2758 if ( device >= nDevices ) {
\r
2759 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2760 error( RtAudioError::INVALID_USE );
\r
2764 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2765 if ( stream_.state != STREAM_CLOSED ) {
\r
2766 if ( device >= devices_.size() ) {
\r
2767 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2768 error( RtAudioError::WARNING );
\r
2771 return devices_[ device ];
\r
2774 char driverName[32];
\r
2775 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2776 if ( result != ASE_OK ) {
\r
2777 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2778 errorText_ = errorStream_.str();
\r
2779 error( RtAudioError::WARNING );
\r
2783 info.name = driverName;
\r
2785 if ( !drivers.loadDriver( driverName ) ) {
\r
2786 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2787 errorText_ = errorStream_.str();
\r
2788 error( RtAudioError::WARNING );
\r
2792 result = ASIOInit( &driverInfo );
\r
2793 if ( result != ASE_OK ) {
\r
2794 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2795 errorText_ = errorStream_.str();
\r
2796 error( RtAudioError::WARNING );
\r
2800 // Determine the device channel information.
\r
2801 long inputChannels, outputChannels;
\r
2802 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2803 if ( result != ASE_OK ) {
\r
2804 drivers.removeCurrentDriver();
\r
2805 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2806 errorText_ = errorStream_.str();
\r
2807 error( RtAudioError::WARNING );
\r
2811 info.outputChannels = outputChannels;
\r
2812 info.inputChannels = inputChannels;
\r
2813 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2814 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2816 // Determine the supported sample rates.
\r
2817 info.sampleRates.clear();
\r
2818 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2819 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2820 if ( result == ASE_OK ) {
\r
2821 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2823 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
2824 info.preferredSampleRate = SAMPLE_RATES[i];
\r
2828 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2829 ASIOChannelInfo channelInfo;
\r
2830 channelInfo.channel = 0;
\r
2831 channelInfo.isInput = true;
\r
2832 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2833 result = ASIOGetChannelInfo( &channelInfo );
\r
2834 if ( result != ASE_OK ) {
\r
2835 drivers.removeCurrentDriver();
\r
2836 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2837 errorText_ = errorStream_.str();
\r
2838 error( RtAudioError::WARNING );
\r
2842 info.nativeFormats = 0;
\r
2843 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2844 info.nativeFormats |= RTAUDIO_SINT16;
\r
2845 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2846 info.nativeFormats |= RTAUDIO_SINT32;
\r
2847 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2848 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2849 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2850 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2851 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2852 info.nativeFormats |= RTAUDIO_SINT24;
\r
2854 if ( info.outputChannels > 0 )
\r
2855 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2856 if ( info.inputChannels > 0 )
\r
2857 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2859 info.probed = true;
\r
2860 drivers.removeCurrentDriver();
\r
2864 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2866 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2867 object->callbackEvent( index );
\r
2870 void RtApiAsio :: saveDeviceInfo( void )
\r
2874 unsigned int nDevices = getDeviceCount();
\r
2875 devices_.resize( nDevices );
\r
2876 for ( unsigned int i=0; i<nDevices; i++ )
\r
2877 devices_[i] = getDeviceInfo( i );
\r
2880 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2881 unsigned int firstChannel, unsigned int sampleRate,
\r
2882 RtAudioFormat format, unsigned int *bufferSize,
\r
2883 RtAudio::StreamOptions *options )
\r
2884 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
2886 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
\r
2888 // For ASIO, a duplex stream MUST use the same driver.
\r
2889 if ( isDuplexInput && stream_.device[0] != device ) {
\r
2890 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2894 char driverName[32];
\r
2895 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2896 if ( result != ASE_OK ) {
\r
2897 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2898 errorText_ = errorStream_.str();
\r
2902 // Only load the driver once for duplex stream.
\r
2903 if ( !isDuplexInput ) {
\r
2904 // The getDeviceInfo() function will not work when a stream is open
\r
2905 // because ASIO does not allow multiple devices to run at the same
\r
2906 // time. Thus, we'll probe the system before opening a stream and
\r
2907 // save the results for use by getDeviceInfo().
\r
2908 this->saveDeviceInfo();
\r
2910 if ( !drivers.loadDriver( driverName ) ) {
\r
2911 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2912 errorText_ = errorStream_.str();
\r
2916 result = ASIOInit( &driverInfo );
\r
2917 if ( result != ASE_OK ) {
\r
2918 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2919 errorText_ = errorStream_.str();
\r
2924 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
\r
2925 bool buffersAllocated = false;
\r
2926 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2927 unsigned int nChannels;
\r
2930 // Check the device channel count.
\r
2931 long inputChannels, outputChannels;
\r
2932 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2933 if ( result != ASE_OK ) {
\r
2934 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2935 errorText_ = errorStream_.str();
\r
2939 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2940 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2941 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2942 errorText_ = errorStream_.str();
\r
2945 stream_.nDeviceChannels[mode] = channels;
\r
2946 stream_.nUserChannels[mode] = channels;
\r
2947 stream_.channelOffset[mode] = firstChannel;
\r
2949 // Verify the sample rate is supported.
\r
2950 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2951 if ( result != ASE_OK ) {
\r
2952 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2953 errorText_ = errorStream_.str();
\r
2957 // Get the current sample rate
\r
2958 ASIOSampleRate currentRate;
\r
2959 result = ASIOGetSampleRate( ¤tRate );
\r
2960 if ( result != ASE_OK ) {
\r
2961 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2962 errorText_ = errorStream_.str();
\r
2966 // Set the sample rate only if necessary
\r
2967 if ( currentRate != sampleRate ) {
\r
2968 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2969 if ( result != ASE_OK ) {
\r
2970 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2971 errorText_ = errorStream_.str();
\r
2976 // Determine the driver data type.
\r
2977 ASIOChannelInfo channelInfo;
\r
2978 channelInfo.channel = 0;
\r
2979 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2980 else channelInfo.isInput = true;
\r
2981 result = ASIOGetChannelInfo( &channelInfo );
\r
2982 if ( result != ASE_OK ) {
\r
2983 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2984 errorText_ = errorStream_.str();
\r
2988 // Assuming WINDOWS host is always little-endian.
\r
2989 stream_.doByteSwap[mode] = false;
\r
2990 stream_.userFormat = format;
\r
2991 stream_.deviceFormat[mode] = 0;
\r
2992 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2993 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2994 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2996 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2997 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2998 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
3000 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
3001 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
3002 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
3004 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
3005 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
3006 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
3008 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
3009 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
3010 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
3013 if ( stream_.deviceFormat[mode] == 0 ) {
\r
3014 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
3015 errorText_ = errorStream_.str();
\r
3019 // Set the buffer size. For a duplex stream, this will end up
\r
3020 // setting the buffer size based on the input constraints, which
\r
3022 long minSize, maxSize, preferSize, granularity;
\r
3023 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
3024 if ( result != ASE_OK ) {
\r
3025 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
3026 errorText_ = errorStream_.str();
\r
3030 if ( isDuplexInput ) {
\r
3031 // When this is the duplex input (output was opened before), then we have to use the same
\r
3032 // buffersize as the output, because it might use the preferred buffer size, which most
\r
3033 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
\r
3034 // So instead of throwing an error, make them equal. The caller uses the reference
\r
3035 // to the "bufferSize" param as usual to set up processing buffers.
\r
3037 *bufferSize = stream_.bufferSize;
\r
3040 if ( *bufferSize == 0 ) *bufferSize = preferSize;
\r
3041 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3042 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3043 else if ( granularity == -1 ) {
\r
3044 // Make sure bufferSize is a power of two.
\r
3045 int log2_of_min_size = 0;
\r
3046 int log2_of_max_size = 0;
\r
3048 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
3049 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
3050 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
3053 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
3054 int min_delta_num = log2_of_min_size;
\r
3056 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
3057 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
3058 if (current_delta < min_delta) {
\r
3059 min_delta = current_delta;
\r
3060 min_delta_num = i;
\r
3064 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
3065 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3066 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3068 else if ( granularity != 0 ) {
\r
3069 // Set to an even multiple of granularity, rounding up.
\r
3070 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
3075 // we don't use it anymore, see above!
\r
3076 // Just left it here for the case...
\r
3077 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
\r
3078 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3083 stream_.bufferSize = *bufferSize;
\r
3084 stream_.nBuffers = 2;
\r
3086 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3087 else stream_.userInterleaved = true;
\r
3089 // ASIO always uses non-interleaved buffers.
\r
3090 stream_.deviceInterleaved[mode] = false;
\r
3092 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3093 if ( handle == 0 ) {
\r
3095 handle = new AsioHandle;
\r
3097 catch ( std::bad_alloc& ) {
\r
3098 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3101 handle->bufferInfos = 0;
\r
3103 // Create a manual-reset event.
\r
3104 handle->condition = CreateEvent( NULL, // no security
\r
3105 TRUE, // manual-reset
\r
3106 FALSE, // non-signaled initially
\r
3107 NULL ); // unnamed
\r
3108 stream_.apiHandle = (void *) handle;
\r
3111 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3112 // and output separately, we'll have to dispose of previously
\r
3113 // created output buffers for a duplex stream.
\r
3114 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3115 ASIODisposeBuffers();
\r
3116 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3119 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3121 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3122 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3123 if ( handle->bufferInfos == NULL ) {
\r
3124 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3125 errorText_ = errorStream_.str();
\r
3129 ASIOBufferInfo *infos;
\r
3130 infos = handle->bufferInfos;
\r
3131 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3132 infos->isInput = ASIOFalse;
\r
3133 infos->channelNum = i + stream_.channelOffset[0];
\r
3134 infos->buffers[0] = infos->buffers[1] = 0;
\r
3136 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3137 infos->isInput = ASIOTrue;
\r
3138 infos->channelNum = i + stream_.channelOffset[1];
\r
3139 infos->buffers[0] = infos->buffers[1] = 0;
\r
3142 // prepare for callbacks
\r
3143 stream_.sampleRate = sampleRate;
\r
3144 stream_.device[mode] = device;
\r
3145 stream_.mode = isDuplexInput ? DUPLEX : mode;
\r
3147 // store this class instance before registering callbacks, that are going to use it
\r
3148 asioCallbackInfo = &stream_.callbackInfo;
\r
3149 stream_.callbackInfo.object = (void *) this;
\r
3151 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3152 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3153 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3154 asioCallbacks.asioMessage = &asioMessages;
\r
3155 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3156 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3157 if ( result != ASE_OK ) {
\r
3158 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
\r
3159 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
\r
3160 // in that case, let's be naïve and try that instead
\r
3161 *bufferSize = preferSize;
\r
3162 stream_.bufferSize = *bufferSize;
\r
3163 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3166 if ( result != ASE_OK ) {
\r
3167 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3168 errorText_ = errorStream_.str();
\r
3171 buffersAllocated = true;
\r
3172 stream_.state = STREAM_STOPPED;
\r
3174 // Set flags for buffer conversion.
\r
3175 stream_.doConvertBuffer[mode] = false;
\r
3176 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3177 stream_.doConvertBuffer[mode] = true;
\r
3178 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3179 stream_.nUserChannels[mode] > 1 )
\r
3180 stream_.doConvertBuffer[mode] = true;
\r
3182 // Allocate necessary internal buffers
\r
3183 unsigned long bufferBytes;
\r
3184 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3185 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3186 if ( stream_.userBuffer[mode] == NULL ) {
\r
3187 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3191 if ( stream_.doConvertBuffer[mode] ) {
\r
3193 bool makeBuffer = true;
\r
3194 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3195 if ( isDuplexInput && stream_.deviceBuffer ) {
\r
3196 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3197 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3200 if ( makeBuffer ) {
\r
3201 bufferBytes *= *bufferSize;
\r
3202 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3203 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3204 if ( stream_.deviceBuffer == NULL ) {
\r
3205 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3211 // Determine device latencies
\r
3212 long inputLatency, outputLatency;
\r
3213 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3214 if ( result != ASE_OK ) {
\r
3215 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3216 errorText_ = errorStream_.str();
\r
3217 error( RtAudioError::WARNING); // warn but don't fail
\r
3220 stream_.latency[0] = outputLatency;
\r
3221 stream_.latency[1] = inputLatency;
\r
3224 // Setup the buffer conversion information structure. We don't use
\r
3225 // buffers to do channel offsets, so we override that parameter
\r
3227 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3232 if ( !isDuplexInput ) {
\r
3233 // the cleanup for error in the duplex input, is done by RtApi::openStream
\r
3234 // So we clean up for single channel only
\r
3236 if ( buffersAllocated )
\r
3237 ASIODisposeBuffers();
\r
3239 drivers.removeCurrentDriver();
\r
3242 CloseHandle( handle->condition );
\r
3243 if ( handle->bufferInfos )
\r
3244 free( handle->bufferInfos );
\r
3247 stream_.apiHandle = 0;
\r
3251 if ( stream_.userBuffer[mode] ) {
\r
3252 free( stream_.userBuffer[mode] );
\r
3253 stream_.userBuffer[mode] = 0;
\r
3256 if ( stream_.deviceBuffer ) {
\r
3257 free( stream_.deviceBuffer );
\r
3258 stream_.deviceBuffer = 0;
\r
3263 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
3265 void RtApiAsio :: closeStream()
\r
3267 if ( stream_.state == STREAM_CLOSED ) {
\r
3268 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3269 error( RtAudioError::WARNING );
\r
3273 if ( stream_.state == STREAM_RUNNING ) {
\r
3274 stream_.state = STREAM_STOPPED;
\r
3277 ASIODisposeBuffers();
\r
3278 drivers.removeCurrentDriver();
\r
3280 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3282 CloseHandle( handle->condition );
\r
3283 if ( handle->bufferInfos )
\r
3284 free( handle->bufferInfos );
\r
3286 stream_.apiHandle = 0;
\r
3289 for ( int i=0; i<2; i++ ) {
\r
3290 if ( stream_.userBuffer[i] ) {
\r
3291 free( stream_.userBuffer[i] );
\r
3292 stream_.userBuffer[i] = 0;
\r
3296 if ( stream_.deviceBuffer ) {
\r
3297 free( stream_.deviceBuffer );
\r
3298 stream_.deviceBuffer = 0;
\r
3301 stream_.mode = UNINITIALIZED;
\r
3302 stream_.state = STREAM_CLOSED;
\r
3305 bool stopThreadCalled = false;
\r
3307 void RtApiAsio :: startStream()
\r
3310 if ( stream_.state == STREAM_RUNNING ) {
\r
3311 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3312 error( RtAudioError::WARNING );
\r
3316 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3317 ASIOError result = ASIOStart();
\r
3318 if ( result != ASE_OK ) {
\r
3319 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3320 errorText_ = errorStream_.str();
\r
3324 handle->drainCounter = 0;
\r
3325 handle->internalDrain = false;
\r
3326 ResetEvent( handle->condition );
\r
3327 stream_.state = STREAM_RUNNING;
\r
3331 stopThreadCalled = false;
\r
3333 if ( result == ASE_OK ) return;
\r
3334 error( RtAudioError::SYSTEM_ERROR );
\r
3337 void RtApiAsio :: stopStream()
\r
3340 if ( stream_.state == STREAM_STOPPED ) {
\r
3341 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3342 error( RtAudioError::WARNING );
\r
3346 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3347 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3348 if ( handle->drainCounter == 0 ) {
\r
3349 handle->drainCounter = 2;
\r
3350 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3354 stream_.state = STREAM_STOPPED;
\r
3356 ASIOError result = ASIOStop();
\r
3357 if ( result != ASE_OK ) {
\r
3358 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3359 errorText_ = errorStream_.str();
\r
3362 if ( result == ASE_OK ) return;
\r
3363 error( RtAudioError::SYSTEM_ERROR );
\r
3366 void RtApiAsio :: abortStream()
\r
3369 if ( stream_.state == STREAM_STOPPED ) {
\r
3370 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3371 error( RtAudioError::WARNING );
\r
3375 // The following lines were commented-out because some behavior was
\r
3376 // noted where the device buffers need to be zeroed to avoid
\r
3377 // continuing sound, even when the device buffers are completely
\r
3378 // disposed. So now, calling abort is the same as calling stop.
\r
3379 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3380 // handle->drainCounter = 2;
\r
3384 // This function will be called by a spawned thread when the user
\r
3385 // callback function signals that the stream should be stopped or
\r
3386 // aborted. It is necessary to handle it this way because the
\r
3387 // callbackEvent() function must return before the ASIOStop()
\r
3388 // function will return.
\r
3389 static unsigned __stdcall asioStopStream( void *ptr )
\r
3391 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3392 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3394 object->stopStream();
\r
3395 _endthreadex( 0 );
\r
3399 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3401 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3402 if ( stream_.state == STREAM_CLOSED ) {
\r
3403 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3404 error( RtAudioError::WARNING );
\r
3408 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3409 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3411 // Check if we were draining the stream and signal if finished.
\r
3412 if ( handle->drainCounter > 3 ) {
\r
3414 stream_.state = STREAM_STOPPING;
\r
3415 if ( handle->internalDrain == false )
\r
3416 SetEvent( handle->condition );
\r
3417 else { // spawn a thread to stop the stream
\r
3418 unsigned threadId;
\r
3419 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3420 &stream_.callbackInfo, 0, &threadId );
\r
3425 // Invoke user callback to get fresh output data UNLESS we are
\r
3426 // draining stream.
\r
3427 if ( handle->drainCounter == 0 ) {
\r
3428 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3429 double streamTime = getStreamTime();
\r
3430 RtAudioStreamStatus status = 0;
\r
3431 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3432 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3435 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3436 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3439 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3440 stream_.bufferSize, streamTime, status, info->userData );
\r
3441 if ( cbReturnValue == 2 ) {
\r
3442 stream_.state = STREAM_STOPPING;
\r
3443 handle->drainCounter = 2;
\r
3444 unsigned threadId;
\r
3445 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3446 &stream_.callbackInfo, 0, &threadId );
\r
3449 else if ( cbReturnValue == 1 ) {
\r
3450 handle->drainCounter = 1;
\r
3451 handle->internalDrain = true;
\r
3455 unsigned int nChannels, bufferBytes, i, j;
\r
3456 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3457 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3459 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3461 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3463 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3464 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3465 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3469 else if ( stream_.doConvertBuffer[0] ) {
\r
3471 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3472 if ( stream_.doByteSwap[0] )
\r
3473 byteSwapBuffer( stream_.deviceBuffer,
\r
3474 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3475 stream_.deviceFormat[0] );
\r
3477 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3478 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3479 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3480 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3486 if ( stream_.doByteSwap[0] )
\r
3487 byteSwapBuffer( stream_.userBuffer[0],
\r
3488 stream_.bufferSize * stream_.nUserChannels[0],
\r
3489 stream_.userFormat );
\r
3491 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3492 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3493 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3494 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3500 // Don't bother draining input
\r
3501 if ( handle->drainCounter ) {
\r
3502 handle->drainCounter++;
\r
3506 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3508 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3510 if (stream_.doConvertBuffer[1]) {
\r
3512 // Always interleave ASIO input data.
\r
3513 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3514 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3515 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3516 handle->bufferInfos[i].buffers[bufferIndex],
\r
3520 if ( stream_.doByteSwap[1] )
\r
3521 byteSwapBuffer( stream_.deviceBuffer,
\r
3522 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3523 stream_.deviceFormat[1] );
\r
3524 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3528 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3529 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3530 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3531 handle->bufferInfos[i].buffers[bufferIndex],
\r
3536 if ( stream_.doByteSwap[1] )
\r
3537 byteSwapBuffer( stream_.userBuffer[1],
\r
3538 stream_.bufferSize * stream_.nUserChannels[1],
\r
3539 stream_.userFormat );
\r
3544 // The following call was suggested by Malte Clasen. While the API
\r
3545 // documentation indicates it should not be required, some device
\r
3546 // drivers apparently do not function correctly without it.
\r
3547 ASIOOutputReady();
\r
3549 RtApi::tickStreamTime();
\r
3553 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3555 // The ASIO documentation says that this usually only happens during
\r
3556 // external sync. Audio processing is not stopped by the driver,
\r
3557 // actual sample rate might not have even changed, maybe only the
\r
3558 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3561 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3563 object->stopStream();
\r
3565 catch ( RtAudioError &exception ) {
\r
3566 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3570 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3573 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3577 switch( selector ) {
\r
3578 case kAsioSelectorSupported:
\r
3579 if ( value == kAsioResetRequest
\r
3580 || value == kAsioEngineVersion
\r
3581 || value == kAsioResyncRequest
\r
3582 || value == kAsioLatenciesChanged
\r
3583 // The following three were added for ASIO 2.0, you don't
\r
3584 // necessarily have to support them.
\r
3585 || value == kAsioSupportsTimeInfo
\r
3586 || value == kAsioSupportsTimeCode
\r
3587 || value == kAsioSupportsInputMonitor)
\r
3590 case kAsioResetRequest:
\r
3591 // Defer the task and perform the reset of the driver during the
\r
3592 // next "safe" situation. You cannot reset the driver right now,
\r
3593 // as this code is called from the driver. Reset the driver is
\r
3594 // done by completely destruct is. I.e. ASIOStop(),
\r
3595 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3597 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3600 case kAsioResyncRequest:
\r
3601 // This informs the application that the driver encountered some
\r
3602 // non-fatal data loss. It is used for synchronization purposes
\r
3603 // of different media. Added mainly to work around the Win16Mutex
\r
3604 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3605 // which could lose data because the Mutex was held too long by
\r
3606 // another thread. However a driver can issue it in other
\r
3607 // situations, too.
\r
3608 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3612 case kAsioLatenciesChanged:
\r
3613 // This will inform the host application that the drivers were
\r
3614 // latencies changed. Beware, it this does not mean that the
\r
3615 // buffer sizes have changed! You might need to update internal
\r
3617 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3620 case kAsioEngineVersion:
\r
3621 // Return the supported ASIO version of the host application. If
\r
3622 // a host application does not implement this selector, ASIO 1.0
\r
3623 // is assumed by the driver.
\r
3626 case kAsioSupportsTimeInfo:
\r
3627 // Informs the driver whether the
\r
3628 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3629 // For compatibility with ASIO 1.0 drivers the host application
\r
3630 // should always support the "old" bufferSwitch method, too.
\r
3633 case kAsioSupportsTimeCode:
\r
3634 // Informs the driver whether application is interested in time
\r
3635 // code info. If an application does not need to know about time
\r
3636 // code, the driver has less work to do.
\r
3643 static const char* getAsioErrorString( ASIOError result )
\r
3648 const char*message;
\r
3651 static const Messages m[] =
\r
3653 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3654 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3655 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3656 { ASE_InvalidMode, "Invalid mode." },
\r
3657 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3658 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3659 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3662 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3663 if ( m[i].value == result ) return m[i].message;
\r
3665 return "Unknown error.";
\r
3668 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3672 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3674 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3675 // - Introduces support for the Windows WASAPI API
\r
3676 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3677 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3678 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3683 #include <audioclient.h>
\r
3685 #include <mmdeviceapi.h>
\r
3686 #include <functiondiscoverykeys_devpkey.h>
\r
3688 //=============================================================================
\r
3690 #define SAFE_RELEASE( objectPtr )\
\r
3693 objectPtr->Release();\
\r
3694 objectPtr = NULL;\
\r
3697 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3699 //-----------------------------------------------------------------------------
\r
3701 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3702 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3703 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3704 // provide intermediate storage for read / write synchronization.
\r
3705 class WasapiBuffer
\r
3709 : buffer_( NULL ),
\r
3718 // sets the length of the internal ring buffer
\r
3719 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3722 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3724 bufferSize_ = bufferSize;
\r
3729 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3730 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3732 if ( !buffer || // incoming buffer is NULL
\r
3733 bufferSize == 0 || // incoming buffer has no data
\r
3734 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3739 unsigned int relOutIndex = outIndex_;
\r
3740 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3741 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3742 relOutIndex += bufferSize_;
\r
3745 // "in" index can end on the "out" index but cannot begin at it
\r
3746 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3747 return false; // not enough space between "in" index and "out" index
\r
3750 // copy buffer from external to internal
\r
3751 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3752 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3753 int fromInSize = bufferSize - fromZeroSize;
\r
3757 case RTAUDIO_SINT8:
\r
3758 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3759 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3761 case RTAUDIO_SINT16:
\r
3762 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3763 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3765 case RTAUDIO_SINT24:
\r
3766 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3767 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3769 case RTAUDIO_SINT32:
\r
3770 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3771 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3773 case RTAUDIO_FLOAT32:
\r
3774 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3775 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3777 case RTAUDIO_FLOAT64:
\r
3778 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3779 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3783 // update "in" index
\r
3784 inIndex_ += bufferSize;
\r
3785 inIndex_ %= bufferSize_;
\r
3790 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3791 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3793 if ( !buffer || // incoming buffer is NULL
\r
3794 bufferSize == 0 || // incoming buffer has no data
\r
3795 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3800 unsigned int relInIndex = inIndex_;
\r
3801 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3802 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3803 relInIndex += bufferSize_;
\r
3806 // "out" index can begin at and end on the "in" index
\r
3807 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3808 return false; // not enough space between "out" index and "in" index
\r
3811 // copy buffer from internal to external
\r
3812 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3813 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3814 int fromOutSize = bufferSize - fromZeroSize;
\r
3818 case RTAUDIO_SINT8:
\r
3819 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3820 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3822 case RTAUDIO_SINT16:
\r
3823 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3824 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3826 case RTAUDIO_SINT24:
\r
3827 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3828 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3830 case RTAUDIO_SINT32:
\r
3831 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3832 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3834 case RTAUDIO_FLOAT32:
\r
3835 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3836 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3838 case RTAUDIO_FLOAT64:
\r
3839 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3840 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3844 // update "out" index
\r
3845 outIndex_ += bufferSize;
\r
3846 outIndex_ %= bufferSize_;
\r
3853 unsigned int bufferSize_;
\r
3854 unsigned int inIndex_;
\r
3855 unsigned int outIndex_;
\r
3858 //-----------------------------------------------------------------------------
\r
3860 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3861 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3862 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3863 // This sample rate converter works best with conversions between one rate and its multiple.
\r
3864 void convertBufferWasapi( char* outBuffer,
\r
3865 const char* inBuffer,
\r
3866 const unsigned int& channelCount,
\r
3867 const unsigned int& inSampleRate,
\r
3868 const unsigned int& outSampleRate,
\r
3869 const unsigned int& inSampleCount,
\r
3870 unsigned int& outSampleCount,
\r
3871 const RtAudioFormat& format )
\r
3873 // calculate the new outSampleCount and relative sampleStep
\r
3874 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3875 float sampleRatioInv = ( float ) 1 / sampleRatio;
\r
3876 float sampleStep = 1.0f / sampleRatio;
\r
3877 float inSampleFraction = 0.0f;
\r
3879 outSampleCount = ( unsigned int ) std::roundf( inSampleCount * sampleRatio );
\r
3881 // if inSampleRate is a multiple of outSampleRate (or vice versa) there's no need to interpolate
\r
3882 if ( floor( sampleRatio ) == sampleRatio || floor( sampleRatioInv ) == sampleRatioInv )
\r
3884 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3885 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3887 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3891 case RTAUDIO_SINT8:
\r
3892 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3894 case RTAUDIO_SINT16:
\r
3895 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3897 case RTAUDIO_SINT24:
\r
3898 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3900 case RTAUDIO_SINT32:
\r
3901 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3903 case RTAUDIO_FLOAT32:
\r
3904 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3906 case RTAUDIO_FLOAT64:
\r
3907 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3911 // jump to next in sample
\r
3912 inSampleFraction += sampleStep;
\r
3915 else // else interpolate
\r
3917 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3918 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3920 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3921 float inSampleDec = inSampleFraction - inSample;
\r
3922 unsigned int frameInSample = inSample * channelCount;
\r
3923 unsigned int frameOutSample = outSample * channelCount;
\r
3927 case RTAUDIO_SINT8:
\r
3929 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3931 char fromSample = ( ( char* ) inBuffer )[ frameInSample + channel ];
\r
3932 char toSample = ( ( char* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3933 char sampleDiff = ( char ) ( ( toSample - fromSample ) * inSampleDec );
\r
3934 ( ( char* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3938 case RTAUDIO_SINT16:
\r
3940 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3942 short fromSample = ( ( short* ) inBuffer )[ frameInSample + channel ];
\r
3943 short toSample = ( ( short* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3944 short sampleDiff = ( short ) ( ( toSample - fromSample ) * inSampleDec );
\r
3945 ( ( short* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3949 case RTAUDIO_SINT24:
\r
3951 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3953 int fromSample = ( ( S24* ) inBuffer )[ frameInSample + channel ].asInt();
\r
3954 int toSample = ( ( S24* ) inBuffer )[ frameInSample + channelCount + channel ].asInt();
\r
3955 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
\r
3956 ( ( S24* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3960 case RTAUDIO_SINT32:
\r
3962 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3964 int fromSample = ( ( int* ) inBuffer )[ frameInSample + channel ];
\r
3965 int toSample = ( ( int* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3966 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
\r
3967 ( ( int* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3971 case RTAUDIO_FLOAT32:
\r
3973 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3975 float fromSample = ( ( float* ) inBuffer )[ frameInSample + channel ];
\r
3976 float toSample = ( ( float* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3977 float sampleDiff = ( toSample - fromSample ) * inSampleDec;
\r
3978 ( ( float* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3982 case RTAUDIO_FLOAT64:
\r
3984 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3986 double fromSample = ( ( double* ) inBuffer )[ frameInSample + channel ];
\r
3987 double toSample = ( ( double* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3988 double sampleDiff = ( toSample - fromSample ) * inSampleDec;
\r
3989 ( ( double* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3995 // jump to next in sample
\r
3996 inSampleFraction += sampleStep;
\r
4001 //-----------------------------------------------------------------------------
\r
4003 // A structure to hold various information related to the WASAPI implementation.
\r
4004 struct WasapiHandle
\r
4006 IAudioClient* captureAudioClient;
\r
4007 IAudioClient* renderAudioClient;
\r
4008 IAudioCaptureClient* captureClient;
\r
4009 IAudioRenderClient* renderClient;
\r
4010 HANDLE captureEvent;
\r
4011 HANDLE renderEvent;
\r
4014 : captureAudioClient( NULL ),
\r
4015 renderAudioClient( NULL ),
\r
4016 captureClient( NULL ),
\r
4017 renderClient( NULL ),
\r
4018 captureEvent( NULL ),
\r
4019 renderEvent( NULL ) {}
\r
4022 //=============================================================================
\r
4024 RtApiWasapi::RtApiWasapi()
\r
4025 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
4027 // WASAPI can run either apartment or multi-threaded
\r
4028 HRESULT hr = CoInitialize( NULL );
\r
4029 if ( !FAILED( hr ) )
\r
4030 coInitialized_ = true;
\r
4032 // Instantiate device enumerator
\r
4033 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
4034 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
4035 ( void** ) &deviceEnumerator_ );
\r
4037 if ( FAILED( hr ) ) {
\r
4038 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
4039 error( RtAudioError::DRIVER_ERROR );
\r
4043 //-----------------------------------------------------------------------------
\r
4045 RtApiWasapi::~RtApiWasapi()
\r
4047 if ( stream_.state != STREAM_CLOSED )
\r
4050 SAFE_RELEASE( deviceEnumerator_ );
\r
4052 // If this object previously called CoInitialize()
\r
4053 if ( coInitialized_ )
\r
4057 //=============================================================================
\r
4059 unsigned int RtApiWasapi::getDeviceCount( void )
\r
4061 unsigned int captureDeviceCount = 0;
\r
4062 unsigned int renderDeviceCount = 0;
\r
4064 IMMDeviceCollection* captureDevices = NULL;
\r
4065 IMMDeviceCollection* renderDevices = NULL;
\r
4067 // Count capture devices
\r
4068 errorText_.clear();
\r
4069 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4070 if ( FAILED( hr ) ) {
\r
4071 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
4075 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4076 if ( FAILED( hr ) ) {
\r
4077 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
4081 // Count render devices
\r
4082 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4083 if ( FAILED( hr ) ) {
\r
4084 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
4088 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4089 if ( FAILED( hr ) ) {
\r
4090 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
4095 // release all references
\r
4096 SAFE_RELEASE( captureDevices );
\r
4097 SAFE_RELEASE( renderDevices );
\r
4099 if ( errorText_.empty() )
\r
4100 return captureDeviceCount + renderDeviceCount;
\r
4102 error( RtAudioError::DRIVER_ERROR );
\r
4106 //-----------------------------------------------------------------------------
\r
4108 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
4110 RtAudio::DeviceInfo info;
\r
4111 unsigned int captureDeviceCount = 0;
\r
4112 unsigned int renderDeviceCount = 0;
\r
4113 std::string defaultDeviceName;
\r
4114 bool isCaptureDevice = false;
\r
4116 PROPVARIANT deviceNameProp;
\r
4117 PROPVARIANT defaultDeviceNameProp;
\r
4119 IMMDeviceCollection* captureDevices = NULL;
\r
4120 IMMDeviceCollection* renderDevices = NULL;
\r
4121 IMMDevice* devicePtr = NULL;
\r
4122 IMMDevice* defaultDevicePtr = NULL;
\r
4123 IAudioClient* audioClient = NULL;
\r
4124 IPropertyStore* devicePropStore = NULL;
\r
4125 IPropertyStore* defaultDevicePropStore = NULL;
\r
4127 WAVEFORMATEX* deviceFormat = NULL;
\r
4128 WAVEFORMATEX* closestMatchFormat = NULL;
\r
4131 info.probed = false;
\r
4133 // Count capture devices
\r
4134 errorText_.clear();
\r
4135 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4136 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4137 if ( FAILED( hr ) ) {
\r
4138 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
4142 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4143 if ( FAILED( hr ) ) {
\r
4144 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
4148 // Count render devices
\r
4149 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4150 if ( FAILED( hr ) ) {
\r
4151 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
4155 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4156 if ( FAILED( hr ) ) {
\r
4157 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
4161 // validate device index
\r
4162 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4163 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
4164 errorType = RtAudioError::INVALID_USE;
\r
4168 // determine whether index falls within capture or render devices
\r
4169 if ( device >= renderDeviceCount ) {
\r
4170 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4171 if ( FAILED( hr ) ) {
\r
4172 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4175 isCaptureDevice = true;
\r
4178 hr = renderDevices->Item( device, &devicePtr );
\r
4179 if ( FAILED( hr ) ) {
\r
4180 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4183 isCaptureDevice = false;
\r
4186 // get default device name
\r
4187 if ( isCaptureDevice ) {
\r
4188 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4189 if ( FAILED( hr ) ) {
\r
4190 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4195 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4196 if ( FAILED( hr ) ) {
\r
4197 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4202 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4203 if ( FAILED( hr ) ) {
\r
4204 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4207 PropVariantInit( &defaultDeviceNameProp );
\r
4209 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4210 if ( FAILED( hr ) ) {
\r
4211 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4215 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
\r
4218 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4219 if ( FAILED( hr ) ) {
\r
4220 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4224 PropVariantInit( &deviceNameProp );
\r
4226 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4227 if ( FAILED( hr ) ) {
\r
4228 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4232 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
\r
4235 if ( isCaptureDevice ) {
\r
4236 info.isDefaultInput = info.name == defaultDeviceName;
\r
4237 info.isDefaultOutput = false;
\r
4240 info.isDefaultInput = false;
\r
4241 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4245 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4246 if ( FAILED( hr ) ) {
\r
4247 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4251 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4252 if ( FAILED( hr ) ) {
\r
4253 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4257 if ( isCaptureDevice ) {
\r
4258 info.inputChannels = deviceFormat->nChannels;
\r
4259 info.outputChannels = 0;
\r
4260 info.duplexChannels = 0;
\r
4263 info.inputChannels = 0;
\r
4264 info.outputChannels = deviceFormat->nChannels;
\r
4265 info.duplexChannels = 0;
\r
4269 info.sampleRates.clear();
\r
4271 // allow support for all sample rates as we have a built-in sample rate converter
\r
4272 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4273 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4275 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
\r
4278 info.nativeFormats = 0;
\r
4280 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4281 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4282 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4284 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4285 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4287 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4288 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4291 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4292 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4293 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4295 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4296 info.nativeFormats |= RTAUDIO_SINT8;
\r
4298 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4299 info.nativeFormats |= RTAUDIO_SINT16;
\r
4301 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4302 info.nativeFormats |= RTAUDIO_SINT24;
\r
4304 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4305 info.nativeFormats |= RTAUDIO_SINT32;
\r
4310 info.probed = true;
\r
4313 // release all references
\r
4314 PropVariantClear( &deviceNameProp );
\r
4315 PropVariantClear( &defaultDeviceNameProp );
\r
4317 SAFE_RELEASE( captureDevices );
\r
4318 SAFE_RELEASE( renderDevices );
\r
4319 SAFE_RELEASE( devicePtr );
\r
4320 SAFE_RELEASE( defaultDevicePtr );
\r
4321 SAFE_RELEASE( audioClient );
\r
4322 SAFE_RELEASE( devicePropStore );
\r
4323 SAFE_RELEASE( defaultDevicePropStore );
\r
4325 CoTaskMemFree( deviceFormat );
\r
4326 CoTaskMemFree( closestMatchFormat );
\r
4328 if ( !errorText_.empty() )
\r
4329 error( errorType );
\r
4333 //-----------------------------------------------------------------------------
\r
4335 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4337 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4338 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4346 //-----------------------------------------------------------------------------
\r
4348 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4350 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4351 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4359 //-----------------------------------------------------------------------------
\r
4361 void RtApiWasapi::closeStream( void )
\r
4363 if ( stream_.state == STREAM_CLOSED ) {
\r
4364 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4365 error( RtAudioError::WARNING );
\r
4369 if ( stream_.state != STREAM_STOPPED )
\r
4372 // clean up stream memory
\r
4373 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4374 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4376 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4377 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4379 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4380 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4382 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4383 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4385 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4386 stream_.apiHandle = NULL;
\r
4388 for ( int i = 0; i < 2; i++ ) {
\r
4389 if ( stream_.userBuffer[i] ) {
\r
4390 free( stream_.userBuffer[i] );
\r
4391 stream_.userBuffer[i] = 0;
\r
4395 if ( stream_.deviceBuffer ) {
\r
4396 free( stream_.deviceBuffer );
\r
4397 stream_.deviceBuffer = 0;
\r
4400 // update stream state
\r
4401 stream_.state = STREAM_CLOSED;
\r
4404 //-----------------------------------------------------------------------------
\r
4406 void RtApiWasapi::startStream( void )
\r
4410 if ( stream_.state == STREAM_RUNNING ) {
\r
4411 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4412 error( RtAudioError::WARNING );
\r
4416 // update stream state
\r
4417 stream_.state = STREAM_RUNNING;
\r
4419 // create WASAPI stream thread
\r
4420 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4422 if ( !stream_.callbackInfo.thread ) {
\r
4423 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4424 error( RtAudioError::THREAD_ERROR );
\r
4427 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4428 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4432 //-----------------------------------------------------------------------------
\r
4434 void RtApiWasapi::stopStream( void )
\r
4438 if ( stream_.state == STREAM_STOPPED ) {
\r
4439 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4440 error( RtAudioError::WARNING );
\r
4444 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4445 stream_.state = STREAM_STOPPING;
\r
4447 // wait until stream thread is stopped
\r
4448 while( stream_.state != STREAM_STOPPED ) {
\r
4452 // Wait for the last buffer to play before stopping.
\r
4453 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4455 // stop capture client if applicable
\r
4456 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4457 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4458 if ( FAILED( hr ) ) {
\r
4459 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4460 error( RtAudioError::DRIVER_ERROR );
\r
4465 // stop render client if applicable
\r
4466 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4467 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4468 if ( FAILED( hr ) ) {
\r
4469 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4470 error( RtAudioError::DRIVER_ERROR );
\r
4475 // close thread handle
\r
4476 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4477 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4478 error( RtAudioError::THREAD_ERROR );
\r
4482 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4485 //-----------------------------------------------------------------------------
\r
// Aborts the WASAPI stream immediately (unlike stopStream(), no drain of the
// last buffer is visible here): warns if already stopped, flags the stream
// thread via STREAM_STOPPING, busy-waits until the thread reports
// STREAM_STOPPED, stops the capture/render audio clients, and closes the
// callback thread handle.
// NOTE(review): this extraction is lossy — closing braces, `return`
// statements and blank separators from the original file are missing between
// the numbered lines below; the stray leading numbers are original source
// line numbers left behind by the extraction. Reconcile against the
// canonical RtAudio.cpp before compiling.
4487 void RtApiWasapi::abortStream( void )
4491 if ( stream_.state == STREAM_STOPPED ) {
4492 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4493 error( RtAudioError::WARNING );
// (presumably an early return followed here — dropped by extraction)
4497 // inform stream thread by setting stream state to STREAM_STOPPING
4498 stream_.state = STREAM_STOPPING;
4500 // wait until stream thread is stopped
// NOTE(review): spin-wait on stream_.state; the thread itself sets
// STREAM_STOPPED at the end of wasapiThread().
4501 while ( stream_.state != STREAM_STOPPED ) {
4505 // stop capture client if applicable
4506 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4507 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4508 if ( FAILED( hr ) ) {
4509 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4510 error( RtAudioError::DRIVER_ERROR );
4515 // stop render client if applicable
4516 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4517 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4518 if ( FAILED( hr ) ) {
4519 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4520 error( RtAudioError::DRIVER_ERROR );
4525 // close thread handle
4526 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4527 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4528 error( RtAudioError::THREAD_ERROR );
4532 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4535 //-----------------------------------------------------------------------------
\r
// Opens a WASAPI capture or render endpoint for the given device index and
// fills in the shared stream_ structure (channels, sample rate, buffer size,
// conversion flags, user buffer). Returns SUCCESS/FAILURE; on failure the
// cleanup tail releases COM objects and reports errorText_ via error().
//
// Device indexing convention visible below: render devices occupy indices
// [0, renderDeviceCount); capture devices follow at
// [renderDeviceCount, renderDeviceCount + captureDeviceCount).
//
// NOTE(review): lossy extraction — the original's `goto Exit;` lines,
// closing braces, `else` keywords and the `Exit:` label are missing between
// the numbered lines; the leading numbers are original source line numbers.
// The control flow annotated here is inferred from the surviving error-path
// pattern (set errorText_, jump to shared cleanup) — confirm against the
// canonical file.
4537 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4538 unsigned int firstChannel, unsigned int sampleRate,
4539 RtAudioFormat format, unsigned int* bufferSize,
4540 RtAudio::StreamOptions* options )
4542 bool methodResult = FAILURE;
4543 unsigned int captureDeviceCount = 0;
4544 unsigned int renderDeviceCount = 0;
4546 IMMDeviceCollection* captureDevices = NULL;
4547 IMMDeviceCollection* renderDevices = NULL;
4548 IMMDevice* devicePtr = NULL;
4549 WAVEFORMATEX* deviceFormat = NULL;
4550 unsigned int bufferBytes;
4551 stream_.state = STREAM_STOPPED;
4553 // create API Handle if not already created
4554 if ( !stream_.apiHandle )
4555 stream_.apiHandle = ( void* ) new WasapiHandle();
4557 // Count capture devices
4558 errorText_.clear();
4559 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4560 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4561 if ( FAILED( hr ) ) {
4562 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4566 hr = captureDevices->GetCount( &captureDeviceCount );
4567 if ( FAILED( hr ) ) {
4568 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4572 // Count render devices
4573 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4574 if ( FAILED( hr ) ) {
4575 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4579 hr = renderDevices->GetCount( &renderDeviceCount );
4580 if ( FAILED( hr ) ) {
4581 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4585 // validate device index
4586 if ( device >= captureDeviceCount + renderDeviceCount ) {
4587 errorType = RtAudioError::INVALID_USE;
4588 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4592 // determine whether index falls within capture or render devices
4593 if ( device >= renderDeviceCount ) {
// --- capture-endpoint branch (index maps to a capture device) ---
4594 if ( mode != INPUT ) {
4595 errorType = RtAudioError::INVALID_USE;
4596 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4600 // retrieve captureAudioClient from devicePtr
4601 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4603 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4604 if ( FAILED( hr ) ) {
4605 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4609 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4610 NULL, ( void** ) &captureAudioClient );
4611 if ( FAILED( hr ) ) {
4612 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4616 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4617 if ( FAILED( hr ) ) {
4618 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4622 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
// NOTE(review): latency is cast to long long* — assumes stream_.latency[mode]
// is at least 64 bits wide; return value of GetStreamLatency is ignored.
4623 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// --- render-endpoint branch (presumably an `else` here — dropped) ---
4626 if ( mode != OUTPUT ) {
4627 errorType = RtAudioError::INVALID_USE;
4628 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4632 // retrieve renderAudioClient from devicePtr
4633 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4635 hr = renderDevices->Item( device, &devicePtr );
4636 if ( FAILED( hr ) ) {
4637 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4641 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4642 NULL, ( void** ) &renderAudioClient );
4643 if ( FAILED( hr ) ) {
4644 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4648 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4649 if ( FAILED( hr ) ) {
4650 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4654 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4655 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4658 // fill stream data
// Opening the second direction of an already-open stream promotes it to DUPLEX.
4659 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4660 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4661 stream_.mode = DUPLEX;
4664 stream_.mode = mode;
4667 stream_.device[mode] = device;
4668 stream_.doByteSwap[mode] = false;
4669 stream_.sampleRate = sampleRate;
4670 stream_.bufferSize = *bufferSize;
4671 stream_.nBuffers = 1;
4672 stream_.nUserChannels[mode] = channels;
4673 stream_.channelOffset[mode] = firstChannel;
4674 stream_.userFormat = format;
4675 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4677 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4678 stream_.userInterleaved = false;
4680 stream_.userInterleaved = true;
4681 stream_.deviceInterleaved[mode] = true;
4683 // Set flags for buffer conversion.
4684 stream_.doConvertBuffer[mode] = false;
// NOTE(review): the second operand compares the whole nUserChannels array
// against nDeviceChannels (no [mode] index) — array-vs-array comparison as
// written; confirm against the canonical source whether this is intended.
4685 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4686 stream_.nUserChannels != stream_.nDeviceChannels )
4687 stream_.doConvertBuffer[mode] = true;
4688 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4689 stream_.nUserChannels[mode] > 1 )
4690 stream_.doConvertBuffer[mode] = true;
4692 if ( stream_.doConvertBuffer[mode] )
4693 setConvertInfo( mode, 0 );
4695 // Allocate necessary internal buffers
4696 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4698 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4699 if ( !stream_.userBuffer[mode] ) {
4700 errorType = RtAudioError::MEMORY_ERROR;
4701 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4705 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4706 stream_.callbackInfo.priority = 15;
4708 stream_.callbackInfo.priority = 0;
4710 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4711 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4713 methodResult = SUCCESS;
// --- shared cleanup tail (the `Exit:` label itself was dropped) ---
4717 SAFE_RELEASE( captureDevices );
4718 SAFE_RELEASE( renderDevices );
4719 SAFE_RELEASE( devicePtr );
4720 CoTaskMemFree( deviceFormat );
4722 // if method failed, close the stream
4723 if ( methodResult == FAILURE )
4726 if ( !errorText_.empty() )
4727 error( errorType );
4728 return methodResult;
\r
4731 //=============================================================================
\r
// Win32 thread entry trampoline: forwards to the instance's wasapiThread().
// NOTE(review): extraction dropped the surrounding braces and the `return`
// statement required by the DWORD WINAPI signature; the leading numbers are
// original source line numbers.
4733 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4736 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
// Win32 thread entry trampoline: calls stopStream() on the instance, so the
// stream thread can request its own stop from a separate thread.
// NOTE(review): extraction dropped braces/return; leading numbers are
// original source line numbers.
4741 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4744 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
// Win32 thread entry trampoline: calls abortStream() on the instance
// (see the callbackResult == 2 path in the stream thread).
// NOTE(review): extraction dropped braces/return; leading numbers are
// original source line numbers.
4749 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4752 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4757 //-----------------------------------------------------------------------------
\r
// The WASAPI stream thread. One-time setup: CoInitialize, optional MMCSS
// "Pro Audio" scheduling, lazy initialization/start of the shared-mode
// capture and render IAudioClients (event-driven), ring-buffer sizing, and
// allocation of the conversion/device buffers. Then the real-time loop:
//   capture ring buffer -> sample-rate/format convert -> user callback ->
//   convert back -> render ring buffer, releasing WASAPI buffers each pass.
// Exits when stream_.state becomes STREAM_STOPPING; the cleanup tail frees
// formats/buffers, sets STREAM_STOPPED, and reports any pending error.
//
// NOTE(review): lossy extraction — the original's closing braces, `else`
// keywords, `goto Exit;` lines, the `Exit:` label and several statements
// (e.g. the HRESULT declaration for `hr`, some Initialize() arguments) are
// missing between the numbered lines below; the leading numbers are original
// source line numbers. Do not compile as-is; reconcile with the canonical
// RtAudio.cpp.
4759 void RtApiWasapi::wasapiThread()
4761 // as this is a new thread, we must CoInitialize it
4762 CoInitialize( NULL );
// Pull the per-stream COM objects and events out of the opaque apiHandle.
4766 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4767 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4768 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4769 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4770 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4771 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4773 WAVEFORMATEX* captureFormat = NULL;
4774 WAVEFORMATEX* renderFormat = NULL;
// Ratio of device mix rate to user-requested rate; used to scale buffer sizes.
4775 float captureSrRatio = 0.0f;
4776 float renderSrRatio = 0.0f;
4777 WasapiBuffer captureBuffer;
4778 WasapiBuffer renderBuffer;
4780 // declare local stream variables
4781 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4782 BYTE* streamBuffer = NULL;
4783 unsigned long captureFlags = 0;
4784 unsigned int bufferFrameCount = 0;
4785 unsigned int numFramesPadding = 0;
4786 unsigned int convBufferSize = 0;
4787 bool callbackPushed = false;
4788 bool callbackPulled = false;
4789 bool callbackStopped = false;
4790 int callbackResult = 0;
4792 // convBuffer is used to store converted buffers between WASAPI and the user
4793 char* convBuffer = NULL;
4794 unsigned int convBuffSize = 0;
4795 unsigned int deviceBuffSize = 0;
4797 errorText_.clear();
4798 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4800 // Attempt to assign "Pro Audio" characteristic to thread
// NOTE(review): GetProcAddress/call results are not NULL-checked in the
// visible text — presumably guarded in the dropped lines; verify.
4801 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4803 DWORD taskIndex = 0;
4804 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4805 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4806 FreeLibrary( AvrtDll );
4809 // start capture stream if applicable
4810 if ( captureAudioClient ) {
4811 hr = captureAudioClient->GetMixFormat( &captureFormat );
4812 if ( FAILED( hr ) ) {
4813 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4817 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4819 // initialize capture stream according to desire buffer size
4820 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
// Convert frames to 100-ns REFERENCE_TIME units for Initialize().
4821 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
4823 if ( !captureClient ) {
4824 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4825 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4826 desiredBufferPeriod,
4827 desiredBufferPeriod,
4830 if ( FAILED( hr ) ) {
4831 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4835 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4836 ( void** ) &captureClient );
4837 if ( FAILED( hr ) ) {
4838 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4842 // configure captureEvent to trigger on every available capture buffer
4843 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4844 if ( !captureEvent ) {
4845 errorType = RtAudioError::SYSTEM_ERROR;
4846 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4850 hr = captureAudioClient->SetEventHandle( captureEvent );
4851 if ( FAILED( hr ) ) {
4852 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Persist the lazily-created client/event so later stop/close paths see them.
4856 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4857 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4860 unsigned int inBufferSize = 0;
4861 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4862 if ( FAILED( hr ) ) {
4863 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4867 // scale outBufferSize according to stream->user sample rate ratio
4868 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
4869 inBufferSize *= stream_.nDeviceChannels[INPUT];
4871 // set captureBuffer size
4872 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4874 // reset the capture stream
4875 hr = captureAudioClient->Reset();
4876 if ( FAILED( hr ) ) {
4877 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4881 // start the capture stream
4882 hr = captureAudioClient->Start();
4883 if ( FAILED( hr ) ) {
4884 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4889 // start render stream if applicable
// Mirrors the capture setup above for the render endpoint.
4890 if ( renderAudioClient ) {
4891 hr = renderAudioClient->GetMixFormat( &renderFormat );
4892 if ( FAILED( hr ) ) {
4893 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4897 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
4899 // initialize render stream according to desire buffer size
4900 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
4901 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
4903 if ( !renderClient ) {
4904 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4905 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4906 desiredBufferPeriod,
4907 desiredBufferPeriod,
4910 if ( FAILED( hr ) ) {
4911 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4915 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4916 ( void** ) &renderClient );
4917 if ( FAILED( hr ) ) {
4918 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4922 // configure renderEvent to trigger on every available render buffer
4923 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4924 if ( !renderEvent ) {
4925 errorType = RtAudioError::SYSTEM_ERROR;
4926 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4930 hr = renderAudioClient->SetEventHandle( renderEvent );
4931 if ( FAILED( hr ) ) {
4932 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4936 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4937 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4940 unsigned int outBufferSize = 0;
4941 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4942 if ( FAILED( hr ) ) {
4943 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4947 // scale inBufferSize according to user->stream sample rate ratio
4948 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
4949 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4951 // set renderBuffer size
4952 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4954 // reset the render stream
4955 hr = renderAudioClient->Reset();
4956 if ( FAILED( hr ) ) {
4957 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4961 // start the render stream
4962 hr = renderAudioClient->Start();
4963 if ( FAILED( hr ) ) {
4964 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
// Size the shared conversion/device buffers for the active direction(s);
// DUPLEX takes the max of both so one allocation serves both paths.
4969 if ( stream_.mode == INPUT ) {
4970 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4971 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4973 else if ( stream_.mode == OUTPUT ) {
4974 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4975 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4977 else if ( stream_.mode == DUPLEX ) {
4978 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4979 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4980 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4981 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4984 convBuffer = ( char* ) malloc( convBuffSize );
4985 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4986 if ( !convBuffer || !stream_.deviceBuffer ) {
4987 errorType = RtAudioError::MEMORY_ERROR;
4988 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
4992 // stream process loop
4993 while ( stream_.state != STREAM_STOPPING ) {
// Phase 1: Callback Input — fill userBuffer[INPUT] from the capture ring.
4994 if ( !callbackPulled ) {
4997 // 1. Pull callback buffer from inputBuffer
4998 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
4999 // Convert callback buffer to user format
5001 if ( captureAudioClient ) {
5002 // Pull callback buffer from inputBuffer
5003 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5004 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
5005 stream_.deviceFormat[INPUT] );
5007 if ( callbackPulled ) {
5008 // Convert callback buffer to user sample rate
5009 convertBufferWasapi( stream_.deviceBuffer,
5011 stream_.nDeviceChannels[INPUT],
5012 captureFormat->nSamplesPerSec,
5013 stream_.sampleRate,
5014 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
5016 stream_.deviceFormat[INPUT] );
5018 if ( stream_.doConvertBuffer[INPUT] ) {
5019 // Convert callback buffer to user format
5020 convertBuffer( stream_.userBuffer[INPUT],
5021 stream_.deviceBuffer,
5022 stream_.convertInfo[INPUT] );
5025 // no further conversion, simple copy deviceBuffer to userBuffer
5026 memcpy( stream_.userBuffer[INPUT],
5027 stream_.deviceBuffer,
5028 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5033 // if there is no capture stream, set callbackPulled flag
5034 callbackPulled = true;
5037 // Execute Callback
5038 // ================
5039 // 1. Execute user callback method
5040 // 2. Handle return value from callback
5042 // if callback has not requested the stream to stop
5043 if ( callbackPulled && !callbackStopped ) {
5044 // Execute user callback method
5045 callbackResult = callback( stream_.userBuffer[OUTPUT],
5046 stream_.userBuffer[INPUT],
5047 stream_.bufferSize,
5049 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5050 stream_.callbackInfo.userData );
5052 // Handle return value from callback
// 1 => drain-stop, 2 => abort; both are executed on a helper thread
// because stopStream()/abortStream() block waiting on this thread.
5053 if ( callbackResult == 1 ) {
5054 // instantiate a thread to stop this thread
5055 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5056 if ( !threadHandle ) {
5057 errorType = RtAudioError::THREAD_ERROR;
5058 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5061 else if ( !CloseHandle( threadHandle ) ) {
5062 errorType = RtAudioError::THREAD_ERROR;
5063 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5067 callbackStopped = true;
5069 else if ( callbackResult == 2 ) {
5070 // instantiate a thread to stop this thread
5071 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5072 if ( !threadHandle ) {
5073 errorType = RtAudioError::THREAD_ERROR;
5074 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5077 else if ( !CloseHandle( threadHandle ) ) {
5078 errorType = RtAudioError::THREAD_ERROR;
5079 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5083 callbackStopped = true;
5088 // Callback Output
5089 // ===============
5090 // 1. Convert callback buffer to stream format
5091 // 2. Convert callback buffer to stream sample rate and channel count
5092 // 3. Push callback buffer into outputBuffer
5094 if ( renderAudioClient && callbackPulled ) {
5095 if ( stream_.doConvertBuffer[OUTPUT] ) {
5096 // Convert callback buffer to stream format
5097 convertBuffer( stream_.deviceBuffer,
5098 stream_.userBuffer[OUTPUT],
5099 stream_.convertInfo[OUTPUT] );
5103 // Convert callback buffer to stream sample rate
5104 convertBufferWasapi( convBuffer,
5105 stream_.deviceBuffer,
5106 stream_.nDeviceChannels[OUTPUT],
5107 stream_.sampleRate,
5108 renderFormat->nSamplesPerSec,
5109 stream_.bufferSize,
5111 stream_.deviceFormat[OUTPUT] );
5113 // Push callback buffer into outputBuffer
5114 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5115 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5116 stream_.deviceFormat[OUTPUT] );
5119 // if there is no render stream, set callbackPushed flag
5120 callbackPushed = true;
// Phase 2: Stream Capture — drain the WASAPI capture endpoint into the ring.
5125 // 1. Get capture buffer from stream
5126 // 2. Push capture buffer into inputBuffer
5127 // 3. If 2. was successful: Release capture buffer
5129 if ( captureAudioClient ) {
5130 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5131 if ( !callbackPulled ) {
5132 WaitForSingleObject( captureEvent, INFINITE );
5135 // Get capture buffer from stream
5136 hr = captureClient->GetBuffer( &streamBuffer,
5137 &bufferFrameCount,
5138 &captureFlags, NULL, NULL );
5139 if ( FAILED( hr ) ) {
5140 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5144 if ( bufferFrameCount != 0 ) {
5145 // Push capture buffer into inputBuffer
5146 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5147 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5148 stream_.deviceFormat[INPUT] ) )
5150 // Release capture buffer
5151 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5152 if ( FAILED( hr ) ) {
5153 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5159 // Inform WASAPI that capture was unsuccessful
5160 hr = captureClient->ReleaseBuffer( 0 );
5161 if ( FAILED( hr ) ) {
5162 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5169 // Inform WASAPI that capture was unsuccessful
5170 hr = captureClient->ReleaseBuffer( 0 );
5171 if ( FAILED( hr ) ) {
5172 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
// Phase 3: Stream Render — feed the WASAPI render endpoint from the ring.
5180 // 1. Get render buffer from stream
5181 // 2. Pull next buffer from outputBuffer
5182 // 3. If 2. was successful: Fill render buffer with next buffer
5183 // Release render buffer
5185 if ( renderAudioClient ) {
5186 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5187 if ( callbackPulled && !callbackPushed ) {
5188 WaitForSingleObject( renderEvent, INFINITE );
5191 // Get render buffer from stream
5192 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5193 if ( FAILED( hr ) ) {
5194 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5198 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5199 if ( FAILED( hr ) ) {
5200 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable space = total buffer minus frames still queued for playback.
5204 bufferFrameCount -= numFramesPadding;
5206 if ( bufferFrameCount != 0 ) {
5207 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5208 if ( FAILED( hr ) ) {
5209 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5213 // Pull next buffer from outputBuffer
5214 // Fill render buffer with next buffer
5215 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5216 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5217 stream_.deviceFormat[OUTPUT] ) )
5219 // Release render buffer
5220 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5221 if ( FAILED( hr ) ) {
5222 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5228 // Inform WASAPI that render was unsuccessful
5229 hr = renderClient->ReleaseBuffer( 0, 0 );
5230 if ( FAILED( hr ) ) {
5231 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5238 // Inform WASAPI that render was unsuccessful
5239 hr = renderClient->ReleaseBuffer( 0, 0 );
5240 if ( FAILED( hr ) ) {
5241 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5247 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5248 if ( callbackPushed ) {
5249 callbackPulled = false;
5250 // tick stream time
5251 RtApi::tickStreamTime();
// --- cleanup tail (the `Exit:` label itself was dropped by extraction) ---
5258 CoTaskMemFree( captureFormat );
5259 CoTaskMemFree( renderFormat );
5261 free ( convBuffer );
5265 // update stream state
// This store releases stopStream()/abortStream(), which spin on it.
5266 stream_.state = STREAM_STOPPED;
5268 if ( errorText_.empty() )
5271 error( errorType );
\r
5274 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5278 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5280 // Modified by Robin Davies, October 2005
\r
5281 // - Improvements to DirectX pointer chasing.
\r
5282 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5283 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5284 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5285 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5287 #include <dsound.h>
\r
5288 #include <assert.h>
\r
5289 #include <algorithm>
\r
5291 #if defined(__MINGW32__)
\r
5292 // missing from latest mingw winapi
\r
5293 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5294 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5295 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5296 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5299 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5301 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5302 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5305 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5307 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5308 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5309 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5310 return pointer >= earlierPointer && pointer < laterPointer;
\r
5313 // A structure to hold various information related to the DirectSound
\r
5314 // API implementation.
\r
5316 unsigned int drainCounter; // Tracks callback counts when draining
\r
5317 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
5321 UINT bufferPointer[2];
\r
5322 DWORD dsBufferSize[2];
\r
5323 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
5327 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5330 // Declarations for utility functions, callbacks, and structures
\r
5331 // specific to the DirectSound implementation.
\r
5332 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5333 LPCTSTR description,
\r
5335 LPVOID lpContext );
\r
5337 static const char* getErrorString( int code );
\r
5339 static unsigned __stdcall callbackHandler( void *ptr );
\r
5348 : found(false) { validId[0] = false; validId[1] = false; }
\r
5351 struct DsProbeData {
\r
5353 std::vector<struct DsDevice>* dsDevices;
\r
// Constructor: initializes COM for this thread (best effort). If
// CoInitialize fails (e.g. the caller already chose an incompatible
// threading model), the failure is tolerated and coInitialized_ stays
// false so the destructor will not issue an unbalanced CoUninitialize.
// NOTE(review): lossy extraction — braces are missing and the leading
// numbers are original source line numbers.
5356 RtApiDs :: RtApiDs()
5358 // Dsound will run both-threaded. If CoInitialize fails, then just
5359 // accept whatever the mainline chose for a threading model.
5360 coInitialized_ = false;
5361 HRESULT hr = CoInitialize( NULL );
5362 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
// Destructor: closes any still-open stream, then balances the
// constructor's CoInitialize only if it succeeded.
// NOTE(review): lossy extraction — braces are missing and the leading
// numbers are original source line numbers.
5365 RtApiDs :: ~RtApiDs()
5367 if ( stream_.state != STREAM_CLOSED ) closeStream();
5368 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5371 // The DirectSound default output is always the first device.
\r
5372 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5377 // The DirectSound default input is always the first input device,
\r
5378 // which is the first capture device enumerated.
\r
5379 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
// Re-enumerates DirectSound output and capture devices into the shared
// dsDevices vector, prunes entries that no longer exist, and returns the
// resulting device count. Enumeration failures are reported as WARNINGs
// and enumeration continues with whatever was found.
// NOTE(review): lossy extraction — closing braces and the loop-increment
// lines are missing between the numbered lines; the leading numbers are
// original source line numbers.
5384 unsigned int RtApiDs :: getDeviceCount( void )
5386 // Set query flag for previously found devices to false, so that we
5387 // can check for any devices that have disappeared.
5388 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5389 dsDevices[i].found = false;
5391 // Query DirectSound devices.
5392 struct DsProbeData probeInfo;
5393 probeInfo.isInput = false;
5394 probeInfo.dsDevices = &dsDevices;
// deviceQueryCallback marks/adds devices via probeInfo as each endpoint
// is enumerated.
5395 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5396 if ( FAILED( result ) ) {
5397 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5398 errorText_ = errorStream_.str();
5399 error( RtAudioError::WARNING );
5402 // Query DirectSoundCapture devices.
5403 probeInfo.isInput = true;
5404 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5405 if ( FAILED( result ) ) {
5406 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5407 errorText_ = errorStream_.str();
5408 error( RtAudioError::WARNING );
5411 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// NOTE(review): the loop header has no increment — the original advances i
// only when no element is erased (erase() shifts the next element into
// position i); the else/increment line was dropped by the extraction.
5412 for ( unsigned int i=0; i<dsDevices.size(); ) {
5413 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5417 return static_cast<unsigned int>(dsDevices.size());
\r
5420 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5422 RtAudio::DeviceInfo info;
\r
5423 info.probed = false;
\r
5425 if ( dsDevices.size() == 0 ) {
\r
5426 // Force a query of all devices
\r
5428 if ( dsDevices.size() == 0 ) {
\r
5429 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5430 error( RtAudioError::INVALID_USE );
\r
5435 if ( device >= dsDevices.size() ) {
\r
5436 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5437 error( RtAudioError::INVALID_USE );
\r
5442 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5444 LPDIRECTSOUND output;
\r
5446 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5447 if ( FAILED( result ) ) {
\r
5448 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5449 errorText_ = errorStream_.str();
\r
5450 error( RtAudioError::WARNING );
\r
5454 outCaps.dwSize = sizeof( outCaps );
\r
5455 result = output->GetCaps( &outCaps );
\r
5456 if ( FAILED( result ) ) {
\r
5457 output->Release();
\r
5458 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5459 errorText_ = errorStream_.str();
\r
5460 error( RtAudioError::WARNING );
\r
5464 // Get output channel information.
\r
5465 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5467 // Get sample rate information.
\r
5468 info.sampleRates.clear();
\r
5469 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5470 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5471 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
\r
5472 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5474 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
5475 info.preferredSampleRate = SAMPLE_RATES[k];
\r
5479 // Get format information.
\r
5480 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5481 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5483 output->Release();
\r
5485 if ( getDefaultOutputDevice() == device )
\r
5486 info.isDefaultOutput = true;
\r
5488 if ( dsDevices[ device ].validId[1] == false ) {
\r
5489 info.name = dsDevices[ device ].name;
\r
5490 info.probed = true;
\r
5496 LPDIRECTSOUNDCAPTURE input;
\r
5497 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5498 if ( FAILED( result ) ) {
\r
5499 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5500 errorText_ = errorStream_.str();
\r
5501 error( RtAudioError::WARNING );
\r
5506 inCaps.dwSize = sizeof( inCaps );
\r
5507 result = input->GetCaps( &inCaps );
\r
5508 if ( FAILED( result ) ) {
\r
5510 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5511 errorText_ = errorStream_.str();
\r
5512 error( RtAudioError::WARNING );
\r
5516 // Get input channel information.
\r
5517 info.inputChannels = inCaps.dwChannels;
\r
5519 // Get sample rate and format information.
\r
5520 std::vector<unsigned int> rates;
\r
5521 if ( inCaps.dwChannels >= 2 ) {
\r
5522 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5523 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5524 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5525 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5526 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5527 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5528 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5529 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5531 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5532 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5533 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5534 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5535 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5537 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5538 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5539 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5540 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5541 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5544 else if ( inCaps.dwChannels == 1 ) {
\r
5545 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5546 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5547 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5548 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5549 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5550 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5551 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5552 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5554 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5555 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5556 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5557 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5558 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5560 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5561 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5562 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5563 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5564 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5567 else info.inputChannels = 0; // technically, this would be an error
\r
5571 if ( info.inputChannels == 0 ) return info;
\r
5573 // Copy the supported rates to the info structure but avoid duplication.
\r
5575 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5577 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5578 if ( rates[i] == info.sampleRates[j] ) {
\r
5583 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5585 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5587 // If device opens for both playback and capture, we determine the channels.
\r
5588 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5589 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5591 if ( device == 0 ) info.isDefaultInput = true;
\r
5593 // Copy name and return.
\r
5594 info.name = dsDevices[ device ].name;
\r
5595 info.probed = true;
\r
5599 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5600 unsigned int firstChannel, unsigned int sampleRate,
\r
5601 RtAudioFormat format, unsigned int *bufferSize,
\r
5602 RtAudio::StreamOptions *options )
\r
5604 if ( channels + firstChannel > 2 ) {
\r
5605 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
5609 size_t nDevices = dsDevices.size();
\r
5610 if ( nDevices == 0 ) {
\r
5611 // This should not happen because a check is made before this function is called.
\r
5612 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
5616 if ( device >= nDevices ) {
\r
5617 // This should not happen because a check is made before this function is called.
\r
5618 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
5622 if ( mode == OUTPUT ) {
\r
5623 if ( dsDevices[ device ].validId[0] == false ) {
\r
5624 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
5625 errorText_ = errorStream_.str();
\r
5629 else { // mode == INPUT
\r
5630 if ( dsDevices[ device ].validId[1] == false ) {
\r
5631 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
5632 errorText_ = errorStream_.str();
\r
5637 // According to a note in PortAudio, using GetDesktopWindow()
\r
5638 // instead of GetForegroundWindow() is supposed to avoid problems
\r
5639 // that occur when the application's window is not the foreground
\r
5640 // window. Also, if the application window closes before the
\r
5641 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
5642 // problems when using GetDesktopWindow() but it seems fine now
\r
5643 // (January 2010). I'll leave it commented here.
\r
5644 // HWND hWnd = GetForegroundWindow();
\r
5645 HWND hWnd = GetDesktopWindow();
\r
5647 // Check the numberOfBuffers parameter and limit the lowest value to
\r
5648 // two. This is a judgement call and a value of two is probably too
\r
5649 // low for capture, but it should work for playback.
\r
5651 if ( options ) nBuffers = options->numberOfBuffers;
\r
5652 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
5653 if ( nBuffers < 2 ) nBuffers = 3;
\r
5655 // Check the lower range of the user-specified buffer size and set
\r
5656 // (arbitrarily) to a lower bound of 32.
\r
5657 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
5659 // Create the wave format structure. The data format setting will
\r
5660 // be determined later.
\r
5661 WAVEFORMATEX waveFormat;
\r
5662 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
5663 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
5664 waveFormat.nChannels = channels + firstChannel;
\r
5665 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
5667 // Determine the device buffer size. By default, we'll use the value
\r
5668 // defined above (32K), but we will grow it to make allowances for
\r
5669 // very large software buffer sizes.
\r
5670 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
5671 DWORD dsPointerLeadTime = 0;
\r
5673 void *ohandle = 0, *bhandle = 0;
\r
5675 if ( mode == OUTPUT ) {
\r
5677 LPDIRECTSOUND output;
\r
5678 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5679 if ( FAILED( result ) ) {
\r
5680 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5681 errorText_ = errorStream_.str();
\r
5686 outCaps.dwSize = sizeof( outCaps );
\r
5687 result = output->GetCaps( &outCaps );
\r
5688 if ( FAILED( result ) ) {
\r
5689 output->Release();
\r
5690 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
5691 errorText_ = errorStream_.str();
\r
5695 // Check channel information.
\r
5696 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
5697 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
5698 errorText_ = errorStream_.str();
\r
5702 // Check format information. Use 16-bit format unless not
\r
5703 // supported or user requests 8-bit.
\r
5704 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
5705 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
5706 waveFormat.wBitsPerSample = 16;
\r
5707 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5710 waveFormat.wBitsPerSample = 8;
\r
5711 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5713 stream_.userFormat = format;
\r
5715 // Update wave format structure and buffer information.
\r
5716 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5717 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5718 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5720 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5721 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5722 dsBufferSize *= 2;
\r
5724 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
5725 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
5726 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
5727 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
5728 if ( FAILED( result ) ) {
\r
5729 output->Release();
\r
5730 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
5731 errorText_ = errorStream_.str();
\r
5735 // Even though we will write to the secondary buffer, we need to
\r
5736 // access the primary buffer to set the correct output format
\r
5737 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
5738 // buffer description.
\r
5739 DSBUFFERDESC bufferDescription;
\r
5740 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5741 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5742 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
5744 // Obtain the primary buffer
\r
5745 LPDIRECTSOUNDBUFFER buffer;
\r
5746 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5747 if ( FAILED( result ) ) {
\r
5748 output->Release();
\r
5749 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
5750 errorText_ = errorStream_.str();
\r
5754 // Set the primary DS buffer sound format.
\r
5755 result = buffer->SetFormat( &waveFormat );
\r
5756 if ( FAILED( result ) ) {
\r
5757 output->Release();
\r
5758 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
5759 errorText_ = errorStream_.str();
\r
5763 // Setup the secondary DS buffer description.
\r
5764 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5765 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5766 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5767 DSBCAPS_GLOBALFOCUS |
\r
5768 DSBCAPS_GETCURRENTPOSITION2 |
\r
5769 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
5770 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5771 bufferDescription.lpwfxFormat = &waveFormat;
\r
5773 // Try to create the secondary DS buffer. If that doesn't work,
\r
5774 // try to use software mixing. Otherwise, there's a problem.
\r
5775 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5776 if ( FAILED( result ) ) {
\r
5777 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5778 DSBCAPS_GLOBALFOCUS |
\r
5779 DSBCAPS_GETCURRENTPOSITION2 |
\r
5780 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
5781 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5782 if ( FAILED( result ) ) {
\r
5783 output->Release();
\r
5784 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
5785 errorText_ = errorStream_.str();
\r
5790 // Get the buffer size ... might be different from what we specified.
\r
5792 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
5793 result = buffer->GetCaps( &dsbcaps );
\r
5794 if ( FAILED( result ) ) {
\r
5795 output->Release();
\r
5796 buffer->Release();
\r
5797 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5798 errorText_ = errorStream_.str();
\r
5802 dsBufferSize = dsbcaps.dwBufferBytes;
\r
5804 // Lock the DS buffer
\r
5807 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5808 if ( FAILED( result ) ) {
\r
5809 output->Release();
\r
5810 buffer->Release();
\r
5811 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
5812 errorText_ = errorStream_.str();
\r
5816 // Zero the DS buffer
\r
5817 ZeroMemory( audioPtr, dataLen );
\r
5819 // Unlock the DS buffer
\r
5820 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5821 if ( FAILED( result ) ) {
\r
5822 output->Release();
\r
5823 buffer->Release();
\r
5824 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
5825 errorText_ = errorStream_.str();
\r
5829 ohandle = (void *) output;
\r
5830 bhandle = (void *) buffer;
\r
5833 if ( mode == INPUT ) {
\r
5835 LPDIRECTSOUNDCAPTURE input;
\r
5836 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5837 if ( FAILED( result ) ) {
\r
5838 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5839 errorText_ = errorStream_.str();
\r
5844 inCaps.dwSize = sizeof( inCaps );
\r
5845 result = input->GetCaps( &inCaps );
\r
5846 if ( FAILED( result ) ) {
\r
5848 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
5849 errorText_ = errorStream_.str();
\r
5853 // Check channel information.
\r
5854 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
5855 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
5859 // Check format information. Use 16-bit format unless user
\r
5860 // requests 8-bit.
\r
5861 DWORD deviceFormats;
\r
5862 if ( channels + firstChannel == 2 ) {
\r
5863 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
5864 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5865 waveFormat.wBitsPerSample = 8;
\r
5866 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5868 else { // assume 16-bit is supported
\r
5869 waveFormat.wBitsPerSample = 16;
\r
5870 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5873 else { // channel == 1
\r
5874 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
5875 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5876 waveFormat.wBitsPerSample = 8;
\r
5877 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5879 else { // assume 16-bit is supported
\r
5880 waveFormat.wBitsPerSample = 16;
\r
5881 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5884 stream_.userFormat = format;
\r
5886 // Update wave format structure and buffer information.
\r
5887 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5888 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5889 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5891 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5892 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5893 dsBufferSize *= 2;
\r
5895 // Setup the secondary DS buffer description.
\r
5896 DSCBUFFERDESC bufferDescription;
\r
5897 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
5898 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
5899 bufferDescription.dwFlags = 0;
\r
5900 bufferDescription.dwReserved = 0;
\r
5901 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5902 bufferDescription.lpwfxFormat = &waveFormat;
\r
5904 // Create the capture buffer.
\r
5905 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
5906 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
5907 if ( FAILED( result ) ) {
\r
5909 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
5910 errorText_ = errorStream_.str();
\r
5914 // Get the buffer size ... might be different from what we specified.
\r
5915 DSCBCAPS dscbcaps;
\r
5916 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
5917 result = buffer->GetCaps( &dscbcaps );
\r
5918 if ( FAILED( result ) ) {
\r
5920 buffer->Release();
\r
5921 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5922 errorText_ = errorStream_.str();
\r
5926 dsBufferSize = dscbcaps.dwBufferBytes;
\r
5928 // NOTE: We could have a problem here if this is a duplex stream
\r
5929 // and the play and capture hardware buffer sizes are different
\r
5930 // (I'm actually not sure if that is a problem or not).
\r
5931 // Currently, we are not verifying that.
\r
5933 // Lock the capture buffer
\r
5936 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5937 if ( FAILED( result ) ) {
\r
5939 buffer->Release();
\r
5940 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
5941 errorText_ = errorStream_.str();
\r
5945 // Zero the buffer
\r
5946 ZeroMemory( audioPtr, dataLen );
\r
5948 // Unlock the buffer
\r
5949 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5950 if ( FAILED( result ) ) {
\r
5952 buffer->Release();
\r
5953 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
5954 errorText_ = errorStream_.str();
\r
5958 ohandle = (void *) input;
\r
5959 bhandle = (void *) buffer;
\r
5962 // Set various stream parameters
\r
5963 DsHandle *handle = 0;
\r
5964 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
5965 stream_.nUserChannels[mode] = channels;
\r
5966 stream_.bufferSize = *bufferSize;
\r
5967 stream_.channelOffset[mode] = firstChannel;
\r
5968 stream_.deviceInterleaved[mode] = true;
\r
5969 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
5970 else stream_.userInterleaved = true;
\r
5972 // Set flag for buffer conversion
\r
5973 stream_.doConvertBuffer[mode] = false;
\r
5974 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
5975 stream_.doConvertBuffer[mode] = true;
\r
5976 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
5977 stream_.doConvertBuffer[mode] = true;
\r
5978 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5979 stream_.nUserChannels[mode] > 1 )
\r
5980 stream_.doConvertBuffer[mode] = true;
\r
5982 // Allocate necessary internal buffers
\r
5983 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5984 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5985 if ( stream_.userBuffer[mode] == NULL ) {
\r
5986 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
5990 if ( stream_.doConvertBuffer[mode] ) {
\r
5992 bool makeBuffer = true;
\r
5993 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5994 if ( mode == INPUT ) {
\r
5995 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5996 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5997 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
6001 if ( makeBuffer ) {
\r
6002 bufferBytes *= *bufferSize;
\r
6003 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
6004 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
6005 if ( stream_.deviceBuffer == NULL ) {
\r
6006 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
6012 // Allocate our DsHandle structures for the stream.
\r
6013 if ( stream_.apiHandle == 0 ) {
\r
6015 handle = new DsHandle;
\r
6017 catch ( std::bad_alloc& ) {
\r
6018 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
6022 // Create a manual-reset event.
\r
6023 handle->condition = CreateEvent( NULL, // no security
\r
6024 TRUE, // manual-reset
\r
6025 FALSE, // non-signaled initially
\r
6026 NULL ); // unnamed
\r
6027 stream_.apiHandle = (void *) handle;
\r
6030 handle = (DsHandle *) stream_.apiHandle;
\r
6031 handle->id[mode] = ohandle;
\r
6032 handle->buffer[mode] = bhandle;
\r
6033 handle->dsBufferSize[mode] = dsBufferSize;
\r
6034 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
6036 stream_.device[mode] = device;
\r
6037 stream_.state = STREAM_STOPPED;
\r
6038 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
6039 // We had already set up an output stream.
\r
6040 stream_.mode = DUPLEX;
\r
6042 stream_.mode = mode;
\r
6043 stream_.nBuffers = nBuffers;
\r
6044 stream_.sampleRate = sampleRate;
\r
6046 // Setup the buffer conversion information structure.
\r
6047 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
6049 // Setup the callback thread.
\r
6050 if ( stream_.callbackInfo.isRunning == false ) {
\r
6051 unsigned threadId;
\r
6052 stream_.callbackInfo.isRunning = true;
\r
6053 stream_.callbackInfo.object = (void *) this;
\r
6054 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
6055 &stream_.callbackInfo, 0, &threadId );
\r
6056 if ( stream_.callbackInfo.thread == 0 ) {
\r
6057 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
6061 // Boost DS thread priority
\r
6062 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
6068 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
6069 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
6070 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6071 if ( buffer ) buffer->Release();
\r
6072 object->Release();
\r
6074 if ( handle->buffer[1] ) {
\r
6075 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
6076 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6077 if ( buffer ) buffer->Release();
\r
6078 object->Release();
\r
6080 CloseHandle( handle->condition );
\r
6082 stream_.apiHandle = 0;
\r
6085 for ( int i=0; i<2; i++ ) {
\r
6086 if ( stream_.userBuffer[i] ) {
\r
6087 free( stream_.userBuffer[i] );
\r
6088 stream_.userBuffer[i] = 0;
\r
6092 if ( stream_.deviceBuffer ) {
\r
6093 free( stream_.deviceBuffer );
\r
6094 stream_.deviceBuffer = 0;
\r
6097 stream_.state = STREAM_CLOSED;
\r
6101 void RtApiDs :: closeStream()
\r
6103 if ( stream_.state == STREAM_CLOSED ) {
\r
6104 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
6105 error( RtAudioError::WARNING );
\r
6109 // Stop the callback thread.
\r
6110 stream_.callbackInfo.isRunning = false;
\r
6111 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
6112 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
6114 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6116 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
6117 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
6118 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6121 buffer->Release();
\r
6123 object->Release();
\r
6125 if ( handle->buffer[1] ) {
\r
6126 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
6127 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6130 buffer->Release();
\r
6132 object->Release();
\r
6134 CloseHandle( handle->condition );
\r
6136 stream_.apiHandle = 0;
\r
6139 for ( int i=0; i<2; i++ ) {
\r
6140 if ( stream_.userBuffer[i] ) {
\r
6141 free( stream_.userBuffer[i] );
\r
6142 stream_.userBuffer[i] = 0;
\r
6146 if ( stream_.deviceBuffer ) {
\r
6147 free( stream_.deviceBuffer );
\r
6148 stream_.deviceBuffer = 0;
\r
6151 stream_.mode = UNINITIALIZED;
\r
6152 stream_.state = STREAM_CLOSED;
\r
6155 void RtApiDs :: startStream()
\r
6158 if ( stream_.state == STREAM_RUNNING ) {
\r
6159 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
6160 error( RtAudioError::WARNING );
\r
6164 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6166 // Increase scheduler frequency on lesser windows (a side-effect of
\r
6167 // increasing timer accuracy). On greater windows (Win2K or later),
\r
6168 // this is already in effect.
\r
6169 timeBeginPeriod( 1 );
\r
6171 buffersRolling = false;
\r
6172 duplexPrerollBytes = 0;
\r
6174 if ( stream_.mode == DUPLEX ) {
\r
6175 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
6176 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
6179 HRESULT result = 0;
\r
6180 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6182 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6183 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
6184 if ( FAILED( result ) ) {
\r
6185 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
6186 errorText_ = errorStream_.str();
\r
6191 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6193 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6194 result = buffer->Start( DSCBSTART_LOOPING );
\r
6195 if ( FAILED( result ) ) {
\r
6196 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
6197 errorText_ = errorStream_.str();
\r
6202 handle->drainCounter = 0;
\r
6203 handle->internalDrain = false;
\r
6204 ResetEvent( handle->condition );
\r
6205 stream_.state = STREAM_RUNNING;
\r
6208 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6211 void RtApiDs :: stopStream()
\r
6214 if ( stream_.state == STREAM_STOPPED ) {
\r
6215 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
6216 error( RtAudioError::WARNING );
\r
6220 HRESULT result = 0;
\r
6223 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6224 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6225 if ( handle->drainCounter == 0 ) {
\r
6226 handle->drainCounter = 2;
\r
6227 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
6230 stream_.state = STREAM_STOPPED;
\r
6232 MUTEX_LOCK( &stream_.mutex );
\r
6234 // Stop the buffer and clear memory
\r
6235 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6236 result = buffer->Stop();
\r
6237 if ( FAILED( result ) ) {
\r
6238 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
6239 errorText_ = errorStream_.str();
\r
6243 // Lock the buffer and clear it so that if we start to play again,
\r
6244 // we won't have old data playing.
\r
6245 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6246 if ( FAILED( result ) ) {
\r
6247 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
6248 errorText_ = errorStream_.str();
\r
6252 // Zero the DS buffer
\r
6253 ZeroMemory( audioPtr, dataLen );
\r
6255 // Unlock the DS buffer
\r
6256 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6257 if ( FAILED( result ) ) {
\r
6258 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
6259 errorText_ = errorStream_.str();
\r
6263 // If we start playing again, we must begin at beginning of buffer.
\r
6264 handle->bufferPointer[0] = 0;
\r
6267 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6268 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6272 stream_.state = STREAM_STOPPED;
\r
6274 if ( stream_.mode != DUPLEX )
\r
6275 MUTEX_LOCK( &stream_.mutex );
\r
6277 result = buffer->Stop();
\r
6278 if ( FAILED( result ) ) {
\r
6279 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
6280 errorText_ = errorStream_.str();
\r
6284 // Lock the buffer and clear it so that if we start to play again,
\r
6285 // we won't have old data playing.
\r
6286 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6287 if ( FAILED( result ) ) {
\r
6288 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
6289 errorText_ = errorStream_.str();
\r
6293 // Zero the DS buffer
\r
6294 ZeroMemory( audioPtr, dataLen );
\r
6296 // Unlock the DS buffer
\r
6297 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6298 if ( FAILED( result ) ) {
\r
6299 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
6300 errorText_ = errorStream_.str();
\r
6304 // If we start recording again, we must begin at beginning of buffer.
\r
6305 handle->bufferPointer[1] = 0;
\r
6309 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
6310 MUTEX_UNLOCK( &stream_.mutex );
\r
6312 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6315 void RtApiDs :: abortStream()
\r
6318 if ( stream_.state == STREAM_STOPPED ) {
\r
6319 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
6320 error( RtAudioError::WARNING );
\r
6324 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6325 handle->drainCounter = 2;
\r
// ------------------------------------------------------------------------
// RtApiDs::callbackEvent()
//
// Per-tick workhorse of the DirectSound backend, called in a loop by the
// callbackHandler() thread.  Visible responsibilities, in order:
//   1. back off while stopped/stopping; warn if called on a closed stream;
//   2. drain bookkeeping: once drainCounter passes nBuffers + 2, flip the
//      stream to STREAM_STOPPING and (for user-initiated stops) signal
//      handle->condition;
//   3. run the user callback (unless draining) and translate its return
//      value (1 = drain then stop, 2 = abort) into drainCounter state;
//   4. under stream_.mutex: on first pass, wait for the DS device pointers
//      to start rolling and seed handle->bufferPointer[]; then copy output
//      into / input out of the circular DirectSound buffers via
//      Lock/CopyMemory/Unlock, using the "safe" pointers to detect and
//      resync after over/underruns (xrun flags).
//
// NOTE(review): this chunk is a damaged extraction — the leading integers
// are original-file line numbers fused into the text, gaps in that
// numbering mark dropped lines (braces, 'goto unlock;', 'return;', local
// declarations such as 'HRESULT result;', 'char *buffer;', 'long
// bufferBytes;'), and '&current...' has been mojibake'd to '¤t...'
// (HTML entity &curren;).  Restore against the canonical RtAudio sources
// before compiling.  Comments below document intent only; code text is
// untouched.
6330 void RtApiDs :: callbackEvent()
// Stream not running yet: yield and let start/stop logic proceed.
6332 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6333 Sleep( 50 ); // sleep 50 milliseconds
6337 if ( stream_.state == STREAM_CLOSED ) {
6338 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6339 error( RtAudioError::WARNING );
6343 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6344 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6346 // Check if we were draining the stream and signal is finished.
6347 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6349 stream_.state = STREAM_STOPPING;
// Only a user-initiated (blocking) stop is waiting on this event.
6350 if ( handle->internalDrain == false )
6351 SetEvent( handle->condition );
6357 // Invoke user callback to get fresh output data UNLESS we are
6358 // draining stream.
6359 if ( handle->drainCounter == 0 ) {
6360 RtAudioCallback callback = (RtAudioCallback) info->callback;
6361 double streamTime = getStreamTime();
6362 RtAudioStreamStatus status = 0;
// Report and clear any xrun flags recorded since the previous tick.
6363 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6364 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6365 handle->xrun[0] = false;
6367 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6368 status |= RTAUDIO_INPUT_OVERFLOW;
6369 handle->xrun[1] = false;
6371 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6372 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 => abort (no drain); 1 => internal drain then stop.
6373 if ( cbReturnValue == 2 ) {
6374 stream_.state = STREAM_STOPPING;
6375 handle->drainCounter = 2;
6379 else if ( cbReturnValue == 1 ) {
6380 handle->drainCounter = 1;
6381 handle->internalDrain = true;
// NOTE(review): declarations of 'HRESULT result', 'long bufferBytes' and
// 'char *buffer' were dropped by the extraction somewhere around here.
6386 DWORD currentWritePointer, safeWritePointer;
6387 DWORD currentReadPointer, safeReadPointer;
6388 UINT nextWritePointer;
6390 LPVOID buffer1 = NULL;
6391 LPVOID buffer2 = NULL;
6392 DWORD bufferSize1 = 0;
6393 DWORD bufferSize2 = 0;
// Everything below touches shared stream state: hold the stream mutex.
6398 MUTEX_LOCK( &stream_.mutex );
6399 if ( stream_.state == STREAM_STOPPED ) {
6400 MUTEX_UNLOCK( &stream_.mutex );
// First pass after start: seed the buffer pointers once both devices roll.
6404 if ( buffersRolling == false ) {
6405 if ( stream_.mode == DUPLEX ) {
6406 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6408 // It takes a while for the devices to get rolling. As a result,
6409 // there's no guarantee that the capture and write device pointers
6410 // will move in lockstep. Wait here for both devices to start
6411 // rolling, and then set our buffer pointers accordingly.
6412 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6413 // bytes later than the write buffer.
6415 // Stub: a serious risk of having a pre-emptive scheduling round
6416 // take place between the two GetCurrentPosition calls... but I'm
6417 // really not sure how to solve the problem. Temporarily boost to
6418 // Realtime priority, maybe; but I'm not sure what priority the
6419 // DirectSound service threads run at. We *should* be roughly
6420 // within a ms or so of correct.
6422 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6423 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6425 DWORD startSafeWritePointer, startSafeReadPointer;
// Snapshot the initial positions, then poll until BOTH have advanced.
6427 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6428 if ( FAILED( result ) ) {
6429 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6430 errorText_ = errorStream_.str();
6431 MUTEX_UNLOCK( &stream_.mutex );
6432 error( RtAudioError::SYSTEM_ERROR );
6435 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6436 if ( FAILED( result ) ) {
6437 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6438 errorText_ = errorStream_.str();
6439 MUTEX_UNLOCK( &stream_.mutex );
6440 error( RtAudioError::SYSTEM_ERROR );
// NOTE(review): the enclosing 'while ( true ) {' polling loop was dropped
// by the extraction between the snapshot above and the re-reads below.
6444 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6445 if ( FAILED( result ) ) {
6446 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6447 errorText_ = errorStream_.str();
6448 MUTEX_UNLOCK( &stream_.mutex );
6449 error( RtAudioError::SYSTEM_ERROR );
6452 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6453 if ( FAILED( result ) ) {
6454 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6455 errorText_ = errorStream_.str();
6456 MUTEX_UNLOCK( &stream_.mutex );
6457 error( RtAudioError::SYSTEM_ERROR );
// Both pointers have moved off their start values: devices are rolling.
6460 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6464 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
// Place our write cursor a lead-time ahead of the device's safe pointer.
6466 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6467 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6468 handle->bufferPointer[1] = safeReadPointer;
6470 else if ( stream_.mode == OUTPUT ) {
6472 // Set the proper nextWritePosition after initial startup.
6473 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// NOTE(review): '¤tWritePointer' is mojibake for '&currentWritePointer'
// (HTML entity &curren;) — here and at three later GetCurrentPosition calls.
6474 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6475 if ( FAILED( result ) ) {
6476 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6477 errorText_ = errorStream_.str();
6478 MUTEX_UNLOCK( &stream_.mutex );
6479 error( RtAudioError::SYSTEM_ERROR );
6482 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6483 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6486 buffersRolling = true;
// ---- Playback side: push one buffer into the circular DS buffer. ----
6489 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6491 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6493 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6494 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6495 bufferBytes *= formatBytes( stream_.userFormat );
6496 memset( stream_.userBuffer[0], 0, bufferBytes );
6499 // Setup parameters and do buffer conversion if necessary.
6500 if ( stream_.doConvertBuffer[0] ) {
6501 buffer = stream_.deviceBuffer;
6502 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6503 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6504 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6507 buffer = stream_.userBuffer[0];
6508 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6509 bufferBytes *= formatBytes( stream_.userFormat );
6512 // No byte swapping necessary in DirectSound implementation.
6514 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6515 // unsigned. So, we need to convert our signed 8-bit data here to
6517 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6518 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6520 DWORD dsBufferSize = handle->dsBufferSize[0];
6521 nextWritePointer = handle->bufferPointer[0];
6523 DWORD endWrite, leadPointer;
6525 // Find out where the read and "safe write" pointers are.
6526 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6527 if ( FAILED( result ) ) {
6528 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6529 errorText_ = errorStream_.str();
6530 MUTEX_UNLOCK( &stream_.mutex );
6531 error( RtAudioError::SYSTEM_ERROR );
6535 // We will copy our output buffer into the region between
6536 // safeWritePointer and leadPointer. If leadPointer is not
6537 // beyond the next endWrite position, wait until it is.
6538 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6539 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6540 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6541 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6542 endWrite = nextWritePointer + bufferBytes;
6544 // Check whether the entire write region is behind the play pointer.
6545 if ( leadPointer >= endWrite ) break;
6547 // If we are here, then we must wait until the leadPointer advances
6548 // beyond the end of our next write region. We use the
6549 // Sleep() function to suspend operation until that happens.
6550 double millis = ( endWrite - leadPointer ) * 1000.0;
6551 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6552 if ( millis < 1.0 ) millis = 1.0;
6553 Sleep( (DWORD) millis );
// Our write region overlaps the device's play region => underrun; resync.
6556 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6557 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6558 // We've strayed into the forbidden zone ... resync the read pointer.
6559 handle->xrun[0] = true;
6560 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6561 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6562 handle->bufferPointer[0] = nextWritePointer;
6563 endWrite = nextWritePointer + bufferBytes;
6566 // Lock free space in the buffer
6567 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6568 &bufferSize1, &buffer2, &bufferSize2, 0 );
6569 if ( FAILED( result ) ) {
6570 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6571 errorText_ = errorStream_.str();
6572 MUTEX_UNLOCK( &stream_.mutex );
6573 error( RtAudioError::SYSTEM_ERROR );
6577 // Copy our buffer into the DS buffer
6578 CopyMemory( buffer1, buffer, bufferSize1 );
6579 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6581 // Update our buffer offset and unlock sound buffer
// NOTE(review): 'result =' appears to have been dropped from this Unlock
// call — as written, the FAILED(result) test below re-checks the earlier
// Lock result.  Confirm against the canonical sources.
6582 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6583 if ( FAILED( result ) ) {
6584 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6585 errorText_ = errorStream_.str();
6586 MUTEX_UNLOCK( &stream_.mutex );
6587 error( RtAudioError::SYSTEM_ERROR );
6590 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6591 handle->bufferPointer[0] = nextWritePointer;
6594 // Don't bother draining input
6595 if ( handle->drainCounter ) {
6596 handle->drainCounter++;
// ---- Capture side: pull one buffer out of the circular DS buffer. ----
6600 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6602 // Setup parameters.
6603 if ( stream_.doConvertBuffer[1] ) {
6604 buffer = stream_.deviceBuffer;
6605 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6606 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6609 buffer = stream_.userBuffer[1];
6610 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6611 bufferBytes *= formatBytes( stream_.userFormat );
6614 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6615 long nextReadPointer = handle->bufferPointer[1];
6616 DWORD dsBufferSize = handle->dsBufferSize[1];
6618 // Find out where the write and "safe read" pointers are.
6619 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6620 if ( FAILED( result ) ) {
6621 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6622 errorText_ = errorStream_.str();
6623 MUTEX_UNLOCK( &stream_.mutex );
6624 error( RtAudioError::SYSTEM_ERROR );
6628 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6629 DWORD endRead = nextReadPointer + bufferBytes;
6631 // Handling depends on whether we are INPUT or DUPLEX.
6632 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6633 // then a wait here will drag the write pointers into the forbidden zone.
6635 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6636 // it's in a safe position. This causes dropouts, but it seems to be the only
6637 // practical way to sync up the read and write pointers reliably, given the
6638 // the very complex relationship between phase and increment of the read and write
6641 // In order to minimize audible dropouts in DUPLEX mode, we will
6642 // provide a pre-roll period of 0.5 seconds in which we return
6643 // zeros from the read buffer while the pointers sync up.
6645 if ( stream_.mode == DUPLEX ) {
6646 if ( safeReadPointer < endRead ) {
6647 if ( duplexPrerollBytes <= 0 ) {
6648 // Pre-roll time over. Be more agressive.
6649 int adjustment = endRead-safeReadPointer;
6651 handle->xrun[1] = true;
6653 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6654 // and perform fine adjustments later.
6655 // - small adjustments: back off by twice as much.
6656 if ( adjustment >= 2*bufferBytes )
6657 nextReadPointer = safeReadPointer-2*bufferBytes;
6659 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6661 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6665 // In pre=roll time. Just do it.
6666 nextReadPointer = safeReadPointer - bufferBytes;
6667 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6669 endRead = nextReadPointer + bufferBytes;
6672 else { // mode == INPUT
6673 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6674 // See comments for playback.
6675 double millis = (endRead - safeReadPointer) * 1000.0;
6676 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6677 if ( millis < 1.0 ) millis = 1.0;
6678 Sleep( (DWORD) millis );
6680 // Wake up and find out where we are now.
6681 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6682 if ( FAILED( result ) ) {
6683 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6684 errorText_ = errorStream_.str();
6685 MUTEX_UNLOCK( &stream_.mutex );
6686 error( RtAudioError::SYSTEM_ERROR );
6690 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6694 // Lock free space in the buffer
6695 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6696 &bufferSize1, &buffer2, &bufferSize2, 0 );
6697 if ( FAILED( result ) ) {
6698 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6699 errorText_ = errorStream_.str();
6700 MUTEX_UNLOCK( &stream_.mutex );
6701 error( RtAudioError::SYSTEM_ERROR );
// During duplex pre-roll we hand the user zeros instead of stale capture data.
6705 if ( duplexPrerollBytes <= 0 ) {
6706 // Copy our buffer into the DS buffer
6707 CopyMemory( buffer, buffer1, bufferSize1 );
6708 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6711 memset( buffer, 0, bufferSize1 );
6712 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6713 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6716 // Update our buffer offset and unlock sound buffer
6717 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
// NOTE(review): as on the playback side, 'result =' looks dropped from
// this Unlock call; the FAILED(result) below would re-test the Lock result.
6718 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6719 if ( FAILED( result ) ) {
6720 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6721 errorText_ = errorStream_.str();
6722 MUTEX_UNLOCK( &stream_.mutex );
6723 error( RtAudioError::SYSTEM_ERROR );
6726 handle->bufferPointer[1] = nextReadPointer;
6728 // No byte swapping necessary in DirectSound implementation.
6730 // If necessary, convert 8-bit data from unsigned to signed.
6731 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6732 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6734 // Do buffer conversion if necessary.
6735 if ( stream_.doConvertBuffer[1] )
6736 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6740 MUTEX_UNLOCK( &stream_.mutex );
6741 RtApi::tickStreamTime();
\r
6744 // Definitions for utility functions and callbacks
\r
6745 // specific to the DirectSound implementation.
\r
6747 static unsigned __stdcall callbackHandler( void *ptr )
\r
6749 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6750 RtApiDs *object = (RtApiDs *) info->object;
\r
6751 bool* isRunning = &info->isRunning;
\r
6753 while ( *isRunning == true ) {
\r
6754 object->callbackEvent();
\r
6757 _endthreadex( 0 );
\r
6761 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6762 LPCTSTR description,
\r
6763 LPCTSTR /*module*/,
\r
6764 LPVOID lpContext )
\r
6766 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6767 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6770 bool validDevice = false;
\r
6771 if ( probeInfo.isInput == true ) {
\r
6773 LPDIRECTSOUNDCAPTURE object;
\r
6775 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6776 if ( hr != DS_OK ) return TRUE;
\r
6778 caps.dwSize = sizeof(caps);
\r
6779 hr = object->GetCaps( &caps );
\r
6780 if ( hr == DS_OK ) {
\r
6781 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6782 validDevice = true;
\r
6784 object->Release();
\r
6788 LPDIRECTSOUND object;
\r
6789 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6790 if ( hr != DS_OK ) return TRUE;
\r
6792 caps.dwSize = sizeof(caps);
\r
6793 hr = object->GetCaps( &caps );
\r
6794 if ( hr == DS_OK ) {
\r
6795 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6796 validDevice = true;
\r
6798 object->Release();
\r
6801 // If good device, then save its name and guid.
\r
6802 std::string name = convertCharPointerToStdString( description );
\r
6803 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6804 if ( lpguid == NULL )
\r
6805 name = "Default Device";
\r
6806 if ( validDevice ) {
\r
6807 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6808 if ( dsDevices[i].name == name ) {
\r
6809 dsDevices[i].found = true;
\r
6810 if ( probeInfo.isInput ) {
\r
6811 dsDevices[i].id[1] = lpguid;
\r
6812 dsDevices[i].validId[1] = true;
\r
6815 dsDevices[i].id[0] = lpguid;
\r
6816 dsDevices[i].validId[0] = true;
\r
6823 device.name = name;
\r
6824 device.found = true;
\r
6825 if ( probeInfo.isInput ) {
\r
6826 device.id[1] = lpguid;
\r
6827 device.validId[1] = true;
\r
6830 device.id[0] = lpguid;
\r
6831 device.validId[0] = true;
\r
6833 dsDevices.push_back( device );
\r
6839 static const char* getErrorString( int code )
\r
6843 case DSERR_ALLOCATED:
\r
6844 return "Already allocated";
\r
6846 case DSERR_CONTROLUNAVAIL:
\r
6847 return "Control unavailable";
\r
6849 case DSERR_INVALIDPARAM:
\r
6850 return "Invalid parameter";
\r
6852 case DSERR_INVALIDCALL:
\r
6853 return "Invalid call";
\r
6855 case DSERR_GENERIC:
\r
6856 return "Generic error";
\r
6858 case DSERR_PRIOLEVELNEEDED:
\r
6859 return "Priority level needed";
\r
6861 case DSERR_OUTOFMEMORY:
\r
6862 return "Out of memory";
\r
6864 case DSERR_BADFORMAT:
\r
6865 return "The sample rate or the channel format is not supported";
\r
6867 case DSERR_UNSUPPORTED:
\r
6868 return "Not supported";
\r
6870 case DSERR_NODRIVER:
\r
6871 return "No driver";
\r
6873 case DSERR_ALREADYINITIALIZED:
\r
6874 return "Already initialized";
\r
6876 case DSERR_NOAGGREGATION:
\r
6877 return "No aggregation";
\r
6879 case DSERR_BUFFERLOST:
\r
6880 return "Buffer lost";
\r
6882 case DSERR_OTHERAPPHASPRIO:
\r
6883 return "Another application already has priority";
\r
6885 case DSERR_UNINITIALIZED:
\r
6886 return "Uninitialized";
\r
6889 return "DirectSound unknown error";
\r
6892 //******************** End of __WINDOWS_DS__ *********************//
\r
6896 #if defined(__LINUX_ALSA__)
\r
6898 #include <alsa/asoundlib.h>
\r
6899 #include <unistd.h>
\r
6901 // A structure to hold various information related to the ALSA API
\r
6902 // implementation.
\r
6903 struct AlsaHandle {
\r
6904 snd_pcm_t *handles[2];
\r
6905 bool synchronized;
\r
6907 pthread_cond_t runnable_cv;
\r
6911 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6914 static void *alsaCallbackHandler( void * ptr );
\r
6916 RtApiAlsa :: RtApiAlsa()
\r
6918 // Nothing to do here.
\r
6921 RtApiAlsa :: ~RtApiAlsa()
\r
6923 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6926 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6928 unsigned nDevices = 0;
\r
6929 int result, subdevice, card;
\r
6931 snd_ctl_t *handle;
\r
6933 // Count cards and devices
\r
6935 snd_card_next( &card );
\r
6936 while ( card >= 0 ) {
\r
6937 sprintf( name, "hw:%d", card );
\r
6938 result = snd_ctl_open( &handle, name, 0 );
\r
6939 if ( result < 0 ) {
\r
6940 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6941 errorText_ = errorStream_.str();
\r
6942 error( RtAudioError::WARNING );
\r
6947 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6948 if ( result < 0 ) {
\r
6949 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6950 errorText_ = errorStream_.str();
\r
6951 error( RtAudioError::WARNING );
\r
6954 if ( subdevice < 0 )
\r
6959 snd_ctl_close( handle );
\r
6960 snd_card_next( &card );
\r
6963 result = snd_ctl_open( &handle, "default", 0 );
\r
6964 if (result == 0) {
\r
6966 snd_ctl_close( handle );
\r
// ------------------------------------------------------------------------
// RtApiAlsa::getDeviceInfo( device )
//
// Probe one ALSA device (by enumeration index, same ordering as
// getDeviceCount) and fill an RtAudio::DeviceInfo: output/input/duplex
// channel counts, supported sample rates, preferred rate, native formats
// and display name.  On probe failure, warnings are issued and info is
// returned with probed == false.
//
// NOTE(review): damaged extraction — leading integers are original line
// numbers fused into the text; numbering gaps mark dropped lines (braces,
// 'return info;', 'goto foundDevice;', labels 'foundDevice:' /
// 'captureProbe:' / 'probeParameters:', 'char name[64];', 'char *cardname;'
// declarations), and '&params' was mojibake'd to '¶ms' (&para;).
// Restore against canonical RtAudio sources before compiling.  Code text
// below is untouched; only comments were added.
6972 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6974 RtAudio::DeviceInfo info;
6975 info.probed = false;
6977 unsigned nDevices = 0;
6978 int result, subdevice, card;
6980 snd_ctl_t *chandle;
// --- Phase 1: re-enumerate devices to translate the index into an
// "hw:card,subdevice" (or "default") name string. ---
6982 // Count cards and devices
6985 snd_card_next( &card );
6986 while ( card >= 0 ) {
6987 sprintf( name, "hw:%d", card );
6988 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
6989 if ( result < 0 ) {
6990 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6991 errorText_ = errorStream_.str();
6992 error( RtAudioError::WARNING );
6997 result = snd_ctl_pcm_next_device( chandle, &subdevice );
6998 if ( result < 0 ) {
6999 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7000 errorText_ = errorStream_.str();
7001 error( RtAudioError::WARNING );
7004 if ( subdevice < 0 ) break;
7005 if ( nDevices == device ) {
7006 sprintf( name, "hw:%d,%d", card, subdevice );
7012 snd_ctl_close( chandle );
7013 snd_card_next( &card );
// The "default" device occupies the last index, as in getDeviceCount().
7016 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7017 if ( result == 0 ) {
7018 if ( nDevices == device ) {
7019 strcpy( name, "default" );
7025 if ( nDevices == 0 ) {
7026 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7027 error( RtAudioError::INVALID_USE );
7031 if ( device >= nDevices ) {
7032 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7033 error( RtAudioError::INVALID_USE );
7039 // If a stream is already open, we cannot probe the stream devices.
7040 // Thus, use the saved results.
7041 if ( stream_.state != STREAM_CLOSED &&
7042 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7043 snd_ctl_close( chandle );
7044 if ( device >= devices_.size() ) {
7045 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7046 error( RtAudioError::WARNING );
7049 return devices_[ device ];
// --- Phase 2: probe playback then capture channel counts. ---
7052 int openMode = SND_PCM_ASYNC;
7053 snd_pcm_stream_t stream;
7054 snd_pcm_info_t *pcminfo;
7055 snd_pcm_info_alloca( &pcminfo );
7056 snd_pcm_t *phandle;
7057 snd_pcm_hw_params_t *params;
// NOTE(review): '¶ms' is mojibake for '&params' (HTML entity &para;).
7058 snd_pcm_hw_params_alloca( ¶ms );
7060 // First try for playback unless default device (which has subdev -1)
7061 stream = SND_PCM_STREAM_PLAYBACK;
7062 snd_pcm_info_set_stream( pcminfo, stream );
7063 if ( subdevice != -1 ) {
7064 snd_pcm_info_set_device( pcminfo, subdevice );
7065 snd_pcm_info_set_subdevice( pcminfo, 0 );
7067 result = snd_ctl_pcm_info( chandle, pcminfo );
7068 if ( result < 0 ) {
7069 // Device probably doesn't support playback.
7070 goto captureProbe;
7074 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7075 if ( result < 0 ) {
7076 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7077 errorText_ = errorStream_.str();
7078 error( RtAudioError::WARNING );
7079 goto captureProbe;
7082 // The device is open ... fill the parameter structure.
7083 result = snd_pcm_hw_params_any( phandle, params );
7084 if ( result < 0 ) {
7085 snd_pcm_close( phandle );
7086 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7087 errorText_ = errorStream_.str();
7088 error( RtAudioError::WARNING );
7089 goto captureProbe;
7092 // Get output channel information.
7093 unsigned int value;
7094 result = snd_pcm_hw_params_get_channels_max( params, &value );
7095 if ( result < 0 ) {
7096 snd_pcm_close( phandle );
7097 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7098 errorText_ = errorStream_.str();
7099 error( RtAudioError::WARNING );
7100 goto captureProbe;
7102 info.outputChannels = value;
7103 snd_pcm_close( phandle );
// NOTE(review): the 'captureProbe:' label was dropped by the extraction here.
7106 stream = SND_PCM_STREAM_CAPTURE;
7107 snd_pcm_info_set_stream( pcminfo, stream );
7109 // Now try for capture unless default device (with subdev = -1)
7110 if ( subdevice != -1 ) {
7111 result = snd_ctl_pcm_info( chandle, pcminfo );
7112 snd_ctl_close( chandle );
7113 if ( result < 0 ) {
7114 // Device probably doesn't support capture.
7115 if ( info.outputChannels == 0 ) return info;
7116 goto probeParameters;
7120 snd_ctl_close( chandle );
7122 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7123 if ( result < 0 ) {
7124 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7125 errorText_ = errorStream_.str();
7126 error( RtAudioError::WARNING );
7127 if ( info.outputChannels == 0 ) return info;
7128 goto probeParameters;
7131 // The device is open ... fill the parameter structure.
7132 result = snd_pcm_hw_params_any( phandle, params );
7133 if ( result < 0 ) {
7134 snd_pcm_close( phandle );
7135 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7136 errorText_ = errorStream_.str();
7137 error( RtAudioError::WARNING );
7138 if ( info.outputChannels == 0 ) return info;
7139 goto probeParameters;
7142 result = snd_pcm_hw_params_get_channels_max( params, &value );
7143 if ( result < 0 ) {
7144 snd_pcm_close( phandle );
7145 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7146 errorText_ = errorStream_.str();
7147 error( RtAudioError::WARNING );
7148 if ( info.outputChannels == 0 ) return info;
7149 goto probeParameters;
7151 info.inputChannels = value;
7152 snd_pcm_close( phandle );
7154 // If device opens for both playback and capture, we determine the channels.
7155 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7156 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7158 // ALSA doesn't provide default devices so we'll use the first available one.
7159 if ( device == 0 && info.outputChannels > 0 )
7160 info.isDefaultOutput = true;
7161 if ( device == 0 && info.inputChannels > 0 )
7162 info.isDefaultInput = true;
// --- Phase 3 ('probeParameters:' label dropped here by the extraction):
// probe sample rates and data formats using whichever direction has more
// channels. ---
7165 // At this point, we just need to figure out the supported data
7166 // formats and sample rates. We'll proceed by opening the device in
7167 // the direction with the maximum number of channels, or playback if
7168 // they are equal. This might limit our sample rate options, but so
7171 if ( info.outputChannels >= info.inputChannels )
7172 stream = SND_PCM_STREAM_PLAYBACK;
7174 stream = SND_PCM_STREAM_CAPTURE;
7175 snd_pcm_info_set_stream( pcminfo, stream );
7177 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7178 if ( result < 0 ) {
7179 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7180 errorText_ = errorStream_.str();
7181 error( RtAudioError::WARNING );
7185 // The device is open ... fill the parameter structure.
7186 result = snd_pcm_hw_params_any( phandle, params );
7187 if ( result < 0 ) {
7188 snd_pcm_close( phandle );
7189 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7190 errorText_ = errorStream_.str();
7191 error( RtAudioError::WARNING );
7195 // Test our discrete set of sample rate values.
7196 info.sampleRates.clear();
7197 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7198 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7199 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Preferred rate: the highest supported rate that does not exceed 48 kHz.
7201 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7202 info.preferredSampleRate = SAMPLE_RATES[i];
7205 if ( info.sampleRates.size() == 0 ) {
7206 snd_pcm_close( phandle );
7207 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7208 errorText_ = errorStream_.str();
7209 error( RtAudioError::WARNING );
7213 // Probe the supported data formats ... we don't care about endian-ness just yet
7214 snd_pcm_format_t format;
7215 info.nativeFormats = 0;
7216 format = SND_PCM_FORMAT_S8;
7217 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7218 info.nativeFormats |= RTAUDIO_SINT8;
7219 format = SND_PCM_FORMAT_S16;
7220 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7221 info.nativeFormats |= RTAUDIO_SINT16;
7222 format = SND_PCM_FORMAT_S24;
7223 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7224 info.nativeFormats |= RTAUDIO_SINT24;
7225 format = SND_PCM_FORMAT_S32;
7226 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7227 info.nativeFormats |= RTAUDIO_SINT32;
7228 format = SND_PCM_FORMAT_FLOAT;
7229 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7230 info.nativeFormats |= RTAUDIO_FLOAT32;
7231 format = SND_PCM_FORMAT_FLOAT64;
7232 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7233 info.nativeFormats |= RTAUDIO_FLOAT64;
7235 // Check that we have at least one supported format
7236 if ( info.nativeFormats == 0 ) {
7237 snd_pcm_close( phandle );
7238 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7239 errorText_ = errorStream_.str();
7240 error( RtAudioError::WARNING );
7244 // Get the device name
// Prefer the human-readable card name, e.g. "hw:CardName,0".
7246 result = snd_card_get_name( card, &cardname );
7247 if ( result >= 0 ) {
7248 sprintf( name, "hw:%s,%d", cardname, subdevice );
7253 // That's all ... close the device and return
7254 snd_pcm_close( phandle );
7255 info.probed = true;
\r
7259 void RtApiAlsa :: saveDeviceInfo( void )
\r
7263 unsigned int nDevices = getDeviceCount();
\r
7264 devices_.resize( nDevices );
\r
7265 for ( unsigned int i=0; i<nDevices; i++ )
\r
7266 devices_[i] = getDeviceInfo( i );
\r
7269 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7270 unsigned int firstChannel, unsigned int sampleRate,
\r
7271 RtAudioFormat format, unsigned int *bufferSize,
\r
7272 RtAudio::StreamOptions *options )
\r
7275 #if defined(__RTAUDIO_DEBUG__)
\r
7276 snd_output_t *out;
\r
7277 snd_output_stdio_attach(&out, stderr, 0);
\r
7280 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7282 unsigned nDevices = 0;
\r
7283 int result, subdevice, card;
\r
7285 snd_ctl_t *chandle;
\r
7287 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7288 snprintf(name, sizeof(name), "%s", "default");
\r
7290 // Count cards and devices
\r
7292 snd_card_next( &card );
\r
7293 while ( card >= 0 ) {
\r
7294 sprintf( name, "hw:%d", card );
\r
7295 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7296 if ( result < 0 ) {
\r
7297 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7298 errorText_ = errorStream_.str();
\r
7303 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7304 if ( result < 0 ) break;
\r
7305 if ( subdevice < 0 ) break;
\r
7306 if ( nDevices == device ) {
\r
7307 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7308 snd_ctl_close( chandle );
\r
7313 snd_ctl_close( chandle );
\r
7314 snd_card_next( &card );
\r
7317 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7318 if ( result == 0 ) {
\r
7319 if ( nDevices == device ) {
\r
7320 strcpy( name, "default" );
\r
7326 if ( nDevices == 0 ) {
\r
7327 // This should not happen because a check is made before this function is called.
\r
7328 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7332 if ( device >= nDevices ) {
\r
7333 // This should not happen because a check is made before this function is called.
\r
7334 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7341 // The getDeviceInfo() function will not work for a device that is
\r
7342 // already open. Thus, we'll probe the system before opening a
\r
7343 // stream and save the results for use by getDeviceInfo().
\r
7344 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7345 this->saveDeviceInfo();
\r
7347 snd_pcm_stream_t stream;
\r
7348 if ( mode == OUTPUT )
\r
7349 stream = SND_PCM_STREAM_PLAYBACK;
\r
7351 stream = SND_PCM_STREAM_CAPTURE;
\r
7353 snd_pcm_t *phandle;
\r
7354 int openMode = SND_PCM_ASYNC;
\r
7355 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7356 if ( result < 0 ) {
\r
7357 if ( mode == OUTPUT )
\r
7358 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7360 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7361 errorText_ = errorStream_.str();
\r
7365 // Fill the parameter structure.
\r
7366 snd_pcm_hw_params_t *hw_params;
\r
7367 snd_pcm_hw_params_alloca( &hw_params );
\r
7368 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7369 if ( result < 0 ) {
\r
7370 snd_pcm_close( phandle );
\r
7371 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7372 errorText_ = errorStream_.str();
\r
7376 #if defined(__RTAUDIO_DEBUG__)
\r
7377 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7378 snd_pcm_hw_params_dump( hw_params, out );
\r
7381 // Set access ... check user preference.
\r
7382 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7383 stream_.userInterleaved = false;
\r
7384 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7385 if ( result < 0 ) {
\r
7386 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7387 stream_.deviceInterleaved[mode] = true;
\r
7390 stream_.deviceInterleaved[mode] = false;
\r
7393 stream_.userInterleaved = true;
\r
7394 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7395 if ( result < 0 ) {
\r
7396 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7397 stream_.deviceInterleaved[mode] = false;
\r
7400 stream_.deviceInterleaved[mode] = true;
\r
7403 if ( result < 0 ) {
\r
7404 snd_pcm_close( phandle );
\r
7405 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7406 errorText_ = errorStream_.str();
\r
7410 // Determine how to set the device format.
\r
7411 stream_.userFormat = format;
\r
7412 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7414 if ( format == RTAUDIO_SINT8 )
\r
7415 deviceFormat = SND_PCM_FORMAT_S8;
\r
7416 else if ( format == RTAUDIO_SINT16 )
\r
7417 deviceFormat = SND_PCM_FORMAT_S16;
\r
7418 else if ( format == RTAUDIO_SINT24 )
\r
7419 deviceFormat = SND_PCM_FORMAT_S24;
\r
7420 else if ( format == RTAUDIO_SINT32 )
\r
7421 deviceFormat = SND_PCM_FORMAT_S32;
\r
7422 else if ( format == RTAUDIO_FLOAT32 )
\r
7423 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7424 else if ( format == RTAUDIO_FLOAT64 )
\r
7425 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7427 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7428 stream_.deviceFormat[mode] = format;
\r
7432 // The user requested format is not natively supported by the device.
\r
7433 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7434 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7435 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7439 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7440 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7441 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7445 deviceFormat = SND_PCM_FORMAT_S32;
\r
7446 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7447 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7451 deviceFormat = SND_PCM_FORMAT_S24;
\r
7452 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7453 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7457 deviceFormat = SND_PCM_FORMAT_S16;
\r
7458 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7459 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7463 deviceFormat = SND_PCM_FORMAT_S8;
\r
7464 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7465 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7469 // If we get here, no supported format was found.
\r
7470 snd_pcm_close( phandle );
\r
7471 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7472 errorText_ = errorStream_.str();
\r
7476 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7477 if ( result < 0 ) {
\r
7478 snd_pcm_close( phandle );
\r
7479 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7480 errorText_ = errorStream_.str();
\r
7484 // Determine whether byte-swaping is necessary.
\r
7485 stream_.doByteSwap[mode] = false;
\r
7486 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7487 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7488 if ( result == 0 )
\r
7489 stream_.doByteSwap[mode] = true;
\r
7490 else if (result < 0) {
\r
7491 snd_pcm_close( phandle );
\r
7492 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7493 errorText_ = errorStream_.str();
\r
7498 // Set the sample rate.
\r
7499 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7500 if ( result < 0 ) {
\r
7501 snd_pcm_close( phandle );
\r
7502 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7503 errorText_ = errorStream_.str();
\r
7507 // Determine the number of channels for this device. We support a possible
\r
7508 // minimum device channel number > than the value requested by the user.
\r
7509 stream_.nUserChannels[mode] = channels;
\r
7510 unsigned int value;
\r
7511 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7512 unsigned int deviceChannels = value;
\r
7513 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7514 snd_pcm_close( phandle );
\r
7515 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7516 errorText_ = errorStream_.str();
\r
7520 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7521 if ( result < 0 ) {
\r
7522 snd_pcm_close( phandle );
\r
7523 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7524 errorText_ = errorStream_.str();
\r
7527 deviceChannels = value;
\r
7528 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7529 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7531 // Set the device channels.
\r
7532 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7533 if ( result < 0 ) {
\r
7534 snd_pcm_close( phandle );
\r
7535 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7536 errorText_ = errorStream_.str();
\r
7540 // Set the buffer (or period) size.
\r
7542 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7543 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7544 if ( result < 0 ) {
\r
7545 snd_pcm_close( phandle );
\r
7546 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7547 errorText_ = errorStream_.str();
\r
7550 *bufferSize = periodSize;
\r
7552 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7553 unsigned int periods = 0;
\r
7554 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7555 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7556 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7557 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7558 if ( result < 0 ) {
\r
7559 snd_pcm_close( phandle );
\r
7560 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7561 errorText_ = errorStream_.str();
\r
7565 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7566 // MUST be the same in both directions!
\r
7567 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7568 snd_pcm_close( phandle );
\r
7569 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7570 errorText_ = errorStream_.str();
\r
7574 stream_.bufferSize = *bufferSize;
\r
7576 // Install the hardware configuration
\r
7577 result = snd_pcm_hw_params( phandle, hw_params );
\r
7578 if ( result < 0 ) {
\r
7579 snd_pcm_close( phandle );
\r
7580 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7581 errorText_ = errorStream_.str();
\r
7585 #if defined(__RTAUDIO_DEBUG__)
\r
7586 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7587 snd_pcm_hw_params_dump( hw_params, out );
\r
7590 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7591 snd_pcm_sw_params_t *sw_params = NULL;
\r
7592 snd_pcm_sw_params_alloca( &sw_params );
\r
7593 snd_pcm_sw_params_current( phandle, sw_params );
\r
7594 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7595 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7596 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7598 // The following two settings were suggested by Theo Veenker
\r
7599 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7600 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7602 // here are two options for a fix
\r
7603 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7604 snd_pcm_uframes_t val;
\r
7605 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7606 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7608 result = snd_pcm_sw_params( phandle, sw_params );
\r
7609 if ( result < 0 ) {
\r
7610 snd_pcm_close( phandle );
\r
7611 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7612 errorText_ = errorStream_.str();
\r
7616 #if defined(__RTAUDIO_DEBUG__)
\r
7617 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7618 snd_pcm_sw_params_dump( sw_params, out );
\r
7621 // Set flags for buffer conversion
\r
7622 stream_.doConvertBuffer[mode] = false;
\r
7623 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7624 stream_.doConvertBuffer[mode] = true;
\r
7625 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7626 stream_.doConvertBuffer[mode] = true;
\r
7627 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7628 stream_.nUserChannels[mode] > 1 )
\r
7629 stream_.doConvertBuffer[mode] = true;
\r
7631 // Allocate the ApiHandle if necessary and then save.
\r
7632 AlsaHandle *apiInfo = 0;
\r
7633 if ( stream_.apiHandle == 0 ) {
\r
7635 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7637 catch ( std::bad_alloc& ) {
\r
7638 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7642 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7643 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7647 stream_.apiHandle = (void *) apiInfo;
\r
7648 apiInfo->handles[0] = 0;
\r
7649 apiInfo->handles[1] = 0;
\r
7652 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7654 apiInfo->handles[mode] = phandle;
\r
7657 // Allocate necessary internal buffers.
\r
7658 unsigned long bufferBytes;
\r
7659 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7660 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7661 if ( stream_.userBuffer[mode] == NULL ) {
\r
7662 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7666 if ( stream_.doConvertBuffer[mode] ) {
\r
7668 bool makeBuffer = true;
\r
7669 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7670 if ( mode == INPUT ) {
\r
7671 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7672 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7673 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7677 if ( makeBuffer ) {
\r
7678 bufferBytes *= *bufferSize;
\r
7679 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7680 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7681 if ( stream_.deviceBuffer == NULL ) {
\r
7682 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7688 stream_.sampleRate = sampleRate;
\r
7689 stream_.nBuffers = periods;
\r
7690 stream_.device[mode] = device;
\r
7691 stream_.state = STREAM_STOPPED;
\r
7693 // Setup the buffer conversion information structure.
\r
7694 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7696 // Setup thread if necessary.
\r
7697 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7698 // We had already set up an output stream.
\r
7699 stream_.mode = DUPLEX;
\r
7700 // Link the streams if possible.
\r
7701 apiInfo->synchronized = false;
\r
7702 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7703 apiInfo->synchronized = true;
\r
7705 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7706 error( RtAudioError::WARNING );
\r
7710 stream_.mode = mode;
\r
7712 // Setup callback thread.
\r
7713 stream_.callbackInfo.object = (void *) this;
\r
7715 // Set the thread attributes for joinable and realtime scheduling
\r
7716 // priority (optional). The higher priority will only take affect
\r
7717 // if the program is run as root or suid. Note, under Linux
\r
7718 // processes with CAP_SYS_NICE privilege, a user can change
\r
7719 // scheduling policy and priority (thus need not be root). See
\r
7720 // POSIX "capabilities".
\r
7721 pthread_attr_t attr;
\r
7722 pthread_attr_init( &attr );
\r
7723 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7725 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7726 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7727 // We previously attempted to increase the audio callback priority
\r
7728 // to SCHED_RR here via the attributes. However, while no errors
\r
7729 // were reported in doing so, it did not work. So, now this is
\r
7730 // done in the alsaCallbackHandler function.
\r
7731 stream_.callbackInfo.doRealtime = true;
\r
7732 int priority = options->priority;
\r
7733 int min = sched_get_priority_min( SCHED_RR );
\r
7734 int max = sched_get_priority_max( SCHED_RR );
\r
7735 if ( priority < min ) priority = min;
\r
7736 else if ( priority > max ) priority = max;
\r
7737 stream_.callbackInfo.priority = priority;
\r
7741 stream_.callbackInfo.isRunning = true;
\r
7742 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7743 pthread_attr_destroy( &attr );
\r
7745 stream_.callbackInfo.isRunning = false;
\r
7746 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7755 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7756 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7757 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7759 stream_.apiHandle = 0;
\r
7762 if ( phandle) snd_pcm_close( phandle );
\r
7764 for ( int i=0; i<2; i++ ) {
\r
7765 if ( stream_.userBuffer[i] ) {
\r
7766 free( stream_.userBuffer[i] );
\r
7767 stream_.userBuffer[i] = 0;
\r
7771 if ( stream_.deviceBuffer ) {
\r
7772 free( stream_.deviceBuffer );
\r
7773 stream_.deviceBuffer = 0;
\r
7776 stream_.state = STREAM_CLOSED;
\r
7780 void RtApiAlsa :: closeStream()
\r
7782 if ( stream_.state == STREAM_CLOSED ) {
\r
7783 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7784 error( RtAudioError::WARNING );
\r
7788 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7789 stream_.callbackInfo.isRunning = false;
\r
7790 MUTEX_LOCK( &stream_.mutex );
\r
7791 if ( stream_.state == STREAM_STOPPED ) {
\r
7792 apiInfo->runnable = true;
\r
7793 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7795 MUTEX_UNLOCK( &stream_.mutex );
\r
7796 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7798 if ( stream_.state == STREAM_RUNNING ) {
\r
7799 stream_.state = STREAM_STOPPED;
\r
7800 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7801 snd_pcm_drop( apiInfo->handles[0] );
\r
7802 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7803 snd_pcm_drop( apiInfo->handles[1] );
\r
7807 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7808 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7809 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7811 stream_.apiHandle = 0;
\r
7814 for ( int i=0; i<2; i++ ) {
\r
7815 if ( stream_.userBuffer[i] ) {
\r
7816 free( stream_.userBuffer[i] );
\r
7817 stream_.userBuffer[i] = 0;
\r
7821 if ( stream_.deviceBuffer ) {
\r
7822 free( stream_.deviceBuffer );
\r
7823 stream_.deviceBuffer = 0;
\r
7826 stream_.mode = UNINITIALIZED;
\r
7827 stream_.state = STREAM_CLOSED;
\r
7830 void RtApiAlsa :: startStream()
\r
7832 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7835 if ( stream_.state == STREAM_RUNNING ) {
\r
7836 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7837 error( RtAudioError::WARNING );
\r
7841 MUTEX_LOCK( &stream_.mutex );
\r
7844 snd_pcm_state_t state;
\r
7845 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7846 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7847 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7848 state = snd_pcm_state( handle[0] );
\r
7849 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7850 result = snd_pcm_prepare( handle[0] );
\r
7851 if ( result < 0 ) {
\r
7852 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7853 errorText_ = errorStream_.str();
\r
7859 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7860 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7861 state = snd_pcm_state( handle[1] );
\r
7862 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7863 result = snd_pcm_prepare( handle[1] );
\r
7864 if ( result < 0 ) {
\r
7865 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7866 errorText_ = errorStream_.str();
\r
7872 stream_.state = STREAM_RUNNING;
\r
7875 apiInfo->runnable = true;
\r
7876 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7877 MUTEX_UNLOCK( &stream_.mutex );
\r
7879 if ( result >= 0 ) return;
\r
7880 error( RtAudioError::SYSTEM_ERROR );
\r
7883 void RtApiAlsa :: stopStream()
\r
7886 if ( stream_.state == STREAM_STOPPED ) {
\r
7887 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7888 error( RtAudioError::WARNING );
\r
7892 stream_.state = STREAM_STOPPED;
\r
7893 MUTEX_LOCK( &stream_.mutex );
\r
7896 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7897 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7898 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7899 if ( apiInfo->synchronized )
\r
7900 result = snd_pcm_drop( handle[0] );
\r
7902 result = snd_pcm_drain( handle[0] );
\r
7903 if ( result < 0 ) {
\r
7904 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7905 errorText_ = errorStream_.str();
\r
7910 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7911 result = snd_pcm_drop( handle[1] );
\r
7912 if ( result < 0 ) {
\r
7913 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7914 errorText_ = errorStream_.str();
\r
7920 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7921 MUTEX_UNLOCK( &stream_.mutex );
\r
7923 if ( result >= 0 ) return;
\r
7924 error( RtAudioError::SYSTEM_ERROR );
\r
7927 void RtApiAlsa :: abortStream()
\r
7930 if ( stream_.state == STREAM_STOPPED ) {
\r
7931 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7932 error( RtAudioError::WARNING );
\r
7936 stream_.state = STREAM_STOPPED;
\r
7937 MUTEX_LOCK( &stream_.mutex );
\r
7940 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7941 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7942 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7943 result = snd_pcm_drop( handle[0] );
\r
7944 if ( result < 0 ) {
\r
7945 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7946 errorText_ = errorStream_.str();
\r
7951 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7952 result = snd_pcm_drop( handle[1] );
\r
7953 if ( result < 0 ) {
\r
7954 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7955 errorText_ = errorStream_.str();
\r
7961 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7962 MUTEX_UNLOCK( &stream_.mutex );
\r
7964 if ( result >= 0 ) return;
\r
7965 error( RtAudioError::SYSTEM_ERROR );
\r
7968 void RtApiAlsa :: callbackEvent()
\r
7970 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7971 if ( stream_.state == STREAM_STOPPED ) {
\r
7972 MUTEX_LOCK( &stream_.mutex );
\r
7973 while ( !apiInfo->runnable )
\r
7974 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7976 if ( stream_.state != STREAM_RUNNING ) {
\r
7977 MUTEX_UNLOCK( &stream_.mutex );
\r
7980 MUTEX_UNLOCK( &stream_.mutex );
\r
7983 if ( stream_.state == STREAM_CLOSED ) {
\r
7984 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7985 error( RtAudioError::WARNING );
\r
7989 int doStopStream = 0;
\r
7990 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7991 double streamTime = getStreamTime();
\r
7992 RtAudioStreamStatus status = 0;
\r
7993 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7994 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7995 apiInfo->xrun[0] = false;
\r
7997 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7998 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7999 apiInfo->xrun[1] = false;
\r
8001 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
8002 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
8004 if ( doStopStream == 2 ) {
\r
8009 MUTEX_LOCK( &stream_.mutex );
\r
8011 // The state might change while waiting on a mutex.
\r
8012 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
8017 snd_pcm_t **handle;
\r
8018 snd_pcm_sframes_t frames;
\r
8019 RtAudioFormat format;
\r
8020 handle = (snd_pcm_t **) apiInfo->handles;
\r
8022 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
8024 // Setup parameters.
\r
8025 if ( stream_.doConvertBuffer[1] ) {
\r
8026 buffer = stream_.deviceBuffer;
\r
8027 channels = stream_.nDeviceChannels[1];
\r
8028 format = stream_.deviceFormat[1];
\r
8031 buffer = stream_.userBuffer[1];
\r
8032 channels = stream_.nUserChannels[1];
\r
8033 format = stream_.userFormat;
\r
8036 // Read samples from device in interleaved/non-interleaved format.
\r
8037 if ( stream_.deviceInterleaved[1] )
\r
8038 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
8040 void *bufs[channels];
\r
8041 size_t offset = stream_.bufferSize * formatBytes( format );
\r
8042 for ( int i=0; i<channels; i++ )
\r
8043 bufs[i] = (void *) (buffer + (i * offset));
\r
8044 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
8047 if ( result < (int) stream_.bufferSize ) {
\r
8048 // Either an error or overrun occured.
\r
8049 if ( result == -EPIPE ) {
\r
8050 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
8051 if ( state == SND_PCM_STATE_XRUN ) {
\r
8052 apiInfo->xrun[1] = true;
\r
8053 result = snd_pcm_prepare( handle[1] );
\r
8054 if ( result < 0 ) {
\r
8055 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
8056 errorText_ = errorStream_.str();
\r
8060 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8061 errorText_ = errorStream_.str();
\r
8065 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
8066 errorText_ = errorStream_.str();
\r
8068 error( RtAudioError::WARNING );
\r
8072 // Do byte swapping if necessary.
\r
8073 if ( stream_.doByteSwap[1] )
\r
8074 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
8076 // Do buffer conversion if necessary.
\r
8077 if ( stream_.doConvertBuffer[1] )
\r
8078 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
8080 // Check stream latency
\r
8081 result = snd_pcm_delay( handle[1], &frames );
\r
8082 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
8087 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8089 // Setup parameters and do buffer conversion if necessary.
\r
8090 if ( stream_.doConvertBuffer[0] ) {
\r
8091 buffer = stream_.deviceBuffer;
\r
8092 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
8093 channels = stream_.nDeviceChannels[0];
\r
8094 format = stream_.deviceFormat[0];
\r
8097 buffer = stream_.userBuffer[0];
\r
8098 channels = stream_.nUserChannels[0];
\r
8099 format = stream_.userFormat;
\r
8102 // Do byte swapping if necessary.
\r
8103 if ( stream_.doByteSwap[0] )
\r
8104 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
8106 // Write samples to device in interleaved/non-interleaved format.
\r
8107 if ( stream_.deviceInterleaved[0] )
\r
8108 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
8110 void *bufs[channels];
\r
8111 size_t offset = stream_.bufferSize * formatBytes( format );
\r
8112 for ( int i=0; i<channels; i++ )
\r
8113 bufs[i] = (void *) (buffer + (i * offset));
\r
8114 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
8117 if ( result < (int) stream_.bufferSize ) {
\r
8118 // Either an error or underrun occured.
\r
8119 if ( result == -EPIPE ) {
\r
8120 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
8121 if ( state == SND_PCM_STATE_XRUN ) {
\r
8122 apiInfo->xrun[0] = true;
\r
8123 result = snd_pcm_prepare( handle[0] );
\r
8124 if ( result < 0 ) {
\r
8125 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
8126 errorText_ = errorStream_.str();
\r
8129 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
\r
8132 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8133 errorText_ = errorStream_.str();
\r
8137 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
8138 errorText_ = errorStream_.str();
\r
8140 error( RtAudioError::WARNING );
\r
8144 // Check stream latency
\r
8145 result = snd_pcm_delay( handle[0], &frames );
\r
8146 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
8150 MUTEX_UNLOCK( &stream_.mutex );
\r
8152 RtApi::tickStreamTime();
\r
8153 if ( doStopStream == 1 ) this->stopStream();
\r
8156 static void *alsaCallbackHandler( void *ptr )
\r
8158 CallbackInfo *info = (CallbackInfo *) ptr;
\r
8159 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
8160 bool *isRunning = &info->isRunning;
\r
8162 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
8163 if ( info->doRealtime ) {
\r
8164 pthread_t tID = pthread_self(); // ID of this thread
\r
8165 sched_param prio = { info->priority }; // scheduling priority of thread
\r
8166 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
8170 while ( *isRunning == true ) {
\r
8171 pthread_testcancel();
\r
8172 object->callbackEvent();
\r
8175 pthread_exit( NULL );
\r
8178 //******************** End of __LINUX_ALSA__ *********************//
\r
8181 #if defined(__LINUX_PULSE__)
\r
8183 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8184 // and Tristan Matthews.
\r
8186 #include <pulse/error.h>
\r
8187 #include <pulse/simple.h>
\r
8190 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
\r
8191 44100, 48000, 96000, 0};
\r
8193 struct rtaudio_pa_format_mapping_t {
\r
8194 RtAudioFormat rtaudio_format;
\r
8195 pa_sample_format_t pa_format;
\r
8198 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8199 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8200 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8201 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8202 {0, PA_SAMPLE_INVALID}};
\r
8204 struct PulseAudioHandle {
\r
8205 pa_simple *s_play;
\r
8208 pthread_cond_t runnable_cv;
\r
8210 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8213 RtApiPulse::~RtApiPulse()
\r
8215 if ( stream_.state != STREAM_CLOSED )
\r
8219 unsigned int RtApiPulse::getDeviceCount( void )
\r
8224 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8226 RtAudio::DeviceInfo info;
\r
8227 info.probed = true;
\r
8228 info.name = "PulseAudio";
\r
8229 info.outputChannels = 2;
\r
8230 info.inputChannels = 2;
\r
8231 info.duplexChannels = 2;
\r
8232 info.isDefaultOutput = true;
\r
8233 info.isDefaultInput = true;
\r
8235 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8236 info.sampleRates.push_back( *sr );
\r
8238 info.preferredSampleRate = 48000;
\r
8239 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8244 static void *pulseaudio_callback( void * user )
\r
8246 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8247 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8248 volatile bool *isRunning = &cbi->isRunning;
\r
8250 while ( *isRunning ) {
\r
8251 pthread_testcancel();
\r
8252 context->callbackEvent();
\r
8255 pthread_exit( NULL );
\r
8258 void RtApiPulse::closeStream( void )
\r
8260 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8262 stream_.callbackInfo.isRunning = false;
\r
8264 MUTEX_LOCK( &stream_.mutex );
\r
8265 if ( stream_.state == STREAM_STOPPED ) {
\r
8266 pah->runnable = true;
\r
8267 pthread_cond_signal( &pah->runnable_cv );
\r
8269 MUTEX_UNLOCK( &stream_.mutex );
\r
8271 pthread_join( pah->thread, 0 );
\r
8272 if ( pah->s_play ) {
\r
8273 pa_simple_flush( pah->s_play, NULL );
\r
8274 pa_simple_free( pah->s_play );
\r
8277 pa_simple_free( pah->s_rec );
\r
8279 pthread_cond_destroy( &pah->runnable_cv );
\r
8281 stream_.apiHandle = 0;
\r
8284 if ( stream_.userBuffer[0] ) {
\r
8285 free( stream_.userBuffer[0] );
\r
8286 stream_.userBuffer[0] = 0;
\r
8288 if ( stream_.userBuffer[1] ) {
\r
8289 free( stream_.userBuffer[1] );
\r
8290 stream_.userBuffer[1] = 0;
\r
8293 stream_.state = STREAM_CLOSED;
\r
8294 stream_.mode = UNINITIALIZED;
\r
8297 void RtApiPulse::callbackEvent( void )
\r
8299 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8301 if ( stream_.state == STREAM_STOPPED ) {
\r
8302 MUTEX_LOCK( &stream_.mutex );
\r
8303 while ( !pah->runnable )
\r
8304 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8306 if ( stream_.state != STREAM_RUNNING ) {
\r
8307 MUTEX_UNLOCK( &stream_.mutex );
\r
8310 MUTEX_UNLOCK( &stream_.mutex );
\r
8313 if ( stream_.state == STREAM_CLOSED ) {
\r
8314 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8315 "this shouldn't happen!";
\r
8316 error( RtAudioError::WARNING );
\r
8320 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8321 double streamTime = getStreamTime();
\r
8322 RtAudioStreamStatus status = 0;
\r
8323 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8324 stream_.bufferSize, streamTime, status,
\r
8325 stream_.callbackInfo.userData );
\r
8327 if ( doStopStream == 2 ) {
\r
8332 MUTEX_LOCK( &stream_.mutex );
\r
8333 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8334 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8336 if ( stream_.state != STREAM_RUNNING )
\r
8341 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8342 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8343 convertBuffer( stream_.deviceBuffer,
\r
8344 stream_.userBuffer[OUTPUT],
\r
8345 stream_.convertInfo[OUTPUT] );
\r
8346 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8347 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8349 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8350 formatBytes( stream_.userFormat );
\r
8352 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8353 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8354 pa_strerror( pa_error ) << ".";
\r
8355 errorText_ = errorStream_.str();
\r
8356 error( RtAudioError::WARNING );
\r
8360 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8361 if ( stream_.doConvertBuffer[INPUT] )
\r
8362 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8363 formatBytes( stream_.deviceFormat[INPUT] );
\r
8365 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8366 formatBytes( stream_.userFormat );
\r
8368 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8369 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8370 pa_strerror( pa_error ) << ".";
\r
8371 errorText_ = errorStream_.str();
\r
8372 error( RtAudioError::WARNING );
\r
8374 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8375 convertBuffer( stream_.userBuffer[INPUT],
\r
8376 stream_.deviceBuffer,
\r
8377 stream_.convertInfo[INPUT] );
\r
8382 MUTEX_UNLOCK( &stream_.mutex );
\r
8383 RtApi::tickStreamTime();
\r
8385 if ( doStopStream == 1 )
\r
8389 void RtApiPulse::startStream( void )
\r
8391 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8393 if ( stream_.state == STREAM_CLOSED ) {
\r
8394 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8395 error( RtAudioError::INVALID_USE );
\r
8398 if ( stream_.state == STREAM_RUNNING ) {
\r
8399 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8400 error( RtAudioError::WARNING );
\r
8404 MUTEX_LOCK( &stream_.mutex );
\r
8406 stream_.state = STREAM_RUNNING;
\r
8408 pah->runnable = true;
\r
8409 pthread_cond_signal( &pah->runnable_cv );
\r
8410 MUTEX_UNLOCK( &stream_.mutex );
\r
8413 void RtApiPulse::stopStream( void )
\r
8415 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8417 if ( stream_.state == STREAM_CLOSED ) {
\r
8418 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8419 error( RtAudioError::INVALID_USE );
\r
8422 if ( stream_.state == STREAM_STOPPED ) {
\r
8423 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8424 error( RtAudioError::WARNING );
\r
8428 stream_.state = STREAM_STOPPED;
\r
8429 MUTEX_LOCK( &stream_.mutex );
\r
8431 if ( pah && pah->s_play ) {
\r
8433 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8434 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8435 pa_strerror( pa_error ) << ".";
\r
8436 errorText_ = errorStream_.str();
\r
8437 MUTEX_UNLOCK( &stream_.mutex );
\r
8438 error( RtAudioError::SYSTEM_ERROR );
\r
8443 stream_.state = STREAM_STOPPED;
\r
8444 MUTEX_UNLOCK( &stream_.mutex );
\r
8447 void RtApiPulse::abortStream( void )
\r
8449 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8451 if ( stream_.state == STREAM_CLOSED ) {
\r
8452 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8453 error( RtAudioError::INVALID_USE );
\r
8456 if ( stream_.state == STREAM_STOPPED ) {
\r
8457 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8458 error( RtAudioError::WARNING );
\r
8462 stream_.state = STREAM_STOPPED;
\r
8463 MUTEX_LOCK( &stream_.mutex );
\r
8465 if ( pah && pah->s_play ) {
\r
8467 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8468 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8469 pa_strerror( pa_error ) << ".";
\r
8470 errorText_ = errorStream_.str();
\r
8471 MUTEX_UNLOCK( &stream_.mutex );
\r
8472 error( RtAudioError::SYSTEM_ERROR );
\r
8477 stream_.state = STREAM_STOPPED;
\r
8478 MUTEX_UNLOCK( &stream_.mutex );
\r
8481 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8482 unsigned int channels, unsigned int firstChannel,
\r
8483 unsigned int sampleRate, RtAudioFormat format,
\r
8484 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8486 PulseAudioHandle *pah = 0;
\r
8487 unsigned long bufferBytes = 0;
\r
8488 pa_sample_spec ss;
\r
8490 if ( device != 0 ) return false;
\r
8491 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8492 if ( channels != 1 && channels != 2 ) {
\r
8493 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8496 ss.channels = channels;
\r
8498 if ( firstChannel != 0 ) return false;
\r
8500 bool sr_found = false;
\r
8501 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8502 if ( sampleRate == *sr ) {
\r
8504 stream_.sampleRate = sampleRate;
\r
8505 ss.rate = sampleRate;
\r
8509 if ( !sr_found ) {
\r
8510 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8514 bool sf_found = 0;
\r
8515 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8516 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8517 if ( format == sf->rtaudio_format ) {
\r
8519 stream_.userFormat = sf->rtaudio_format;
\r
8520 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8521 ss.format = sf->pa_format;
\r
8525 if ( !sf_found ) { // Use internal data format conversion.
\r
8526 stream_.userFormat = format;
\r
8527 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8528 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8531 // Set other stream parameters.
\r
8532 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8533 else stream_.userInterleaved = true;
\r
8534 stream_.deviceInterleaved[mode] = true;
\r
8535 stream_.nBuffers = 1;
\r
8536 stream_.doByteSwap[mode] = false;
\r
8537 stream_.nUserChannels[mode] = channels;
\r
8538 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8539 stream_.channelOffset[mode] = 0;
\r
8540 std::string streamName = "RtAudio";
\r
8542 // Set flags for buffer conversion.
\r
8543 stream_.doConvertBuffer[mode] = false;
\r
8544 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8545 stream_.doConvertBuffer[mode] = true;
\r
8546 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8547 stream_.doConvertBuffer[mode] = true;
\r
8549 // Allocate necessary internal buffers.
\r
8550 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8551 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8552 if ( stream_.userBuffer[mode] == NULL ) {
\r
8553 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8556 stream_.bufferSize = *bufferSize;
\r
8558 if ( stream_.doConvertBuffer[mode] ) {
\r
8560 bool makeBuffer = true;
\r
8561 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8562 if ( mode == INPUT ) {
\r
8563 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8564 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8565 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8569 if ( makeBuffer ) {
\r
8570 bufferBytes *= *bufferSize;
\r
8571 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8572 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8573 if ( stream_.deviceBuffer == NULL ) {
\r
8574 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8580 stream_.device[mode] = device;
\r
8582 // Setup the buffer conversion information structure.
\r
8583 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8585 if ( !stream_.apiHandle ) {
\r
8586 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8588 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8592 stream_.apiHandle = pah;
\r
8593 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8594 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8598 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8601 if ( options && !options->streamName.empty() ) streamName = options->streamName;
\r
8604 pa_buffer_attr buffer_attr;
\r
8605 buffer_attr.fragsize = bufferBytes;
\r
8606 buffer_attr.maxlength = -1;
\r
8608 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8609 if ( !pah->s_rec ) {
\r
8610 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8615 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8616 if ( !pah->s_play ) {
\r
8617 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8625 if ( stream_.mode == UNINITIALIZED )
\r
8626 stream_.mode = mode;
\r
8627 else if ( stream_.mode == mode )
\r
8630 stream_.mode = DUPLEX;
\r
8632 if ( !stream_.callbackInfo.isRunning ) {
\r
8633 stream_.callbackInfo.object = this;
\r
8634 stream_.callbackInfo.isRunning = true;
\r
8635 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8636 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8641 stream_.state = STREAM_STOPPED;
\r
8645 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8646 pthread_cond_destroy( &pah->runnable_cv );
\r
8648 stream_.apiHandle = 0;
\r
8651 for ( int i=0; i<2; i++ ) {
\r
8652 if ( stream_.userBuffer[i] ) {
\r
8653 free( stream_.userBuffer[i] );
\r
8654 stream_.userBuffer[i] = 0;
\r
8658 if ( stream_.deviceBuffer ) {
\r
8659 free( stream_.deviceBuffer );
\r
8660 stream_.deviceBuffer = 0;
\r
8666 //******************** End of __LINUX_PULSE__ *********************//
\r
8669 #if defined(__LINUX_OSS__)
\r
8671 #include <unistd.h>
\r
8672 #include <sys/ioctl.h>
\r
8673 #include <unistd.h>
\r
8674 #include <fcntl.h>
\r
8675 #include <sys/soundcard.h>
\r
8676 #include <errno.h>
\r
8679 static void *ossCallbackHandler(void * ptr);
\r
8681 // A structure to hold various information related to the OSS API
\r
8682 // implementation.
\r
8683 struct OssHandle {
\r
8684 int id[2]; // device ids
\r
8687 pthread_cond_t runnable;
\r
8690 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
8693 RtApiOss :: RtApiOss()
\r
8695 // Nothing to do here.
\r
8698 RtApiOss :: ~RtApiOss()
\r
8700 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8703 unsigned int RtApiOss :: getDeviceCount( void )
\r
8705 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8706 if ( mixerfd == -1 ) {
\r
8707 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8708 error( RtAudioError::WARNING );
\r
8712 oss_sysinfo sysinfo;
\r
8713 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8715 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8716 error( RtAudioError::WARNING );
\r
8721 return sysinfo.numaudios;
\r
8724 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8726 RtAudio::DeviceInfo info;
\r
8727 info.probed = false;
\r
8729 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8730 if ( mixerfd == -1 ) {
\r
8731 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8732 error( RtAudioError::WARNING );
\r
8736 oss_sysinfo sysinfo;
\r
8737 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8738 if ( result == -1 ) {
\r
8740 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8741 error( RtAudioError::WARNING );
\r
8745 unsigned nDevices = sysinfo.numaudios;
\r
8746 if ( nDevices == 0 ) {
\r
8748 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8749 error( RtAudioError::INVALID_USE );
\r
8753 if ( device >= nDevices ) {
\r
8755 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8756 error( RtAudioError::INVALID_USE );
\r
8760 oss_audioinfo ainfo;
\r
8761 ainfo.dev = device;
\r
8762 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8764 if ( result == -1 ) {
\r
8765 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8766 errorText_ = errorStream_.str();
\r
8767 error( RtAudioError::WARNING );
\r
8772 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8773 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8774 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8775 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8776 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8779 // Probe data formats ... do for input
\r
8780 unsigned long mask = ainfo.iformats;
\r
8781 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8782 info.nativeFormats |= RTAUDIO_SINT16;
\r
8783 if ( mask & AFMT_S8 )
\r
8784 info.nativeFormats |= RTAUDIO_SINT8;
\r
8785 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8786 info.nativeFormats |= RTAUDIO_SINT32;
\r
8788 if ( mask & AFMT_FLOAT )
\r
8789 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8791 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8792 info.nativeFormats |= RTAUDIO_SINT24;
\r
8794 // Check that we have at least one supported format
\r
8795 if ( info.nativeFormats == 0 ) {
\r
8796 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8797 errorText_ = errorStream_.str();
\r
8798 error( RtAudioError::WARNING );
\r
8802 // Probe the supported sample rates.
\r
8803 info.sampleRates.clear();
\r
8804 if ( ainfo.nrates ) {
\r
8805 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8806 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8807 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8808 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8810 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8811 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8819 // Check min and max rate values;
\r
8820 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8821 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
\r
8822 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8824 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8825 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8830 if ( info.sampleRates.size() == 0 ) {
\r
8831 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8832 errorText_ = errorStream_.str();
\r
8833 error( RtAudioError::WARNING );
\r
8836 info.probed = true;
\r
8837 info.name = ainfo.name;
\r
8844 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8845 unsigned int firstChannel, unsigned int sampleRate,
\r
8846 RtAudioFormat format, unsigned int *bufferSize,
\r
8847 RtAudio::StreamOptions *options )
\r
8849 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8850 if ( mixerfd == -1 ) {
\r
8851 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8855 oss_sysinfo sysinfo;
\r
8856 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8857 if ( result == -1 ) {
\r
8859 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8863 unsigned nDevices = sysinfo.numaudios;
\r
8864 if ( nDevices == 0 ) {
\r
8865 // This should not happen because a check is made before this function is called.
\r
8867 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8871 if ( device >= nDevices ) {
\r
8872 // This should not happen because a check is made before this function is called.
\r
8874 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8878 oss_audioinfo ainfo;
\r
8879 ainfo.dev = device;
\r
8880 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8882 if ( result == -1 ) {
\r
8883 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8884 errorText_ = errorStream_.str();
\r
8888 // Check if device supports input or output
\r
8889 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8890 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8891 if ( mode == OUTPUT )
\r
8892 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8894 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8895 errorText_ = errorStream_.str();
\r
8900 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8901 if ( mode == OUTPUT )
\r
8902 flags |= O_WRONLY;
\r
8903 else { // mode == INPUT
\r
8904 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8905 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8906 close( handle->id[0] );
\r
8907 handle->id[0] = 0;
\r
8908 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8909 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8910 errorText_ = errorStream_.str();
\r
8913 // Check that the number previously set channels is the same.
\r
8914 if ( stream_.nUserChannels[0] != channels ) {
\r
8915 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8916 errorText_ = errorStream_.str();
\r
8922 flags |= O_RDONLY;
\r
8925 // Set exclusive access if specified.
\r
8926 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8928 // Try to open the device.
\r
8930 fd = open( ainfo.devnode, flags, 0 );
\r
8932 if ( errno == EBUSY )
\r
8933 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8935 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8936 errorText_ = errorStream_.str();
\r
8940 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8942 if ( flags | O_RDWR ) {
\r
8943 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8944 if ( result == -1) {
\r
8945 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8946 errorText_ = errorStream_.str();
\r
8952 // Check the device channel support.
\r
8953 stream_.nUserChannels[mode] = channels;
\r
8954 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8956 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8957 errorText_ = errorStream_.str();
\r
8961 // Set the number of channels.
\r
8962 int deviceChannels = channels + firstChannel;
\r
8963 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8964 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8966 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8967 errorText_ = errorStream_.str();
\r
8970 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8972 // Get the data format mask
\r
8974 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8975 if ( result == -1 ) {
\r
8977 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8978 errorText_ = errorStream_.str();
\r
8982 // Determine how to set the device format.
\r
8983 stream_.userFormat = format;
\r
8984 int deviceFormat = -1;
\r
8985 stream_.doByteSwap[mode] = false;
\r
8986 if ( format == RTAUDIO_SINT8 ) {
\r
8987 if ( mask & AFMT_S8 ) {
\r
8988 deviceFormat = AFMT_S8;
\r
8989 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8992 else if ( format == RTAUDIO_SINT16 ) {
\r
8993 if ( mask & AFMT_S16_NE ) {
\r
8994 deviceFormat = AFMT_S16_NE;
\r
8995 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8997 else if ( mask & AFMT_S16_OE ) {
\r
8998 deviceFormat = AFMT_S16_OE;
\r
8999 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9000 stream_.doByteSwap[mode] = true;
\r
9003 else if ( format == RTAUDIO_SINT24 ) {
\r
9004 if ( mask & AFMT_S24_NE ) {
\r
9005 deviceFormat = AFMT_S24_NE;
\r
9006 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9008 else if ( mask & AFMT_S24_OE ) {
\r
9009 deviceFormat = AFMT_S24_OE;
\r
9010 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9011 stream_.doByteSwap[mode] = true;
\r
9014 else if ( format == RTAUDIO_SINT32 ) {
\r
9015 if ( mask & AFMT_S32_NE ) {
\r
9016 deviceFormat = AFMT_S32_NE;
\r
9017 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9019 else if ( mask & AFMT_S32_OE ) {
\r
9020 deviceFormat = AFMT_S32_OE;
\r
9021 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9022 stream_.doByteSwap[mode] = true;
\r
9026 if ( deviceFormat == -1 ) {
\r
9027 // The user requested format is not natively supported by the device.
\r
9028 if ( mask & AFMT_S16_NE ) {
\r
9029 deviceFormat = AFMT_S16_NE;
\r
9030 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9032 else if ( mask & AFMT_S32_NE ) {
\r
9033 deviceFormat = AFMT_S32_NE;
\r
9034 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9036 else if ( mask & AFMT_S24_NE ) {
\r
9037 deviceFormat = AFMT_S24_NE;
\r
9038 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9040 else if ( mask & AFMT_S16_OE ) {
\r
9041 deviceFormat = AFMT_S16_OE;
\r
9042 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9043 stream_.doByteSwap[mode] = true;
\r
9045 else if ( mask & AFMT_S32_OE ) {
\r
9046 deviceFormat = AFMT_S32_OE;
\r
9047 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9048 stream_.doByteSwap[mode] = true;
\r
9050 else if ( mask & AFMT_S24_OE ) {
\r
9051 deviceFormat = AFMT_S24_OE;
\r
9052 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9053 stream_.doByteSwap[mode] = true;
\r
9055 else if ( mask & AFMT_S8) {
\r
9056 deviceFormat = AFMT_S8;
\r
9057 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
9061 if ( stream_.deviceFormat[mode] == 0 ) {
\r
9062 // This really shouldn't happen ...
\r
9064 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
9065 errorText_ = errorStream_.str();
\r
9069 // Set the data format.
\r
9070 int temp = deviceFormat;
\r
9071 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
9072 if ( result == -1 || deviceFormat != temp ) {
\r
9074 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
9075 errorText_ = errorStream_.str();
\r
9079 // Attempt to set the buffer size. According to OSS, the minimum
\r
9080 // number of buffers is two. The supposed minimum buffer size is 16
\r
9081 // bytes, so that will be our lower bound. The argument to this
\r
9082 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
9083 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
9084 // We'll check the actual value used near the end of the setup
\r
9086 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
9087 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
9089 if ( options ) buffers = options->numberOfBuffers;
\r
9090 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
9091 if ( buffers < 2 ) buffers = 3;
\r
9092 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
9093 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
9094 if ( result == -1 ) {
\r
9096 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
9097 errorText_ = errorStream_.str();
\r
9100 stream_.nBuffers = buffers;
\r
9102 // Save buffer size (in sample frames).
\r
9103 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
9104 stream_.bufferSize = *bufferSize;
\r
9106 // Set the sample rate.
\r
9107 int srate = sampleRate;
\r
9108 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
9109 if ( result == -1 ) {
\r
9111 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
9112 errorText_ = errorStream_.str();
\r
9116 // Verify the sample rate setup worked.
\r
9117 if ( abs( srate - (int)sampleRate ) > 100 ) {
\r
9119 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
9120 errorText_ = errorStream_.str();
\r
9123 stream_.sampleRate = sampleRate;
\r
9125 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
9126 // We're doing duplex setup here.
\r
9127 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
9128 stream_.nDeviceChannels[0] = deviceChannels;
\r
9131 // Set interleaving parameters.
\r
9132 stream_.userInterleaved = true;
\r
9133 stream_.deviceInterleaved[mode] = true;
\r
9134 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
9135 stream_.userInterleaved = false;
\r
9137 // Set flags for buffer conversion
\r
9138 stream_.doConvertBuffer[mode] = false;
\r
9139 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
9140 stream_.doConvertBuffer[mode] = true;
\r
9141 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
9142 stream_.doConvertBuffer[mode] = true;
\r
9143 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
9144 stream_.nUserChannels[mode] > 1 )
\r
9145 stream_.doConvertBuffer[mode] = true;
\r
9147 // Allocate the stream handles if necessary and then save.
\r
9148 if ( stream_.apiHandle == 0 ) {
\r
9150 handle = new OssHandle;
\r
9152 catch ( std::bad_alloc& ) {
\r
9153 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
9157 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
9158 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
9162 stream_.apiHandle = (void *) handle;
\r
9165 handle = (OssHandle *) stream_.apiHandle;
\r
9167 handle->id[mode] = fd;
\r
9169 // Allocate necessary internal buffers.
\r
9170 unsigned long bufferBytes;
\r
9171 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
9172 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
9173 if ( stream_.userBuffer[mode] == NULL ) {
\r
9174 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
9178 if ( stream_.doConvertBuffer[mode] ) {
\r
9180 bool makeBuffer = true;
\r
9181 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
9182 if ( mode == INPUT ) {
\r
9183 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
9184 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
9185 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9189 if ( makeBuffer ) {
\r
9190 bufferBytes *= *bufferSize;
\r
9191 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9192 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9193 if ( stream_.deviceBuffer == NULL ) {
\r
9194 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9200 stream_.device[mode] = device;
\r
9201 stream_.state = STREAM_STOPPED;
\r
9203 // Setup the buffer conversion information structure.
\r
9204 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9206 // Setup thread if necessary.
\r
9207 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9208 // We had already set up an output stream.
\r
9209 stream_.mode = DUPLEX;
\r
9210 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9213 stream_.mode = mode;
\r
9215 // Setup callback thread.
\r
9216 stream_.callbackInfo.object = (void *) this;
\r
9218 // Set the thread attributes for joinable and realtime scheduling
\r
9219 // priority. The higher priority will only take affect if the
\r
9220 // program is run as root or suid.
\r
9221 pthread_attr_t attr;
\r
9222 pthread_attr_init( &attr );
\r
9223 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9224 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9225 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9226 struct sched_param param;
\r
9227 int priority = options->priority;
\r
9228 int min = sched_get_priority_min( SCHED_RR );
\r
9229 int max = sched_get_priority_max( SCHED_RR );
\r
9230 if ( priority < min ) priority = min;
\r
9231 else if ( priority > max ) priority = max;
\r
9232 param.sched_priority = priority;
\r
9233 pthread_attr_setschedparam( &attr, ¶m );
\r
9234 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9237 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9239 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9242 stream_.callbackInfo.isRunning = true;
\r
9243 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9244 pthread_attr_destroy( &attr );
\r
9246 stream_.callbackInfo.isRunning = false;
\r
9247 errorText_ = "RtApiOss::error creating callback thread!";
\r
9256 pthread_cond_destroy( &handle->runnable );
\r
9257 if ( handle->id[0] ) close( handle->id[0] );
\r
9258 if ( handle->id[1] ) close( handle->id[1] );
\r
9260 stream_.apiHandle = 0;
\r
9263 for ( int i=0; i<2; i++ ) {
\r
9264 if ( stream_.userBuffer[i] ) {
\r
9265 free( stream_.userBuffer[i] );
\r
9266 stream_.userBuffer[i] = 0;
\r
9270 if ( stream_.deviceBuffer ) {
\r
9271 free( stream_.deviceBuffer );
\r
9272 stream_.deviceBuffer = 0;
\r
9278 void RtApiOss :: closeStream()
\r
9280 if ( stream_.state == STREAM_CLOSED ) {
\r
9281 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9282 error( RtAudioError::WARNING );
\r
9286 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9287 stream_.callbackInfo.isRunning = false;
\r
9288 MUTEX_LOCK( &stream_.mutex );
\r
9289 if ( stream_.state == STREAM_STOPPED )
\r
9290 pthread_cond_signal( &handle->runnable );
\r
9291 MUTEX_UNLOCK( &stream_.mutex );
\r
9292 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9294 if ( stream_.state == STREAM_RUNNING ) {
\r
9295 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9296 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9298 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9299 stream_.state = STREAM_STOPPED;
\r
9303 pthread_cond_destroy( &handle->runnable );
\r
9304 if ( handle->id[0] ) close( handle->id[0] );
\r
9305 if ( handle->id[1] ) close( handle->id[1] );
\r
9307 stream_.apiHandle = 0;
\r
9310 for ( int i=0; i<2; i++ ) {
\r
9311 if ( stream_.userBuffer[i] ) {
\r
9312 free( stream_.userBuffer[i] );
\r
9313 stream_.userBuffer[i] = 0;
\r
9317 if ( stream_.deviceBuffer ) {
\r
9318 free( stream_.deviceBuffer );
\r
9319 stream_.deviceBuffer = 0;
\r
9322 stream_.mode = UNINITIALIZED;
\r
9323 stream_.state = STREAM_CLOSED;
\r
9326 void RtApiOss :: startStream()
\r
9329 if ( stream_.state == STREAM_RUNNING ) {
\r
9330 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9331 error( RtAudioError::WARNING );
\r
9335 MUTEX_LOCK( &stream_.mutex );
\r
9337 stream_.state = STREAM_RUNNING;
\r
9339 // No need to do anything else here ... OSS automatically starts
\r
9340 // when fed samples.
\r
9342 MUTEX_UNLOCK( &stream_.mutex );
\r
9344 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9345 pthread_cond_signal( &handle->runnable );
\r
9348 void RtApiOss :: stopStream()
\r
9351 if ( stream_.state == STREAM_STOPPED ) {
\r
9352 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9353 error( RtAudioError::WARNING );
\r
9357 MUTEX_LOCK( &stream_.mutex );
\r
9359 // The state might change while waiting on a mutex.
\r
9360 if ( stream_.state == STREAM_STOPPED ) {
\r
9361 MUTEX_UNLOCK( &stream_.mutex );
\r
9366 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9367 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9369 // Flush the output with zeros a few times.
\r
9372 RtAudioFormat format;
\r
9374 if ( stream_.doConvertBuffer[0] ) {
\r
9375 buffer = stream_.deviceBuffer;
\r
9376 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9377 format = stream_.deviceFormat[0];
\r
9380 buffer = stream_.userBuffer[0];
\r
9381 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9382 format = stream_.userFormat;
\r
9385 memset( buffer, 0, samples * formatBytes(format) );
\r
9386 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9387 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9388 if ( result == -1 ) {
\r
9389 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9390 error( RtAudioError::WARNING );
\r
9394 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9395 if ( result == -1 ) {
\r
9396 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9397 errorText_ = errorStream_.str();
\r
9400 handle->triggered = false;
\r
9403 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9404 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9405 if ( result == -1 ) {
\r
9406 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9407 errorText_ = errorStream_.str();
\r
9413 stream_.state = STREAM_STOPPED;
\r
9414 MUTEX_UNLOCK( &stream_.mutex );
\r
9416 if ( result != -1 ) return;
\r
9417 error( RtAudioError::SYSTEM_ERROR );
\r
9420 void RtApiOss :: abortStream()
\r
9423 if ( stream_.state == STREAM_STOPPED ) {
\r
9424 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9425 error( RtAudioError::WARNING );
\r
9429 MUTEX_LOCK( &stream_.mutex );
\r
9431 // The state might change while waiting on a mutex.
\r
9432 if ( stream_.state == STREAM_STOPPED ) {
\r
9433 MUTEX_UNLOCK( &stream_.mutex );
\r
9438 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9439 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9440 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9441 if ( result == -1 ) {
\r
9442 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9443 errorText_ = errorStream_.str();
\r
9446 handle->triggered = false;
\r
9449 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9450 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9451 if ( result == -1 ) {
\r
9452 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9453 errorText_ = errorStream_.str();
\r
9459 stream_.state = STREAM_STOPPED;
\r
9460 MUTEX_UNLOCK( &stream_.mutex );
\r
9462 if ( result != -1 ) return;
\r
9463 error( RtAudioError::SYSTEM_ERROR );
\r
9466 void RtApiOss :: callbackEvent()
\r
9468 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9469 if ( stream_.state == STREAM_STOPPED ) {
\r
9470 MUTEX_LOCK( &stream_.mutex );
\r
9471 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9472 if ( stream_.state != STREAM_RUNNING ) {
\r
9473 MUTEX_UNLOCK( &stream_.mutex );
\r
9476 MUTEX_UNLOCK( &stream_.mutex );
\r
9479 if ( stream_.state == STREAM_CLOSED ) {
\r
9480 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9481 error( RtAudioError::WARNING );
\r
9485 // Invoke user callback to get fresh output data.
\r
9486 int doStopStream = 0;
\r
9487 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9488 double streamTime = getStreamTime();
\r
9489 RtAudioStreamStatus status = 0;
\r
9490 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9491 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9492 handle->xrun[0] = false;
\r
9494 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9495 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9496 handle->xrun[1] = false;
\r
9498 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9499 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9500 if ( doStopStream == 2 ) {
\r
9501 this->abortStream();
\r
9505 MUTEX_LOCK( &stream_.mutex );
\r
9507 // The state might change while waiting on a mutex.
\r
9508 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9513 RtAudioFormat format;
\r
9515 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9517 // Setup parameters and do buffer conversion if necessary.
\r
9518 if ( stream_.doConvertBuffer[0] ) {
\r
9519 buffer = stream_.deviceBuffer;
\r
9520 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9521 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9522 format = stream_.deviceFormat[0];
\r
9525 buffer = stream_.userBuffer[0];
\r
9526 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9527 format = stream_.userFormat;
\r
9530 // Do byte swapping if necessary.
\r
9531 if ( stream_.doByteSwap[0] )
\r
9532 byteSwapBuffer( buffer, samples, format );
\r
9534 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9536 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9537 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9538 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9539 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9540 handle->triggered = true;
\r
9543 // Write samples to device.
\r
9544 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9546 if ( result == -1 ) {
\r
9547 // We'll assume this is an underrun, though there isn't a
\r
9548 // specific means for determining that.
\r
9549 handle->xrun[0] = true;
\r
9550 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9551 error( RtAudioError::WARNING );
\r
9552 // Continue on to input section.
\r
9556 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9558 // Setup parameters.
\r
9559 if ( stream_.doConvertBuffer[1] ) {
\r
9560 buffer = stream_.deviceBuffer;
\r
9561 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9562 format = stream_.deviceFormat[1];
\r
9565 buffer = stream_.userBuffer[1];
\r
9566 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9567 format = stream_.userFormat;
\r
9570 // Read samples from device.
\r
9571 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9573 if ( result == -1 ) {
\r
9574 // We'll assume this is an overrun, though there isn't a
\r
9575 // specific means for determining that.
\r
9576 handle->xrun[1] = true;
\r
9577 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9578 error( RtAudioError::WARNING );
\r
9582 // Do byte swapping if necessary.
\r
9583 if ( stream_.doByteSwap[1] )
\r
9584 byteSwapBuffer( buffer, samples, format );
\r
9586 // Do buffer conversion if necessary.
\r
9587 if ( stream_.doConvertBuffer[1] )
\r
9588 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9592 MUTEX_UNLOCK( &stream_.mutex );
\r
9594 RtApi::tickStreamTime();
\r
9595 if ( doStopStream == 1 ) this->stopStream();
\r
9598 static void *ossCallbackHandler( void *ptr )
\r
9600 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9601 RtApiOss *object = (RtApiOss *) info->object;
\r
9602 bool *isRunning = &info->isRunning;
\r
9604 while ( *isRunning == true ) {
\r
9605 pthread_testcancel();
\r
9606 object->callbackEvent();
\r
9609 pthread_exit( NULL );
\r
9612 //******************** End of __LINUX_OSS__ *********************//
\r
9616 // *************************************************** //
\r
9618 // Protected common (OS-independent) RtAudio methods.
\r
9620 // *************************************************** //
\r
9622 // This method can be modified to control the behavior of error
\r
9623 // message printing.
\r
9624 void RtApi :: error( RtAudioError::Type type )
\r
9626 errorStream_.str(""); // clear the ostringstream
\r
9628 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9629 if ( errorCallback ) {
\r
9630 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9632 if ( firstErrorOccurred_ )
\r
9635 firstErrorOccurred_ = true;
\r
9636 const std::string errorMessage = errorText_;
\r
9638 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9639 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9643 errorCallback( type, errorMessage );
\r
9644 firstErrorOccurred_ = false;
\r
9648 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9649 std::cerr << '\n' << errorText_ << "\n\n";
\r
9650 else if ( type != RtAudioError::WARNING )
\r
9651 throw( RtAudioError( errorText_, type ) );
\r
9654 void RtApi :: verifyStream()
\r
9656 if ( stream_.state == STREAM_CLOSED ) {
\r
9657 errorText_ = "RtApi:: a stream is not open!";
\r
9658 error( RtAudioError::INVALID_USE );
\r
9662 void RtApi :: clearStreamInfo()
\r
9664 stream_.mode = UNINITIALIZED;
\r
9665 stream_.state = STREAM_CLOSED;
\r
9666 stream_.sampleRate = 0;
\r
9667 stream_.bufferSize = 0;
\r
9668 stream_.nBuffers = 0;
\r
9669 stream_.userFormat = 0;
\r
9670 stream_.userInterleaved = true;
\r
9671 stream_.streamTime = 0.0;
\r
9672 stream_.apiHandle = 0;
\r
9673 stream_.deviceBuffer = 0;
\r
9674 stream_.callbackInfo.callback = 0;
\r
9675 stream_.callbackInfo.userData = 0;
\r
9676 stream_.callbackInfo.isRunning = false;
\r
9677 stream_.callbackInfo.errorCallback = 0;
\r
9678 for ( int i=0; i<2; i++ ) {
\r
9679 stream_.device[i] = 11111;
\r
9680 stream_.doConvertBuffer[i] = false;
\r
9681 stream_.deviceInterleaved[i] = true;
\r
9682 stream_.doByteSwap[i] = false;
\r
9683 stream_.nUserChannels[i] = 0;
\r
9684 stream_.nDeviceChannels[i] = 0;
\r
9685 stream_.channelOffset[i] = 0;
\r
9686 stream_.deviceFormat[i] = 0;
\r
9687 stream_.latency[i] = 0;
\r
9688 stream_.userBuffer[i] = 0;
\r
9689 stream_.convertInfo[i].channels = 0;
\r
9690 stream_.convertInfo[i].inJump = 0;
\r
9691 stream_.convertInfo[i].outJump = 0;
\r
9692 stream_.convertInfo[i].inFormat = 0;
\r
9693 stream_.convertInfo[i].outFormat = 0;
\r
9694 stream_.convertInfo[i].inOffset.clear();
\r
9695 stream_.convertInfo[i].outOffset.clear();
\r
9699 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9701 if ( format == RTAUDIO_SINT16 )
\r
9703 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9705 else if ( format == RTAUDIO_FLOAT64 )
\r
9707 else if ( format == RTAUDIO_SINT24 )
\r
9709 else if ( format == RTAUDIO_SINT8 )
\r
9712 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9713 error( RtAudioError::WARNING );
\r
9718 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9720 if ( mode == INPUT ) { // convert device to user buffer
\r
9721 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9722 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9723 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9724 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9726 else { // convert user to device buffer
\r
9727 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9728 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9729 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9730 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9733 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9734 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9736 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9738 // Set up the interleave/deinterleave offsets.
\r
9739 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9740 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9741 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9742 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9743 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9744 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9745 stream_.convertInfo[mode].inJump = 1;
\r
9749 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9750 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9751 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9752 stream_.convertInfo[mode].outJump = 1;
\r
9756 else { // no (de)interleaving
\r
9757 if ( stream_.userInterleaved ) {
\r
9758 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9759 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9760 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9764 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9765 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9766 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9767 stream_.convertInfo[mode].inJump = 1;
\r
9768 stream_.convertInfo[mode].outJump = 1;
\r
9773 // Add channel offset.
\r
9774 if ( firstChannel > 0 ) {
\r
9775 if ( stream_.deviceInterleaved[mode] ) {
\r
9776 if ( mode == OUTPUT ) {
\r
9777 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9778 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9781 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9782 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9786 if ( mode == OUTPUT ) {
\r
9787 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9788 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9791 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9792 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9798 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9800 // This function does format conversion, input/output channel compensation, and
\r
9801 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9802 // the lower three bytes of a 32-bit integer.
\r
9804 // Clear our device buffer when in/out duplex device channels are different
\r
9805 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9806 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9807 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9810 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9812 Float64 *out = (Float64 *)outBuffer;
\r
9814 if (info.inFormat == RTAUDIO_SINT8) {
\r
9815 signed char *in = (signed char *)inBuffer;
\r
9816 scale = 1.0 / 127.5;
\r
9817 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9818 for (j=0; j<info.channels; j++) {
\r
9819 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9820 out[info.outOffset[j]] += 0.5;
\r
9821 out[info.outOffset[j]] *= scale;
\r
9823 in += info.inJump;
\r
9824 out += info.outJump;
\r
9827 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9828 Int16 *in = (Int16 *)inBuffer;
\r
9829 scale = 1.0 / 32767.5;
\r
9830 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9831 for (j=0; j<info.channels; j++) {
\r
9832 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9833 out[info.outOffset[j]] += 0.5;
\r
9834 out[info.outOffset[j]] *= scale;
\r
9836 in += info.inJump;
\r
9837 out += info.outJump;
\r
9840 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9841 Int24 *in = (Int24 *)inBuffer;
\r
9842 scale = 1.0 / 8388607.5;
\r
9843 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9844 for (j=0; j<info.channels; j++) {
\r
9845 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9846 out[info.outOffset[j]] += 0.5;
\r
9847 out[info.outOffset[j]] *= scale;
\r
9849 in += info.inJump;
\r
9850 out += info.outJump;
\r
9853 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9854 Int32 *in = (Int32 *)inBuffer;
\r
9855 scale = 1.0 / 2147483647.5;
\r
9856 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9857 for (j=0; j<info.channels; j++) {
\r
9858 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9859 out[info.outOffset[j]] += 0.5;
\r
9860 out[info.outOffset[j]] *= scale;
\r
9862 in += info.inJump;
\r
9863 out += info.outJump;
\r
9866 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9867 Float32 *in = (Float32 *)inBuffer;
\r
9868 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9869 for (j=0; j<info.channels; j++) {
\r
9870 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9872 in += info.inJump;
\r
9873 out += info.outJump;
\r
9876 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9877 // Channel compensation and/or (de)interleaving only.
\r
9878 Float64 *in = (Float64 *)inBuffer;
\r
9879 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9880 for (j=0; j<info.channels; j++) {
\r
9881 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9883 in += info.inJump;
\r
9884 out += info.outJump;
\r
9888 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9890 Float32 *out = (Float32 *)outBuffer;
\r
9892 if (info.inFormat == RTAUDIO_SINT8) {
\r
9893 signed char *in = (signed char *)inBuffer;
\r
9894 scale = (Float32) ( 1.0 / 127.5 );
\r
9895 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9896 for (j=0; j<info.channels; j++) {
\r
9897 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9898 out[info.outOffset[j]] += 0.5;
\r
9899 out[info.outOffset[j]] *= scale;
\r
9901 in += info.inJump;
\r
9902 out += info.outJump;
\r
9905 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9906 Int16 *in = (Int16 *)inBuffer;
\r
9907 scale = (Float32) ( 1.0 / 32767.5 );
\r
9908 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9909 for (j=0; j<info.channels; j++) {
\r
9910 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9911 out[info.outOffset[j]] += 0.5;
\r
9912 out[info.outOffset[j]] *= scale;
\r
9914 in += info.inJump;
\r
9915 out += info.outJump;
\r
9918 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9919 Int24 *in = (Int24 *)inBuffer;
\r
9920 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9921 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9922 for (j=0; j<info.channels; j++) {
\r
9923 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9924 out[info.outOffset[j]] += 0.5;
\r
9925 out[info.outOffset[j]] *= scale;
\r
9927 in += info.inJump;
\r
9928 out += info.outJump;
\r
9931 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9932 Int32 *in = (Int32 *)inBuffer;
\r
9933 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9934 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9935 for (j=0; j<info.channels; j++) {
\r
9936 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9937 out[info.outOffset[j]] += 0.5;
\r
9938 out[info.outOffset[j]] *= scale;
\r
9940 in += info.inJump;
\r
9941 out += info.outJump;
\r
9944 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9945 // Channel compensation and/or (de)interleaving only.
\r
9946 Float32 *in = (Float32 *)inBuffer;
\r
9947 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9948 for (j=0; j<info.channels; j++) {
\r
9949 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9951 in += info.inJump;
\r
9952 out += info.outJump;
\r
9955 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9956 Float64 *in = (Float64 *)inBuffer;
\r
9957 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9958 for (j=0; j<info.channels; j++) {
\r
9959 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9961 in += info.inJump;
\r
9962 out += info.outJump;
\r
9966 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9967 Int32 *out = (Int32 *)outBuffer;
\r
9968 if (info.inFormat == RTAUDIO_SINT8) {
\r
9969 signed char *in = (signed char *)inBuffer;
\r
9970 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9971 for (j=0; j<info.channels; j++) {
\r
9972 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9973 out[info.outOffset[j]] <<= 24;
\r
9975 in += info.inJump;
\r
9976 out += info.outJump;
\r
9979 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9980 Int16 *in = (Int16 *)inBuffer;
\r
9981 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9982 for (j=0; j<info.channels; j++) {
\r
9983 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9984 out[info.outOffset[j]] <<= 16;
\r
9986 in += info.inJump;
\r
9987 out += info.outJump;
\r
9990 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9991 Int24 *in = (Int24 *)inBuffer;
\r
9992 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9993 for (j=0; j<info.channels; j++) {
\r
9994 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9995 out[info.outOffset[j]] <<= 8;
\r
9997 in += info.inJump;
\r
9998 out += info.outJump;
\r
10001 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10002 // Channel compensation and/or (de)interleaving only.
\r
10003 Int32 *in = (Int32 *)inBuffer;
\r
10004 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10005 for (j=0; j<info.channels; j++) {
\r
10006 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10008 in += info.inJump;
\r
10009 out += info.outJump;
\r
10012 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10013 Float32 *in = (Float32 *)inBuffer;
\r
10014 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10015 for (j=0; j<info.channels; j++) {
\r
10016 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
10018 in += info.inJump;
\r
10019 out += info.outJump;
\r
10022 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10023 Float64 *in = (Float64 *)inBuffer;
\r
10024 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10025 for (j=0; j<info.channels; j++) {
\r
10026 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
10028 in += info.inJump;
\r
10029 out += info.outJump;
\r
10033 else if (info.outFormat == RTAUDIO_SINT24) {
\r
10034 Int24 *out = (Int24 *)outBuffer;
\r
10035 if (info.inFormat == RTAUDIO_SINT8) {
\r
10036 signed char *in = (signed char *)inBuffer;
\r
10037 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10038 for (j=0; j<info.channels; j++) {
\r
10039 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
10040 //out[info.outOffset[j]] <<= 16;
\r
10042 in += info.inJump;
\r
10043 out += info.outJump;
\r
10046 else if (info.inFormat == RTAUDIO_SINT16) {
\r
10047 Int16 *in = (Int16 *)inBuffer;
\r
10048 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10049 for (j=0; j<info.channels; j++) {
\r
10050 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
10051 //out[info.outOffset[j]] <<= 8;
\r
10053 in += info.inJump;
\r
10054 out += info.outJump;
\r
10057 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10058 // Channel compensation and/or (de)interleaving only.
\r
10059 Int24 *in = (Int24 *)inBuffer;
\r
10060 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10061 for (j=0; j<info.channels; j++) {
\r
10062 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10064 in += info.inJump;
\r
10065 out += info.outJump;
\r
10068 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10069 Int32 *in = (Int32 *)inBuffer;
\r
10070 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10071 for (j=0; j<info.channels; j++) {
\r
10072 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
10073 //out[info.outOffset[j]] >>= 8;
\r
10075 in += info.inJump;
\r
10076 out += info.outJump;
\r
10079 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10080 Float32 *in = (Float32 *)inBuffer;
\r
10081 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10082 for (j=0; j<info.channels; j++) {
\r
10083 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
10085 in += info.inJump;
\r
10086 out += info.outJump;
\r
10089 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10090 Float64 *in = (Float64 *)inBuffer;
\r
10091 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10092 for (j=0; j<info.channels; j++) {
\r
10093 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
10095 in += info.inJump;
\r
10096 out += info.outJump;
\r
10100 else if (info.outFormat == RTAUDIO_SINT16) {
\r
10101 Int16 *out = (Int16 *)outBuffer;
\r
10102 if (info.inFormat == RTAUDIO_SINT8) {
\r
10103 signed char *in = (signed char *)inBuffer;
\r
10104 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10105 for (j=0; j<info.channels; j++) {
\r
10106 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
10107 out[info.outOffset[j]] <<= 8;
\r
10109 in += info.inJump;
\r
10110 out += info.outJump;
\r
10113 else if (info.inFormat == RTAUDIO_SINT16) {
\r
10114 // Channel compensation and/or (de)interleaving only.
\r
10115 Int16 *in = (Int16 *)inBuffer;
\r
10116 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10117 for (j=0; j<info.channels; j++) {
\r
10118 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10120 in += info.inJump;
\r
10121 out += info.outJump;
\r
10124 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10125 Int24 *in = (Int24 *)inBuffer;
\r
10126 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10127 for (j=0; j<info.channels; j++) {
\r
10128 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
10130 in += info.inJump;
\r
10131 out += info.outJump;
\r
10134 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10135 Int32 *in = (Int32 *)inBuffer;
\r
10136 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10137 for (j=0; j<info.channels; j++) {
\r
10138 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
10140 in += info.inJump;
\r
10141 out += info.outJump;
\r
10144 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10145 Float32 *in = (Float32 *)inBuffer;
\r
10146 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10147 for (j=0; j<info.channels; j++) {
\r
10148 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10150 in += info.inJump;
\r
10151 out += info.outJump;
\r
10154 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10155 Float64 *in = (Float64 *)inBuffer;
\r
10156 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10157 for (j=0; j<info.channels; j++) {
\r
10158 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10160 in += info.inJump;
\r
10161 out += info.outJump;
\r
10165 else if (info.outFormat == RTAUDIO_SINT8) {
\r
10166 signed char *out = (signed char *)outBuffer;
\r
10167 if (info.inFormat == RTAUDIO_SINT8) {
\r
10168 // Channel compensation and/or (de)interleaving only.
\r
10169 signed char *in = (signed char *)inBuffer;
\r
10170 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10171 for (j=0; j<info.channels; j++) {
\r
10172 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10174 in += info.inJump;
\r
10175 out += info.outJump;
\r
10178 if (info.inFormat == RTAUDIO_SINT16) {
\r
10179 Int16 *in = (Int16 *)inBuffer;
\r
10180 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10181 for (j=0; j<info.channels; j++) {
\r
10182 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10184 in += info.inJump;
\r
10185 out += info.outJump;
\r
10188 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10189 Int24 *in = (Int24 *)inBuffer;
\r
10190 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10191 for (j=0; j<info.channels; j++) {
\r
10192 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10194 in += info.inJump;
\r
10195 out += info.outJump;
\r
10198 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10199 Int32 *in = (Int32 *)inBuffer;
\r
10200 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10201 for (j=0; j<info.channels; j++) {
\r
10202 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10204 in += info.inJump;
\r
10205 out += info.outJump;
\r
10208 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10209 Float32 *in = (Float32 *)inBuffer;
\r
10210 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10211 for (j=0; j<info.channels; j++) {
\r
10212 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10214 in += info.inJump;
\r
10215 out += info.outJump;
\r
10218 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10219 Float64 *in = (Float64 *)inBuffer;
\r
10220 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10221 for (j=0; j<info.channels; j++) {
\r
10222 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10224 in += info.inJump;
\r
10225 out += info.outJump;
\r
10231 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10232 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10233 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10235 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10241 if ( format == RTAUDIO_SINT16 ) {
\r
10242 for ( unsigned int i=0; i<samples; i++ ) {
\r
10243 // Swap 1st and 2nd bytes.
\r
10245 *(ptr) = *(ptr+1);
\r
10248 // Increment 2 bytes.
\r
10252 else if ( format == RTAUDIO_SINT32 ||
\r
10253 format == RTAUDIO_FLOAT32 ) {
\r
10254 for ( unsigned int i=0; i<samples; i++ ) {
\r
10255 // Swap 1st and 4th bytes.
\r
10257 *(ptr) = *(ptr+3);
\r
10260 // Swap 2nd and 3rd bytes.
\r
10263 *(ptr) = *(ptr+1);
\r
10266 // Increment 3 more bytes.
\r
10270 else if ( format == RTAUDIO_SINT24 ) {
\r
10271 for ( unsigned int i=0; i<samples; i++ ) {
\r
10272 // Swap 1st and 3rd bytes.
\r
10274 *(ptr) = *(ptr+2);
\r
10277 // Increment 2 more bytes.
\r
10281 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10282 for ( unsigned int i=0; i<samples; i++ ) {
\r
10283 // Swap 1st and 8th bytes
\r
10285 *(ptr) = *(ptr+7);
\r
10288 // Swap 2nd and 7th bytes
\r
10291 *(ptr) = *(ptr+5);
\r
10294 // Swap 3rd and 6th bytes
\r
10297 *(ptr) = *(ptr+3);
\r
10300 // Swap 4th and 5th bytes
\r
10303 *(ptr) = *(ptr+1);
\r
10306 // Increment 5 more bytes.
\r
10312 // Indentation settings for Vim and Emacs
\r
10314 // Local Variables:
\r
10315 // c-basic-offset: 2
\r
10316 // indent-tabs-mode: nil
10317 // End:
\r
10319 // vim: et sts=2 sw=2
\r